from langchain_cognee import CogneeRetriever
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI  # example chat model; any LangChain chat model works

# Instantiate the retriever with your Cognee config
retriever = CogneeRetriever(llm_api_key="sk-", dataset_name="my_dataset", k=3)

# Optionally, prune/reset the dataset for a clean slate
retriever.prune()

# Add some documents to the dataset
docs = [
    Document(page_content="Elon Musk is the CEO of SpaceX."),
    Document(page_content="SpaceX focuses on space travel."),
]
retriever.add_documents(docs)

# Process the added documents so they become searchable in Cognee
retriever.process_data()
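
# (Optional) Sanity-check the retriever on its own before wiring it into a chain.
# The query below is only an illustrative example.
results = retriever.invoke("Who is the CEO of SpaceX?")
for doc in results:
    print(doc.page_content)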

# Prompt that grounds the answer in the retrieved context
prompt = ChatPromptTemplate.from_template(
    """Answer the question based only on the context provided.
Context: {context}
Question: {question}"""
)

def format_docs(docs):
    # Join the retrieved documents into a single context string
    return "\n\n".join(doc.page_content for doc in docs)

# The chain needs a chat model to generate answers. ChatOpenAI is used here only
# as an example; substitute any LangChain chat model you have configured.
llm = ChatOpenAI(model="gpt-4o-mini")

# Compose the RAG chain: retrieve context, format it, fill the prompt,
# call the model, and parse the response into a plain string
chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
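
# Run the full chain end to end. The question is an example; the answer text
# depends on your documents and the model configured above.
answer = chain.invoke("Who is the CEO of SpaceX?")
print(answer)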