I am new to LangChain and followed the Retrieval QA guide from the LangChain docs. I have a custom prompt, but when I try to pass the prompt via chain_type_kwargs,
it throws a pydantic ValidationError for
StuffDocumentsChain.
When I remove chain_type_kwargs,
it just works.
How can I pass my custom prompt to the chain?
File /usr/local/lib/python3.11/site-packages/pydantic/main.py:341, in pydantic.main.BaseModel.__init__()
ValidationError: 1 validation error for StuffDocumentsChain
__root__
document_variable_name context was not found in llm_chain input_variables: ['question'] (type=value_error)
import json, os
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.document_loaders import JSONLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate
from pathlib import Path
from pprint import pprint
# Make the OpenAI key visible to langchain's OpenAI/ChatOpenAI clients.
# NOTE(review): hardcoded secret — load from the environment or a secret
# store instead of committing it to source.
os.environ["OPENAI_API_KEY"] = "my-key"
def metadata_func(record: dict, metadata: dict) -> dict:
    """Carry the source record's drug name over onto the document metadata.

    Called by JSONLoader for every extracted record; mutates and returns
    the provided ``metadata`` dict.
    """
    metadata.update(drug_name=record["drug_name"])
    return metadata
# Ingest pipeline: load JSON records -> split into chunks -> embed -> index.
# jq_schema '.drugs[]' iterates the top-level "drugs" array; each record's
# "data" field becomes the document text and metadata_func attaches drug_name.
loader = JSONLoader(
file_path='./drugs_data_v2.json',
jq_schema='.drugs[]',
content_key="data",
metadata_func=metadata_func)
docs = loader.load()
# Split oversized documents; 200-char overlap preserves context across chunks.
text_splitter = CharacterTextSplitter(chunk_size=5000, chunk_overlap=200)
texts = text_splitter.split_documents(docs)
# Embed the chunks with OpenAI and index them in an in-memory Chroma store.
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings)
# Custom prompt for the "stuff" chain. StuffDocumentsChain pastes the
# retrieved documents into a variable named 'context' (its default
# document_variable_name), so the template MUST contain {context} and
# declare it in input_variables — omitting it raises the pydantic
# ValidationError "document_variable_name context was not found in
# llm_chain input_variables".
# (The original started with """/ — a typo for the """\ line continuation.)
template = """\
Use the following context to answer the question.

{context}

Question: {question}
Answer:
"""
PROMPT = PromptTemplate(template=template, input_variables=['context', 'question'])
# Build the RetrievalQA chain. chain_type="stuff" concatenates all retrieved
# chunks into the prompt's {context} slot; chain_type_kwargs forwards the
# custom PROMPT to the underlying StuffDocumentsChain's llm_chain.
qa = RetrievalQA.from_chain_type(
llm=ChatOpenAI(
model_name='gpt-3.5-turbo-16k'
),
chain_type="stuff",
chain_type_kwargs={"prompt": PROMPT},
retriever=docsearch.as_retriever(),
)
query = "What did the president say about Ketanji Brown Jackson"
# Runs retrieval + LLM call; returns the answer string.
qa.run(query)
The `{context}` placeholder is missing from your template. The "stuff" chain (StuffDocumentsChain) inserts the retrieved documents into a variable named `context`, so your prompt must contain `{context}` and declare `input_variables=['context', 'question']` — that is exactly what the error "document_variable_name context was not found in llm_chain input_variables: ['question']" is telling you.