Tags: python, openai-api, langchain, py-langchain

How to retrieve source documents via LangChain's get_relevant_documents method only if the answer is from the custom knowledge base


I am making a chatbot that accesses an external knowledge base (the docs list below). I want to get the relevant documents the bot used for its answer, but not when the user input is something like "hello", "how are you", "what's 2+2", or anything else that is not answered from the external knowledge base. In those cases, I want retriever.get_relevant_documents(query) (or some other call) to return an empty list or something similar.

import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain 
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

os.environ['OPENAI_API_KEY'] = ''

custom_template = """
This is a conversation with a human. Answer the questions you get based on the knowledge you have.
If you don't know the answer, just say that you don't; don't try to make up an answer.
Chat History:
{chat_history}
Follow Up Input: {question}
"""
CUSTOM_QUESTION_PROMPT = PromptTemplate.from_template(custom_template)

llm = ChatOpenAI(
    model_name="gpt-3.5-turbo",  # Name of the language model
    temperature=0  # Parameter that controls the randomness of the generated responses
)

embeddings = OpenAIEmbeddings()

docs = [
    "Buildings are made out of brick",
    "Buildings are made out of wood",
    "Buildings are made out of stone",
    "Buildings are made out of atoms",
    "Buildings are made out of building materials",
    "Cars are made out of metal",
    "Cars are made out of plastic",
  ]

vectorstore = FAISS.from_texts(docs, embeddings)

retriever = vectorstore.as_retriever()

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

qa = ConversationalRetrievalChain.from_llm(
    llm,
    retriever,
    condense_question_prompt=CUSTOM_QUESTION_PROMPT,
    memory=memory
)

query = "what are cars made of?"
result = qa({"question": query})
print(result)
print(retriever.get_relevant_documents(query))

I tried setting a score threshold on the retriever, but off-topic queries still return documents with high similarity scores, and for some prompts that do have a relevant document, I get back no documents at all.

retriever = vectorstore.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": .9})
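For debugging, it helps to inspect the raw scores directly. Here is a minimal sketch using FAISS's similarity_search_with_score (note: with the default FAISS index the score is an L2 distance, where lower means more similar, while the similarity_score_threshold search type filters on a normalized relevance score, so a raw threshold can behave unexpectedly):

# Print raw FAISS scores for an off-topic and an on-topic query.
# With the default FAISS index these are L2 distances: lower = more similar.
for q in ["hello", "what are cars made of?"]:
    for doc, score in vectorstore.similarity_search_with_score(q, k=2):
        print(f"{q!r}: score={score:.3f} doc={doc.page_content!r}")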

Solution

  • To solve this problem, I had to change the chain type to RetrievalQA and introduce agents and tools.

    import os
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import FAISS
    from langchain.chains import RetrievalQA
    from langchain.memory import ConversationBufferMemory
    from langchain.chat_models import ChatOpenAI
    from langchain.prompts import PromptTemplate
    from langchain.agents import Tool, initialize_agent
    from langchain.agents.types import AgentType
    
    os.environ['OPENAI_API_KEY'] = ''
    
    system_message = """
    "You are the XYZ bot."
    "This is conversation with a human. Answer the questions you get based on the knowledge you have."
    "If you don't know the answer, just say that you don't, don't try to make up an answer."
    """
    
    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",  # Name of the language model
        temperature=0  # Parameter that controls the randomness of the generated responses
    )
    
    embeddings = OpenAIEmbeddings()
    
    docs = [
        "Buildings are made out of brick",
        "Buildings are made out of wood",
        "Buildings are made out of stone",
        "Buildings are made out of atoms",
        "Buildings are made out of building materials",
        "Cars are made out of metal",
        "Cars are made out of plastic",
      ]
    
    vectorstore = FAISS.from_texts(docs, embeddings)
    
    retriever = vectorstore.as_retriever()
    
    memory = ConversationBufferMemory(memory_key="chat_history", input_key='input', return_messages=True, output_key='output')
    
    qa = RetrievalQA.from_chain_type(
            llm=llm,
            chain_type="stuff",
            retriever=vectorstore.as_retriever(),
            verbose=True,
            return_source_documents=True
        )
    
    tools = [
            Tool(
                name="doc_search_tool",
                func=qa,
                description=(
                   "This tool is used to retrieve information from the knowledge base"
                )
            )
        ]
    
    agent = initialize_agent(
            agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
            tools=tools,
            llm=llm,
            memory=memory,
            return_source_documents=True,
            return_intermediate_steps=True,
            agent_kwargs={"system_message": system_message}
            )
    
    query1 = "what are buildings made of?"
    result1 = agent(query1)
    
    
    query2 = "who are you?"
    result2 = agent(query2)
    

    If the agent used the knowledge base, the result will have entries under the key "intermediate_steps", and the source documents can be accessed through result1["intermediate_steps"][0][1]["source_documents"].

    Otherwise, when the query didn't need sources, result2["intermediate_steps"] will be an empty list.
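    A small helper makes that check explicit; this is a minimal sketch (get_sources is a hypothetical name, assuming the result structure described above):

    def get_sources(result):
        # Return the tool's source documents if the agent called
        # doc_search_tool for this query, otherwise an empty list.
        steps = result.get("intermediate_steps", [])
        if steps:
            return steps[0][1]["source_documents"]
        return []

    print(get_sources(result1))  # source documents about buildings
    print(get_sources(result2))  # []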