How to add memory to load_qa_chain, or how to implement ConversationalRetrievalChain with a custom prompt that takes multiple inputs


I am trying to provide a custom prompt for question answering in langchain. I cannot do this with ConversationalRetrievalChain, because it doesn't allow multiple custom inputs in a custom prompt. So I used load_qa_chain instead, but with load_qa_chain I cannot use memory.

How can I add memory to load_qa_chain, or how can I implement ConversationalRetrievalChain with a custom prompt that has multiple inputs?

    import openai
    import numpy as np
    import pandas as pd
    import os
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import Chroma
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.llms import OpenAI
    from langchain.chains import RetrievalQA, ConversationalRetrievalChain,RetrievalQAWithSourcesChain
    from langchain.chains.qa_with_sources import load_qa_with_sources_chain
    from langchain.chains.question_answering import load_qa_chain
    from langchain.document_loaders import UnstructuredFileLoader
    from langchain.prompts import PromptTemplate
    
    loader = UnstructuredFileLoader("../document.pdf", mode="elements")
    documents = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_documents(documents)
    #embeddings = OpenAIEmbeddings()
    from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
    embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
    vectorDB = Chroma.from_documents(texts,embeddings)
    
    
    prompt_template = "You are a Chat customer support agent.\
            Address the customer as Dear Mr. or Miss. depending on customer's gender followed by Customer's First Name.\
            Use the following pieces of context to answer the question at the end.\
            If you don't know the answer, just say that you don't know, don't try to make up an answer.\
            Below are the details of the customer:\
            Customer's Name : {Customer_Name} \
            Customer's Resident State: {Customer_State}\
            Customer's Gender: {Customer_Gender}\
            {context}\
            Question: {question}\
            Answer: "
    
    import json

    # read the customer profile from the JSON file
    with open('Customer_profile.json', 'r') as openfile:
        json_object = json.load(openfile)

    cName = json_object['Customer_Name']
    cState = json_object['Customer_State']
    cGen = json_object['Customer_Gender']
    
    PROMPT = PromptTemplate(
        template=prompt_template, input_variables=["context", "question","Customer_Name","Customer_State","Customer_Gender"]
    )
    
    chain_type_kwargs = {"prompt": PROMPT}
    
    from langchain.memory import ConversationBufferMemory
    memory = ConversationBufferMemory(memory_key="chat_history", output_key='answer',return_messages=True)
    
    #qa = RetrievalQAWithSourcesChain.from_chain_type(OpenAI(temperature=0), retriever=vectorDB.as_retriever(),chain_type="stuff", memory=memory,return_source_documents=True,chain_type_kwargs=chain_type_kwargs)
    #qa = RetrievalQAWithSourcesChain.from_chain_type(OpenAI(temperature=0), retriever=vectorDB.as_retriever(),chain_type="stuff", memory=memory,return_source_documents=True)
    #qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), retriever=vectorDB.as_retriever(),chain_type="stuff", memory=memory,return_source_documents=True,chain_type_kwargs=chain_type_kwargs)
    #qa = load_qa_with_sources_chain(OpenAI(temperature=0),chain_type="stuff",prompt=PROMPT)
    qa = load_qa_chain(OpenAI(temperature=0.1),chain_type="stuff",prompt=PROMPT)
    
    import langchain
    langchain.debug=False
    query="How's the weather in my place?"
    docs = vectorDB.similarity_search(query)
    
    
    
    #vectordbkwargs = {"search_distance": 0.9}
    result = qa({"input_documents": docs, "question": query,
                 'Customer_Gender': 'Male', 'Customer_State': 'Madhya Pradesh',
                 'Customer_Name': 'Bob'})
    #result = qa({"question": query})
    print(result['output_text'])

Customer_profile.json:

    {
        "Customer_Name": "Bob",
        "Customer_State": "NY",
        "Customer_Gender": "Male"
    }
2 Answers

0 votes

I don't think you can store memory in the load_qa_chain function, since it just accepts documents and an input question. Instead, you would need to store the previous question's response in a local cache and pass it to the prompt on every call. ConversationalRetrievalChain can help you here.
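For illustration, here is a minimal sketch of that manual-caching approach. It assumes the question's prompt template is extended with a {chat_history} input variable; the history list and the ask helper are made-up names, not part of the original code:

    # local cache of previous turns, rendered into the prompt by hand
    history = []  # list of (question, answer) tuples

    def ask(chain, vectorDB, query, **profile):
        # retrieve fresh context, then pass the cached transcript as a prompt variable
        docs = vectorDB.similarity_search(query)
        chat_history = "\n".join(f"Human: {q}\nAI: {a}" for q, a in history)
        result = chain({"input_documents": docs, "question": query,
                        "chat_history": chat_history, **profile})
        history.append((query, result["output_text"]))
        return result["output_text"]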

You can use the condense_question_prompt parameter when initializing ConversationalRetrievalChain.from_llm().

You can define the prompt like this:

    def get_new_prompt():
        custom_template = """Given the following conversation and a follow up question,
        rephrase the follow up question to be a standalone question. At the end of the standalone question
        add this: 'Do not answer the question based on your own knowledge.' If you do not know the answer
        reply with 'I am sorry'.
        Chat History:
        {chat_history}
        Follow Up Input: {question}
        Standalone question:"""
        CUSTOM_QUESTION_PROMPT = PromptTemplate.from_template(custom_template)
        return CUSTOM_QUESTION_PROMPT

The model setup might look like this:

    from langchain.chat_models import ChatOpenAI

    # vector_store, LLM and OPENAPI_TOKEN are placeholders for your own store, model name and API key
    retriever = vector_store.as_retriever(search_kwargs={'k': 1})
    memory = ConversationBufferMemory(return_messages=True, memory_key='chat_history', human_prefix='Human', ai_prefix='AI')
    chat = ChatOpenAI(model=LLM, openai_api_key=OPENAPI_TOKEN, temperature=0, max_tokens=100)

    conv_chain = ConversationalRetrievalChain.from_llm(chat, condense_question_prompt=get_new_prompt(), retriever=retriever, memory=memory)
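With the memory attached, a call then only needs the question, something like this (the query string is just an example):

    result = conv_chain({"question": "How's the weather in my place?"})
    print(result["answer"])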

Try this!

You may need a session cache or in-memory cache to store the memory after each run, and then, for the next API call (question), create a new chain from the existing memory in the cache.
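As a rough sketch of that idea, assuming a plain dict keyed by session id serves as the cache (session_memories and get_chain are hypothetical names):

    # one ConversationBufferMemory per session, reused across API calls
    session_memories = {}

    def get_chain(session_id):
        memory = session_memories.setdefault(
            session_id,
            ConversationBufferMemory(return_messages=True, memory_key='chat_history')
        )
        # rebuild the chain around the existing memory for each new question
        return ConversationalRetrievalChain.from_llm(
            chat, condense_question_prompt=get_new_prompt(),
            retriever=retriever, memory=memory
        )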


-1 votes

Based on my custom PDF, you can use the following logic; you can refer to my notebook for more details.

    # prompt that stuffs the customer-profile fields into the template
    prompt_template = """You are a Chat customer support agent.
    Address the customer as Dear Mr. or Miss. depending on customer's gender followed by Customer's First Name.
    Use the following pieces of context to answer the question at the end.
    If you don't know the answer, just say that you don't know, don't try to make up an answer.
    Below are the details of the customer:
    Customer's Name : {Customer_Name}
    Customer's Resident State: {Customer_State}
    Customer's Gender: {Customer_Gender}
    {context}
    Question: {question}
    Answer: """
    PROMPT = PromptTemplate(
        template=prompt_template,
        input_variables=["context", "question", "Customer_Name", "Customer_State", "Customer_Gender"]
    )
    from langchain.chains.question_answering import load_qa_chain

    # input_key tells the memory which input to record as the human turn
    memory = ConversationBufferMemory(memory_key="chat_history", input_key="question")
    chain = load_qa_chain(
        OpenAI(temperature=0), chain_type="stuff", memory=memory, prompt=PROMPT
    )


    query = "why frog sent letter to Todd??"
    docs = db.similarity_search(query=query)

    # build the input dictionary for the chain
    chain_input = {
        "input_documents": docs,
        "context": "This is contextless",
        "question": query,
        "Customer_Name": "Bob",
        "Customer_State": "NY",
        "Customer_Gender": "Male"
    }

    result = chain(chain_input, return_only_outputs=True)

Output:

    Dear Mr. Bob, Frog sent a letter to Toad because he wanted to show Toad that he was glad to be his best friend. He wrote in the letter, "Dear Toad, I am glad that you are my best friend. Your best friend, Frog."

If you intend to keep the chat history, you can use the memory object; note that to actually feed the history back into the prompt, the template also needs a {chat_history} input variable.
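For instance, a follow-up turn on the same chain object might look like this (the follow-up question is made up for illustration):

    # the same chain keeps accumulating memory between calls
    follow_up = "What did Toad do after reading the letter?"
    docs = db.similarity_search(query=follow_up)
    chain_input.update({"input_documents": docs, "question": follow_up})
    result = chain(chain_input, return_only_outputs=True)

    # the buffer now holds both exchanges as a plain-text transcript
    print(chain.memory.buffer)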
