无法使用 Amazon Lex 和 Langchain 将 session_attributes['session_context'] 传递到 langchain 的历史记录

问题描述 投票:0回答:1

`我尝试将对话历史记录保存在 session_attributes['sessionContext'] 中,它有效,但是当生成日志时,我可以在 session_attributes['sessionContext'] 中看到对话历史记录,但不能在 {history} 中看到对话历史记录,即这里只有来自 lex appers i 的当前消息想要在提示符中将 session_attributes 作为历史记录传递。

import all necessary pacakages
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

def close(session_attributes, active_contexts, fulfillment_state, intent, message):
    
    response = {
        'sessionState': {
            'activeContexts':[{
                'name': 'intentContext',
                'contextAttributes': active_contexts,
                'timeToLive': {
                    'timeToLiveInSeconds': 600,
                    'turnsToLive': 1
                }
            }],
            'sessionAttributes': session_attributes,
            'dialogAction': {
                'type': 'Close',
            },
            'intent': intent,
        },
        'messages': [{'contentType': 'PlainText', 'content': message}]
    }

    return response
    
def delegate(session_attributes, active_contexts, intent, message):
    return {
        'sessionState': {
            'activeContexts':[{
                'name': 'intentContext',
                'contextAttributes': active_contexts,
                'timeToLive': {
                    'timeToLiveInSeconds': 600,
                    'turnsToLive': 1
                }
            }],
            'sessionAttributes': session_attributes,
            'dialogAction': {
                'type': 'Delegate',
            },
            'intent': intent,
        },
        'messages': [{'contentType': 'PlainText', 'content': message}]
    }


def initial_message(intent_name):
    response = {
            'sessionState': {
                'dialogAction': {
                    'type': 'ElicitSlot',
                    'slotToElicit': 'Location' if intent_name=='BookHotel' else 'PickUpCity'
                },
                'intent': {
                    'confirmationState': 'None',
                    'name': intent_name,
                    'state': 'InProgress'
                }
            }
    }
    
    return response

# --- Helper Functions ---

def try_ex(value):
    """
    Call passed in function in try block. If KeyError is encountered return None.
    This function is intended to be used to safely access dictionary of the Slots section in the payloads.
    Note that this function would have negative impact on performance.
    """

    if value is not None:
        return value['value']['interpretedValue']
    else:
        return None

def invoke_llm(query, session_history):
    
    endpoint_name = 'endpoint-name'
    region = 'us-east-1'
    kendra_index_id = 'kendra-index-id'
    print("invoke LLM session__history:: ", session_history )
    class ContentHandler(ContentHandlerBase):
        content_type = "application/json"
        accepts = "application/json"
    
        def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
            input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
            return input_str.encode('utf-8')
        def transform_output(self, output: bytes) -> str:
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json["generated_texts"][0]
    
    content_handler = ContentHandler()
    llm=SagemakerEndpoint(
            endpoint_name=endpoint_name, 
            region_name=region, 
            model_kwargs={"temperature":1e-10, "max_length": 500},
            content_handler=content_handler
        )
    
    retriever = KendraIndexRetriever(kendraindex=kendra_index_id, 
        awsregion=region, 
        return_source_documents=True)
    
    
    
    
    template = """
    Use the following context (delimited by <ctx></ctx>) and the chat history (delimited by <hs></hs>) to answer the question:
    ------
    <ctx>
    {context}
    </ctx>
    ------
    <hs>
    {history}
    </hs>
    ------
    {question}
    Answer:
    """
    prompt = PromptTemplate(
        input_variables=["history", "context", "question"],
        template=template
    )

    qa = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type='stuff',
        retriever=retriever,
        verbose=True,
        
        chain_type_kwargs={
            "verbose": True,
            "prompt": prompt,
            "memory": ConversationBufferMemory(
                memory_key="history",
                input_key="question",
                return_messages=True),
        }
    )
    # chat_history = []
    # while True:
    result = qa({'query':query, 'history': session_history})
    # result = qa({'query':query, 'history': lex_conv_history})
    
    response = result['result']
    
    response = qa(query)
    print("Answer: ", response['result'])

    return response


def lambda_handler(intent_request, context):
    
    print("input received: ", intent_request)
    
    logger.debug(intent_request)
    
    intent = intent_request['sessionState']['intent']
    
    
    
    session_attributes = intent_request['sessionState']['sessionAttributes']
    
    #print ("Session attributes -----",session_attributes)
    if 'sessionContext' not in session_attributes.keys():
        print("First Execution")
        session_attributes['sessionContext'] = ''
    
    active_contexts = {}
    if intent['name']=='FallbackIntent':
        query = intent_request['inputTranscript']+session_attributes['sessionContext']
        response = invoke_llm(query, session_attributes['sessionContext'])
        
        response_json = json.dumps({
            'Answer': response['result'],
        })
        
        active_contexts['Query'] = response_json
        logger.debug('Answer from LLM={}'.format(response_json))
        intent['confirmationState']="Confirmed"
        intent['state']="Fulfilled"
        session_attributes['sessionContext'] = session_attributes['sessionContext'] + ' ' + intent_request['inputTranscript']  + ' ' + response['result']
        print ("History - - - ",session_attributes['sessionContext'])
        return close(session_attributes, active_contexts, 'Fulfilled', intent, response['result'])
    
    #confirmation_status = intent_request['sessionState']['intent']['confirmationState']
    query = try_ex(intent_request['sessionState']['intent']['slots']['Query'])
    print("Question by user: ", query)
    
    if query or intent['name']=='FallbackIntent':
        response = invoke_llm(query, session_attributes['sessionContext'])
        # response = invoke_llm(query)
        
        response_json = json.dumps({
            'Answer': response['result'],
        })
        
        logger.debug('Answer from LLM={}'.format(response_json))
        intent['confirmationState']="Confirmed"
        intent['state']="Fulfilled"
        session_attributes['sessionContext'] = session_attributes['sessionContext'] + ' ' + intent_request['inputTranscript']  + ' ' + response['result']
        print ("History - - - ",session_attributes['sessionContext'])
        return close(session_attributes, active_contexts, 'Fulfilled', intent, response['result'])
python amazon-web-services amazon-lex langchain py-langchain
1个回答
0
投票

您需要从会话变量中迭代消息并将其添加到内存中。

memory = ConversationBufferMemory()
memory.chat_memory.add_user_message("hi!")
memory.chat_memory.add_ai_message("whats up?") 

现在加载内存变量

memory.load_memory_variables({})

您现在应该能够看到历史记录了

{'history': 'Human: hi!\nAI: whats up?'}
© www.soinside.com 2019 - 2024. All rights reserved.