from langchain.llms import LlamaCpp
from langchain import PromptTemplate, LLMChain
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.agents import Tool, AgentExecutor, initialize_agent, AgentType, Agent
from langchain.memory import ConversationBufferMemory
from langchain.chains import LLMChain
from langchain.agents.chat.base import ChatAgent
tools = [
]
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
llm = LlamaCpp(
model_path="llama-2-7b-chat.Q4_K_M.gguf",
input={"temperature": 0.75,
"max_length": 5000,
"top_p": 0.1},
callback_manager=callback_manager,
verbose=True
)
prefix = """
You are a chatbot having a conversation with a human. Only respond to the user's input.
"""
suffix = """
{chat_history}
{agent_scratchpad}
User: {input}
Chatbot:
"""
#ai_prefix = """"""
#human_prefix = """"""
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
prompt = ChatAgent.create_prompt(
tools,
system_message_prefix=prefix,
system_message_suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"]
)
llm_chain = LLMChain(llm=llm, prompt=prompt, memory=memory)
agent = ChatAgent(
llm_chain=llm_chain,
tools=tools,
verbose=True,
prompt=prompt,
)
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
memory=memory,
verbose = True
)
resp = agent_executor.run(input = "Hi")
这是我的代码。稍后将添加这些工具。我想创建一个能够使用 langchain 记住对话内容的单一对话代理。但是,当我使用这段代码时,
> Entering new AgentExecutor chain...
System: Hello! What's up?
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[22], line 1
----> 1 resp = agent_executor.run(input = "Hi")
File ~\anaconda3\Lib\site-packages\langchain\chains\base.py:492, in Chain.run(self, callbacks, tags, metadata, *args, **kwargs)
487 return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[
488 _output_key
489 ]
491 if kwargs and not args:
--> 492 return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
493 _output_key
494 ]
496 if not kwargs and not args:
497 raise ValueError(
498 "`run` supported with either positional arguments or keyword arguments,"
499 " but none were provided."
500 )
File ~\anaconda3\Lib\site-packages\langchain\chains\base.py:292, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
290 except BaseException as e:
291 run_manager.on_chain_error(e)
--> 292 raise e
293 run_manager.on_chain_end(outputs)
294 final_outputs: Dict[str, Any] = self.prep_outputs(
295 inputs, outputs, return_only_outputs
296 )
File ~\anaconda3\Lib\site-packages\langchain\chains\base.py:286, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
279 run_manager = callback_manager.on_chain_start(
280 dumpd(self),
281 inputs,
282 name=run_name,
283 )
284 try:
285 outputs = (
--> 286 self._call(inputs, run_manager=run_manager)
287 if new_arg_supported
288 else self._call(inputs)
289 )
290 except BaseException as e:
291 run_manager.on_chain_error(e)
File ~\anaconda3\Lib\site-packages\langchain\agents\agent.py:1122, in AgentExecutor._call(self, inputs, run_manager)
1120 # We now enter the agent loop (until it returns something).
1121 while self._should_continue(iterations, time_elapsed):
-> 1122 next_step_output = self._take_next_step(
1123 name_to_tool_map,
1124 color_mapping,
1125 inputs,
1126 intermediate_steps,
1127 run_manager=run_manager,
1128 )
1129 if isinstance(next_step_output, AgentFinish):
1130 return self._return(
1131 next_step_output, intermediate_steps, run_manager=run_manager
1132 )
File ~\anaconda3\Lib\site-packages\langchain\agents\agent.py:919, in AgentExecutor._take_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
916 intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
918 # Call the LLM to see what to do.
--> 919 output = self.agent.plan(
920 intermediate_steps,
921 callbacks=run_manager.get_child() if run_manager else None,
922 **inputs,
923 )
924 except OutputParserException as e:
925 if isinstance(self.handle_parsing_errors, bool):
File ~\anaconda3\Lib\site-packages\langchain\agents\agent.py:531, in Agent.plan(self, intermediate_steps, callbacks, **kwargs)
519 """Given input, decided what to do.
520
521 Args:
(...)
528 Action specifying what tool to use.
529 """
530 full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
--> 531 full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs)
532 return self.output_parser.parse(full_output)
File ~\anaconda3\Lib\site-packages\langchain\chains\llm.py:257, in LLMChain.predict(self, callbacks, **kwargs)
242 def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
243 """Format prompt with kwargs and pass to LLM.
244
245 Args:
(...)
255 completion = llm.predict(adjective="funny")
256 """
--> 257 return self(kwargs, callbacks=callbacks)[self.output_key]
File ~\anaconda3\Lib\site-packages\langchain\chains\base.py:294, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
292 raise e
293 run_manager.on_chain_end(outputs)
--> 294 final_outputs: Dict[str, Any] = self.prep_outputs(
295 inputs, outputs, return_only_outputs
296 )
297 if include_run_info:
298 final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
File ~\anaconda3\Lib\site-packages\langchain\chains\base.py:390, in Chain.prep_outputs(self, inputs, outputs, return_only_outputs)
388 self._validate_outputs(outputs)
389 if self.memory is not None:
--> 390 self.memory.save_context(inputs, outputs)
391 if return_only_outputs:
392 return outputs
File ~\anaconda3\Lib\site-packages\langchain\memory\chat_memory.py:35, in BaseChatMemory.save_context(self, inputs, outputs)
33 def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
34 """Save context from this conversation to buffer."""
---> 35 input_str, output_str = self._get_input_output(inputs, outputs)
36 self.chat_memory.add_user_message(input_str)
37 self.chat_memory.add_ai_message(output_str)
File ~\anaconda3\Lib\site-packages\langchain\memory\chat_memory.py:22, in BaseChatMemory._get_input_output(self, inputs, outputs)
18 def _get_input_output(
19 self, inputs: Dict[str, Any], outputs: Dict[str, str]
20 ) -> Tuple[str, str]:
21 if self.input_key is None:
---> 22 prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
23 else:
24 prompt_input_key = self.input_key
File ~\anaconda3\Lib\site-packages\langchain\memory\utils.py:19, in get_prompt_input_key(inputs, memory_variables)
17 prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"]))
18 if len(prompt_input_keys) != 1:
---> 19 raise ValueError(f"One input key expected got {prompt_input_keys}")
20 return prompt_input_keys[0]
ValueError: One input key expected got ['agent_scratchpad', 'input']
显示此错误。我还尝试删除“agent_scratchpad”,但遇到另一个错误,表明它是一个重要组件。
我应该如何引入“agent_scratchpad”?如果您能帮助我解决此代码错误,我将不胜感激。谢谢你。
我还尝试删除“agent_scratchpad”,但遇到另一个错误,表明它是一个重要组件。
我尝试更改 input_variables 的顺序,还尝试了不同的后缀提示。
ZeroshotAgent 没有遇到这个错误。
您在使用 LangChain 库时似乎遇到了与 ChatAgent 输入配置相关的问题。您看到的错误消息表明它需要一个输入键,但收到多个键(['agent_scratchpad', 'input'])。
在您的代码中,您在创建 ChatAgent 时已将 input_variables 定义为 ["input", "chat_history", "agent_scratchpad"]。这意味着当您调用 agent_executor.run(input="Hi") 时,它会预期所有这三个变量的输入。
为了解决这个问题,您可以在调用 agent_executor.run() 时为所有这三个输入变量提供值。但是,如果您不打算使用“agent_scratchpad”输入,则可以在创建 ChatAgent 时将其从 input_variables 列表中删除。您可以这样做:
prompt = ChatAgent.create_prompt(
tools,
system_message_prefix=prefix,
system_message_suffix=suffix,
input_variables=["input", "chat_history"]
)
通过进行此调整,您应该不再遇到与“agent_scratchpad”相关的错误。请记住相应地更新您的提示和代码,以与修改后的输入配置保持一致。