from langchain.memory import ChatMessageHistory
from langchain.memory import ConversationBufferMemory

history = ChatMessageHistory()
history.add_user_message("hi!")
history.add_ai_message("whats up?")
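The conversation object used in the next snippet is not constructed in this excerpt. A minimal sketch of the assumed setup could look like the following (the OpenAI placeholder is an assumption; any already-configured LLM works):

from langchain.llms import OpenAI
from langchain.chains import ConversationChain

llm = OpenAI(temperature=0)  # placeholder model; swap in whatever LLM you actually use
conversation = ConversationChain(
    llm=llm,
    memory=ConversationBufferMemory(),  # starts empty; pass chat_memory=history to seed it with the messages above
    verbose=True,
)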
fir = conversation.predict(input="Hello World!")
sec = conversation.predict(input="How to evaluate the world?")
print(fir)
# >> Hello! How can I assist you today?
print(sec)
# >> Evaluating the world can be a complex task as it involves considering multiple factors and perspectives. Some common approaches to evaluating the world include assessing the state of the economy, analyzing social and political systems, examining environmental conditions, and evaluating the well-being of individuals and communities. It can also involve considering ethical and moral values, cultural differences, and historical contexts. Ultimately, the process of evaluating the world is subjective and can vary depending on individual beliefs, values, and priorities. Is there any specific aspect of the world you would like to evaluate?
Because of the verbose parameter, the following log is printed during the run:
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

Current conversation:
Human: Hello World!
AI: Hello! How can I assist you today?
Human: How to evaluate the world?
AI:
memory.save_context({"input": "not much you"}, {"output": "not much"})
print(memory.load_memory_variables({}))
# >> {'history': 'Human: not much you\nAI: not much'}
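When the memory is meant to feed a chat model rather than a completion model, setting return_messages=True makes the buffer return message objects instead of a single string. A small sketch (not from the original, output repr abbreviated):

buffer_memory = ConversationBufferMemory(return_messages=True)
buffer_memory.save_context({"input": "not much you"}, {"output": "not much"})
print(buffer_memory.load_memory_variables({}))
# >> {'history': [HumanMessage(content='not much you'), AIMessage(content='not much')]}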
from langchain.memory import ConversationSummaryMemory, ChatMessageHistory
from langchain.llms import OpenAI
memory = ConversationSummaryMemory(
    llm=chat_model,
    return_messages=True
)
memory.save_context({"input": "hi"}, {"output": "whats up"})
print(memory.load_memory_variables({}))
# >> {'history': [SystemMessage(content='The human greets the AI with "Hello World!" and the AI responds with "Yes!"')]}
messages = memory.chat_memory.messages
previous_summary = ""
print(memory.predict_new_summary(messages, previous_summary))
# >> The human greets the AI with "Hello World!" and the AI responds with "Yes!"
You can optionally reuse a previously generated summary to speed up initialization: pass it in directly so the memory does not have to regenerate the summary.
memory = ConversationSummaryMemory(
    llm=chat_model,
    # The buffer constructor argument seeds the memory with an existing summary
    buffer="The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.",
    chat_memory=history,
    return_messages=True
)
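A quick check of the restored state (not in the original, output abbreviated); because return_messages=True, the summary comes back wrapped in a SystemMessage:

print(memory.load_memory_variables({}))
# >> {'history': [SystemMessage(content='The human asks what the AI thinks of artificial intelligence. ...')]}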
else:
    raise NotImplementedError(
        f"get_num_tokens_from_messages() is not presently implemented "
        f"for model {model}."
        "See https://github.com/openai/openai-python/blob/main/chatml.md for "
        "information on how messages are converted to tokens."
    )
{'input': '我又来了,还记得我昨天为什么要来买花吗?',
 'history': "System: \nThe human asked the AI for advice on buying a bouquet for their sister's birthday. The AI suggested buying a vibrant bouquet as a representation of their wishes and blessings, and recommended looking for special bouquets like colorful roses or lilies for something more unique.\nHuman: 她喜欢粉色玫瑰,颜色是粉色的。\nAI: 好的,那粉色玫瑰就是一个很好的选择!你可以买一束粉色玫瑰花束,这样你姐姐会很开心的!你可以在花店里找到粉色玫瑰,也可以从网上订购,你可以根据你的预算,选择合适的数量。另外,你可以考虑添加一些装饰,比如细绳、彩带或者小礼品",
 'response': ' 是的,我记得你昨天来买花是为了给你姐姐的生日。你想买一束粉色玫瑰花束来表达你的祝福和祝愿,你可以在花店里找到粉色玫瑰,也可以从网上订购,你可以根据你的预算,选择合适的数量。另外,你可以考虑添加一些装饰,比如细绳、彩带或者小礼品'}
memory = ConversationEntityMemory(llm=Azure.chat_model)
_input = {"input": "Deven & Sam are working on a hackathon project"}
# This creates new entity keys
memory.load_memory_variables(_input)
# This summarizes the entities
memory.save_context(
    _input,
    {"output": " That sounds like a great project! What kind of project are they working on?"}
)
print(memory.load_memory_variables({"input": 'The relation between Deven and Sam?'}))
# >> {'history': 'Human: Deven & Sam are working on a hackathon project\nAI: That sounds like a great project! What kind of project are they working on?', 'entities': {'Deven': 'Deven is currently working on a hackathon project with Sam.', 'Sam': 'Sam is working on a hackathon project with Deven.'}}
print(memory.load_memory_variables({"input": 'What is Sam doing?'}))
# >> {'history': 'Human: Deven & Sam are working on a hackathon project\nAI: That sounds like a great project! What kind of project are they working on?', 'entities': {'Sam': 'Sam is working on a hackathon project with Deven.'}}
As the example shows, when the topic involves both people (The relation between Deven and Sam?), the memory summarizes and returns information about both Deven and Sam; when the topic concerns only Sam (What is Sam doing?), only Sam's entity information is returned.
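In practice, entity memory is usually plugged into a ConversationChain together with an entity-aware prompt template. A sketch, reusing the same chat model wrapper as above (the import path of ENTITY_MEMORY_CONVERSATION_TEMPLATE may differ across LangChain versions):

from langchain.chains import ConversationChain
from langchain.chains.conversation.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE

# The entity-aware template expects {entities}, {history} and {input},
# which ConversationEntityMemory provides.
conversation = ConversationChain(
    llm=Azure.chat_model,
    prompt=ENTITY_MEMORY_CONVERSATION_TEMPLATE,
    memory=ConversationEntityMemory(llm=Azure.chat_model),
    verbose=True,
)
conversation.predict(input="Deven & Sam are working on a hackathon project")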
Knowledge Graph
ConversationKGMemory uses a knowledge graph to reconstruct memories.
llm = Azure.chat_model
memory = ConversationKGMemory(llm=llm)
memory.save_context({"input": "say hi to Sam"}, {"output": "who is Sam"})
memory.save_context({"input": "Sam is a friend"}, {"output": "okay"})
memory.save_context({"input": "Sam and Deven are my teachers"}, {"output": "okay, I know"})
memory.save_context({"input": "Deven has a white watch"}, {"output": "okay"})
print(memory.load_memory_variables({"input": "who is sam"}))
# >> {'history': 'On Sam: Sam is a person. Sam is a friend. Sam is my teacher.\nOn Deven: Deven is my teacher. Deven has white watch.'}
print(memory.load_memory_variables({"input": "who is Deven"}))
# >> {'history': 'On Deven: Deven is my teacher. Deven has white watch.'}
Like entity memory, it uses an LLM to summarize the content and match it against the input.
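To see what the knowledge-graph memory has actually extracted, it exposes a few inspection helpers; a short sketch (method names follow the classic ConversationKGMemory API and may differ in newer versions):

print(memory.get_current_entities("who is Sam"))
# entities the model finds in the given input, e.g. ['Sam']
print(memory.get_knowledge_triplets("Sam and Deven are my teachers"))
# knowledge triples extracted from the text, e.g. a list of (subject, predicate, object) triples
print(memory.kg.get_triples())
# every triple currently stored in the underlying graph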
Vector Store
VectorStoreRetrieverMemory stores memories in a vector store and queries the top-K best-matching documents on every call. Unlike most other memory classes, it does not explicitly track the order of interactions; the "documents" are fragments of past conversation, which helps the AI recall earlier parts of the history.
Initializing the vector store
The details depend on which vector store you use.
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.vectorstores import FAISS
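The rest of the initialization is not shown in this excerpt. A sketch following the classic LangChain + FAISS pattern (the 1536-dimensional index assumes OpenAI embeddings, and OpenAIEmbeddings needs a configured API key):

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.memory import VectorStoreRetrieverMemory

# Build an empty FAISS index sized for the embedding model (1536 for OpenAI embeddings)
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
embedding_fn = OpenAIEmbeddings().embed_query
vectorstore = FAISS(embedding_fn, index, InMemoryDocstore({}), {})

# Return only the single best-matching document on each query
retriever = vectorstore.as_retriever(search_kwargs=dict(k=1))
memory = VectorStoreRetrieverMemory(retriever=retriever)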
# When added to an agent, the memory object can store relevant information from the conversation or from tool usage
memory.save_context({"input": "My favorite food is pizza"}, {"output": "that's good to know"})
memory.save_context({"input": "My favorite sport is soccer"}, {"output": "..."})
memory.save_context({"input": "I don't like the Celtics"}, {"output": "ok"})
Usage
print(memory.load_memory_variables({"prompt": "what sport should i watch?"})["history"])
# >> input: My favorite sport is soccer\noutput: ...
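The llm_chain called next is not part of the vector-store example; it belongs to the separate pattern of attaching memory to a plain LLMChain, and its setup is not shown in this excerpt. A minimal sketch in that style (the template and variable names here are assumptions):

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory

# The memory's memory_key must match the {chat_history} slot in the template
template = """You are a chatbot having a conversation with a human.

{chat_history}
Human: {human_input}
Chatbot:"""
prompt = PromptTemplate(input_variables=["chat_history", "human_input"], template=template)
chat_history_memory = ConversationBufferMemory(memory_key="chat_history")

llm_chain = LLMChain(llm=llm, prompt=prompt, memory=chat_history_memory, verbose=True)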
llm_chain.predict(human_input="Hi there my friend")
The same applies to chat models.
ConversationChain
ConversationChain is a subclass of LLMChain. Its defining feature is a conversation format that includes an AI prefix and a Human prefix, and this format is tightly coupled to the memory mechanism.
Let's look at ConversationChain's built-in template:
# Initialize the conversation chain
conv_chain = ConversationChain(llm=llm)

# Print the conversation prompt template
print(conv_chain.prompt.template)
Output:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Human: {input}
AI:
This prompt tries to reduce hallucinations, i.e. to minimize information the model makes up, by stating that if the AI does not know the answer to a question, it truthfully says it does not know.
Two parameters are passed to the LLM through the prompt template, and the output we expect back is simply a continuation of the conversation:
{history} holds the conversation memory
{input} holds the new input
With the {history} parameter and the Human and AI prefixes, we can store the earlier conversation in the prompt template and pass it to the LLM as part of the new prompt in the next round. This is the principle behind the memory mechanism.
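A short sketch of what ConversationChain does with these two variables on each turn, using the conv_chain created above (it carries a ConversationBufferMemory by default; the example turns are assumptions):

# Pretend one round has already happened and was written to memory
conv_chain.memory.save_context(
    {"input": "Hello World!"},
    {"output": "Hello! How can I assist you today?"}
)
# Render the prompt for the next round: the saved turns fill {history},
# the new question fills {input}
history_text = conv_chain.memory.load_memory_variables({})["history"]
print(conv_chain.prompt.format(history=history_text, input="How to evaluate the world?"))
# the fully rendered prompt, previous turns included, that would be sent to the LLM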
Custom conversation memory
As described above, ConversationChain formats the history as a dialogue and builds the prompt from it; exactly how depends on the memory class in use. The default AI prefix is AI, and the default human prefix is Human.
class ConversationBufferMemory(BaseChatMemory):
    """Buffer for storing conversation memory."""

    # Default prefixes used when formatting the conversation history
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
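These defaults can be overridden. A sketch of customizing the AI prefix (assuming the llm configured earlier); note that the prompt passed to ConversationChain must use the same prefix as the memory:

from langchain.prompts import PromptTemplate

custom_template = """The following is a friendly conversation between a human and an AI.

Current conversation:
{history}
Human: {input}
AI Assistant:"""
conversation = ConversationChain(
    llm=llm,
    prompt=PromptTemplate(input_variables=["history", "input"], template=custom_template),
    memory=ConversationBufferMemory(ai_prefix="AI Assistant"),  # must match the prefix in the template
    verbose=True,
)

Beyond changing prefixes, you can also implement a completely custom memory class, as in the FavoriteFoodMemory example below.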
import re
from typing import Any, Dict, List

from langchain.schema import BaseMemory


class FavoriteFoodMemory(BaseMemory):
    """Custom memory class that stores the user's favorite foods."""

    # Define dictionary to store information about entities.
    entities: dict = {}
    # Define key to pass information about entities into prompt.
    memory_key: str = "entities"

    def clear(self):
        self.entities = {}

    @property
    def memory_variables(self) -> List[str]:
        """Define the variables we are providing to the prompt."""
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Load the memory variables, in this case the entity key."""
        s = ""
        for key, value in self.entities.items():
            s += (value + "\n")
        return {self.memory_key: s}

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        # Extract the favorite food from the user's input with a simple regex
        pattern = r"My favorite food is (\w+)"
        for key, value in inputs.items():
            matches = re.findall(pattern, value)
            num_matches = len(matches)
            if num_matches > 0:
                match = re.search(pattern, value)
                food_name = match.group(1)
                self.entities[key] = food_name
from langchain.prompts import PromptTemplate

template = """You are a chef, directly output the dish name you want to make based on user preferences. Don't continue asking questions. Do not reply to questions.
User Preferences:
{entities}
Conversation:
Human: {input}
AI:"""
# A prompt that makes the AI play the role of a chef
prompt = PromptTemplate(input_variables=["entities", "input"], template=template)
# The custom memory class
custom_memory = FavoriteFoodMemory()
# Simulate an earlier interaction in which the user's favorite food was extracted and saved
custom_memory.save_context({'input': 'My favorite food is banana.'}, {'output': 'Great!'})
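The chain wiring and the actual call are not shown in this excerpt; a plausible continuation (the input text here is an assumption) is:

# Hand the custom memory and the chef prompt to a ConversationChain
conversation = ConversationChain(
    llm=llm,            # an already-configured model, as earlier
    prompt=prompt,
    memory=custom_memory,
    verbose=True,
)
print(conversation.predict(input="I am hungry."))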
> Entering new ConversationChain chain...
Prompt after formatting:
You are a chef, directly output the dish name you want to make based on user preferences. Don't continue asking questions. Do not reply to questions.
# Buffer memory
conv_memory = ConversationBufferMemory(
    memory_key="chat_history_lines",
    input_key="input"
)
# Summary memory
summary_memory = ConversationSummaryMemory(llm=Azure.chat_model, input_key="input")
# Combine the two via CombinedMemory
memory = CombinedMemory(memories=[conv_memory, summary_memory])

_DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

Summary of conversation:
{history}
Current conversation:
{chat_history_lines}
Human: {input}
AI:"""
PROMPT = PromptTemplate(
    input_variables=["history", "input", "chat_history_lines"],
    template=_DEFAULT_TEMPLATE,
)
# ConversationChain
conversation = ConversationChain(llm=Azure.chat_model, verbose=True, memory=memory, prompt=PROMPT)
Run it and check the results:
conversation.run("Hi")
pprint(conversation.memory.memories[0].load_memory_variables({}))
# This is the combined ConversationBufferMemory
# >> {'chat_history_lines': 'Human: Hi\nAI: Hello! How can I assist you today?'}

pprint(conversation.memory.memories[1].load_memory_variables({}))
# This is the combined ConversationSummaryMemory
# >> {'history': 'The human greets the AI and the AI asks how it can assist the human.'}