-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmemory.py
More file actions
83 lines (61 loc) · 3.03 KB
/
memory.py
File metadata and controls
83 lines (61 loc) · 3.03 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
from langchain_core.messages import ToolMessage, HumanMessage, AIMessage
import re
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field
from prompts import KEY_FINDINGS_PROMPT
def initialize_memory():
    """Build a fresh, empty memory structure.

    Returns:
        dict: A memory dict with an empty ``conversation_history`` list and
        an empty ``key_findings`` list.
    """
    empty_memory = dict(conversation_history=[], key_findings=[])
    return empty_memory
def update_memory(state, llm, explicit_update=None):
    """Refresh the agent's memory from recent messages and optional explicit updates.

    Args:
        state: Graph state dict; reads ``state['memory']`` (may be absent,
            ``None``, or partial) and ``state['messages']`` (list of
            langchain message objects).
        llm: Chat model used to summarize recent AI messages; must expose
            ``invoke``.
        explicit_update: Optional dict of extracted updates; its ``'findings'``
            list (if present and non-None) is appended to ``key_findings``.

    Returns:
        dict: The updated memory dict.
    """
    # `state.get('memory', default)` would keep a falsy value like {} or None,
    # and the subsequent key accesses would then raise — use `or` plus
    # setdefault so partial memory dicts are tolerated.
    memory = state.get('memory') or initialize_memory()
    memory.setdefault('conversation_history', [])
    memory.setdefault('key_findings', [])

    if explicit_update:
        # Guard with .get: a present-but-None 'findings' would crash extend().
        findings = explicit_update.get('findings')
        if findings:
            memory['key_findings'].extend(findings)

    # Keep only the last 10 messages as the rolling conversation window.
    recent_msgs = state['messages'][-10:]
    memory['conversation_history'] = [msg.content for msg in recent_msgs]

    ai_messages = [msg.content for msg in recent_msgs if isinstance(msg, AIMessage)]
    # Skip the LLM round-trip when there is nothing to summarize — invoking
    # the summarizer on an empty transcript just records a meaningless entry.
    if ai_messages:
        key_findings_prompt = KEY_FINDINGS_PROMPT.format(ai_messages=" | ".join(ai_messages))
        summary = llm.invoke([HumanMessage(content=key_findings_prompt)])
        if hasattr(summary, 'content'):
            memory['key_findings'].append({"type": "ai_summary", "content": summary.content})

    # Bound memory growth: retain only the 10 most recent findings.
    memory['key_findings'] = memory['key_findings'][-10:]
    return memory
def get_memory_context(memory):
    """Render a memory dict into a compact one-line context string.

    Args:
        memory: Memory dict (as produced by ``initialize_memory`` /
            ``update_memory``), possibly partial, or a falsy value.

    Returns:
        str: ``''`` for falsy memory; otherwise a ``' | '``-joined string of
        the conversation history and (if any) the recent findings, each
        finding's content truncated to 100 characters.
    """
    if not memory:
        return ''
    context = []
    # .get with a default: a partial memory dict without 'conversation_history'
    # previously raised KeyError here (the findings branch below already
    # used .get — this makes the two accesses consistent).
    history = memory.get('conversation_history', [])
    context.append('History: ' + '\n'.join(history))
    if memory.get('key_findings'):
        recent_findings = memory['key_findings']
        # Truncate each finding to keep the context string bounded.
        findings_text = "; ".join([f["content"][:100] for f in recent_findings])
        context.append(f"Recent findings: {findings_text}")
    # context always holds at least the History entry at this point.
    return " | ".join(context)
class MemoryUpdate(BaseModel):
    """Schema for structured memory updates extracted from a conversation.

    Used as the ``pydantic_object`` for a ``JsonOutputParser`` in
    ``update_memory_node``; both fields default to ``None`` so the extractor
    may omit either.
    """
    # Free-text description of what the user is currently working on, if stated.
    context: str | None = Field(default=None, description="User's working context if mentioned")
    # List of finding dicts (e.g. {"type": ..., "content": ...}) to merge into memory.
    findings: list[dict] | None = Field(default=None, description="Any explicit findings or notes")
def update_memory_node(state, llm):
    """Graph node: extract structured updates from recent turns and merge them into memory.

    Args:
        state: Graph state dict; reads ``state['messages']``.
        llm: Chat model used both for extraction here and for summarization
            inside ``update_memory``.

    Returns:
        dict: ``{"memory": <updated memory dict>}`` for the graph to merge.
    """
    # Flatten the last three turns into a "ROLE: content" transcript.
    last_msgs = state["messages"][-3:]
    transcript = "\n".join(f"{msg.type.upper()}: {msg.content}" for msg in last_msgs)

    prompt_template = ChatPromptTemplate.from_messages([
        ("system", """You are a memory extraction assistant.
Analyze the recent conversation and extract structured updates.
- If the user explicitly asks you to remember something, add it to 'findings' as type 'user_note'.
- If the assistant states conclusions, issues, or results, add them to 'findings' as type 'ai_conclusion'.
Return ONLY valid JSON.
"""),
        ("human", "Conversation:\n{conversation}")
    ])
    output_parser = JsonOutputParser(pydantic_object=MemoryUpdate)

    # Compose prompt -> model -> parser and run the extraction.
    pipeline = prompt_template | llm | output_parser
    raw_result = pipeline.invoke({"conversation": transcript})

    # The parser may yield either a pydantic object or a plain dict;
    # normalize to a dict either way.
    payload = raw_result.dict(exclude_none=True) if hasattr(raw_result, 'dict') else raw_result

    return {"memory": update_memory(state, llm, explicit_update=payload)}