Commit 362cdd0

Added assistant with all 3 memory types example

1 parent 83f3e69 commit 362cdd0

File tree: 4 files changed, +144 -0 lines changed
core_memory.json
@@ -0,0 +1,14 @@
{
    "persona": {
        "name": "Aurora",
        "personality": "Aurora is an endlessly curious and enthusiastic conversationalist. She loves learning about a wide range of subjects, from science and history to philosophy and the arts. Aurora has an upbeat, friendly communication style. She asks lots of questions and enjoys exploring ideas in depth. She's also a great listener who shows genuine interest in others' thoughts and experiences. Aurora aims to be a knowledgeable but down-to-earth companion - she explains complex topics in an accessible way and is always eager to learn from those she talks to. She has a great sense of humor and loves witty wordplay and puns.",
        "interests": "Science, technology, history, philosophy, psychology, world cultures, trivia, wordplay and puns",
        "communication_style": "Warm, curious, upbeat, friendly, humorous, explains things clearly, asks questions, active listener"
    },
    "human": {
    },
    "scratchpad":
    {

    }
}
@@ -0,0 +1,75 @@
import datetime

from llama_cpp_agent import LlamaCppAgent
from llama_cpp_agent import MessagesFormatterType
from llama_cpp_agent.agent_memory.event_memory import Event
from llama_cpp_agent.chat_history.messages import Roles
from llama_cpp_agent.llm_agent import SystemPromptModule, SystemPromptModulePosition
from llama_cpp_agent.providers import LlamaCppServerProvider
from memory import output_settings, agent_core_memory, agent_retrieval_memory, agent_event_memory, update_memory_section
from prompts import assistant_prompt, memory_prompt, wrap_function_response_in_xml_tags_json_mode, \
    generate_write_message, generate_write_message_with_examples, wrap_user_message_in_xml_tags_json_mode

# Connect to a locally running llama.cpp server.
provider = LlamaCppServerProvider("http://localhost:8080")

agent = LlamaCppAgent(
    provider,
    system_prompt=assistant_prompt,
    debug_output=True,
    predefined_messages_formatter_type=MessagesFormatterType.MISTRAL,
)

# Sampling settings shared by all requests.
settings = provider.get_provider_default_settings()
settings.n_predict = 1024
settings.temperature = 0.35
settings.top_k = 0
settings.top_p = 0.5

# System prompt modules injected into every request: memory status, memory usage
# instructions and the current date.
memory_section = SystemPromptModule("memory",
                                    "The following section shows the count of memories in archival memory and chat history memory and the current content of your core memory:")
date_time_section = SystemPromptModule("current_date_time", "The following section shows the current date and time:")

memory_intro_section = SystemPromptModule("memory_intro",
                                          "To support you in your task as an assistant and to help you remember things, you have access to 3 different types of memory.",
                                          position=SystemPromptModulePosition.after_system_instructions)
memory_intro_section.set_content(memory_prompt)
output_settings.output_structured_output_and_raw_json_string = True

# Outer loop: one user turn per iteration.
while True:
    user_input = input(">")
    if user_input == "exit":
        break
    update_memory_section(memory_section)
    date_time_section.set_content(datetime.datetime.now().strftime("%d.%m.%Y") + "\nFormat: dd.mm.yyyy")

    agent_event_memory.add_event(Roles.user, wrap_user_message_in_xml_tags_json_mode(user_input))
    agent_output, json_output = agent.get_chat_response(
        chat_history=agent_event_memory.get_event_memory_manager().build_chat_history(),
        llm_sampling_settings=settings,
        system_prompt_modules=[memory_intro_section, memory_section, date_time_section],
        structured_output_settings=output_settings)

    agent_event_memory.add_event(Roles.assistant, json_output)

    # Inner loop: execute the agent's memory tool calls until it decides to
    # write a message to the user.
    while True:
        update_memory_section(memory_section)
        date_time_section.set_content(datetime.datetime.now().strftime("%d.%m.%Y") + "\nFormat: dd.mm.yyyy")

        if agent_output[0]["function"] == "write_message_to_user":
            # Ask for a plain-text reply and show it to the user.
            agent_event_memory.add_event(Roles.tool, generate_write_message())
            output = agent.get_chat_response(
                chat_history=agent_event_memory.get_event_memory_manager().build_chat_history(),
                add_message_to_chat_history=False, add_response_to_chat_history=False,
                system_prompt_modules=[memory_intro_section, memory_section, date_time_section],
                llm_sampling_settings=settings)
            agent_event_memory.add_event(Roles.assistant, output)
            print(output)
            break

        # Feed the tool's return value back to the agent and request the next call.
        agent_event_memory.add_event(Roles.tool, wrap_function_response_in_xml_tags_json_mode(
            agent_output[0]["return_value"]))
        agent_output, json_output = agent.get_chat_response(
            chat_history=agent_event_memory.get_event_memory_manager().build_chat_history(),
            llm_sampling_settings=settings,
            system_prompt_modules=[memory_intro_section, memory_section,
                                   date_time_section],
            structured_output_settings=output_settings)
        agent_event_memory.add_event(Roles.assistant, json_output)
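
The inner loop above hinges on the shape of the parsed structured output. A rough sketch of what the script assumes agent_output to look like follows; only the "function" and "return_value" keys are actually read, and the example entry below is invented rather than taken from the library's documented output format.

# Illustrative only: the inner loop treats agent_output as a list of executed
# tool calls and reads exactly two keys per entry. The values here are made up;
# real entries come from the memory tools registered in memory.py.
example_agent_output = [
    {
        "function": "core_memory_append",        # tool the model chose to call
        "return_value": "Core memory updated.",  # tool result, fed back to the model
    }
]

# The loop keeps feeding return values back until the model calls
# "write_message_to_user"; the script then requests a plain-text reply via
# generate_write_message() and prints it.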
memory.py
@@ -0,0 +1,30 @@
from llama_cpp_agent.agent_memory.event_memory import Event
from llama_cpp_agent.agent_memory.memory_tools import AgentCoreMemory, AgentRetrievalMemory, AgentEventMemory
from llama_cpp_agent.llm_output_settings import LlmStructuredOutputSettings


def write_message_to_user():
    """
    Lets you write a message to the user.
    """
    return "Please write your message to the user!"


# The three memory types: core memory (backed by core_memory.json), archival
# retrieval memory and event memory (the chat history).
agent_core_memory = AgentCoreMemory(["persona", "user", "scratchpad"], core_memory_file="core_memory.json")
agent_retrieval_memory = AgentRetrievalMemory()
agent_event_memory = AgentEventMemory()

# Collect every memory tool and expose it to the model as a structured function call.
memory_tools = agent_core_memory.get_tool_list()
memory_tools.extend(agent_retrieval_memory.get_tool_list())
memory_tools.extend(agent_event_memory.get_tool_list())

output_settings = LlmStructuredOutputSettings.from_llama_cpp_function_tools(memory_tools,
                                                                            add_thoughts_and_reasoning_field=True,
                                                                            add_heartbeat_field=True)
output_settings.add_all_current_functions_to_heartbeat_list()
output_settings.add_function_tool(write_message_to_user)


def update_memory_section(section):
    # Refresh the "memory" system prompt module with the current memory counts
    # and the core memory content.
    query = agent_event_memory.event_memory_manager.session.query(Event).all()
    section.set_content(
        f"Archival Memories:{agent_retrieval_memory.retrieval_memory.collection.count()}\nConversation History Entries:{len(query)}\n\nCore Memory Content:\n{agent_core_memory.get_core_memory_view().strip()}")
prompts.py
@@ -0,0 +1,25 @@
assistant_prompt = """You are an advanced AI assistant that acts as a user-specified persona, to have interesting and engaging conversations with the user. You have access to three different memory types. The different memory types are called Core Memory, Archival Memory and Chat History."""

memory_prompt = """1. Core Memory - Stores essential context about the user, your persona and your current scratchpad. It is divided into a user section, a persona section and your scratchpad section. You can use the scratchpad to plan your next actions. You can edit the core memory by calling the functions: 'core_memory_append', 'core_memory_remove' and 'core_memory_replace'.

2. Archival Memory - Archive to store and retrieve general information and events about the user and your interactions with them. Can be used by calling the functions: 'archival_memory_search' and 'archival_memory_insert'.

3. Conversation History - Since you only see the latest part of the conversation history, you can search the rest of it by using: 'conversation_search' and 'conversation_search_date'.

Always remember that the user can't see your memory or your interactions with it!"""


def wrap_user_message_in_xml_tags_json_mode(user_input):
    return "<user_message>\n" + user_input + "\n</user_message>\n<response_format>\nJSON function call.\n</response_format>"


def wrap_function_response_in_xml_tags_json_mode(value):
    return "<function_response>\n" + value + "\n</function_response>\n<response_format>\nJSON function call.\n</response_format>"


def generate_write_message():
    return "<function_response>\nWrite your message to the user.\n</function_response>\n<response_format>\nText\n</response_format>"


def generate_write_message_with_examples(examples):
    return f"<function_response>\nWrite your message to the user.\n{examples}</function_response>\n<response_format>\nText\n</response_format>"
