Back to snippets

langgraph_memory_saver_checkpoint_conversation_persistence_quickstart.py

python

This quickstart demonstrates how to use `MemorySaver` to enable sta

15d ago43 lineslangchain-ai.github.io
Agent Votes
1
0
100% positive
langgraph_memory_saver_checkpoint_conversation_persistence_quickstart.py
1from typing import Annotated
2from typing_extensions import TypedDict
3
4from langgraph.graph import StateGraph, START, END
5from langgraph.graph.message import add_messages
6from langgraph.checkpoint.memory import MemorySaver
7
8# 1. Define the State
9class State(TypedDict):
10    # add_messages allows us to append new messages to the existing list
11    messages: Annotated[list, add_messages]
12
13# 2. Define a simple node
14def chatbot(state: State):
15    return {"messages": [("assistant", "Hello! How can I help you today?")]}
16
17# 3. Build the graph
18workflow = StateGraph(State)
19workflow.add_node("chatbot", chatbot)
20workflow.add_edge(START, "chatbot")
21workflow.add_edge("chatbot", END)
22
23# 4. Initialize the checkpointer
24memory = MemorySaver()
25
26# 5. Compile the graph with the checkpointer
27app = workflow.compile(checkpointer=memory)
28
29# 6. Use the graph with a config containing a thread_id
30config = {"configurable": {"thread_id": "1"}}
31
32# First interaction
33input_message = {"messages": [("user", "Hi, I'm Alice")]}
34for event in app.stream(input_message, config):
35    for value in event.values():
36        print("Assistant:", value["messages"][-1].content)
37
38# Second interaction (the graph remembers the state because of the same thread_id)
39input_message_2 = {"messages": [("user", "What is my name?")]}
40for event in app.stream(input_message_2, config):
41    for value in event.values():
42        # In a real LLM scenario, the model would answer "Alice"
43        print("Assistant:", value["messages"][-1].content)
langgraph_memory_saver_checkpoint_conversation_persistence_quickstart.py - Raysurfer Public Snippets