LangGraph Integration

Automatic tracing for LangGraph workflows and agents.

Quick Start

```python
import tracium
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI

# Enable auto-instrumentation
tracium.trace()

# Define your graph
from typing import TypedDict

class AgentState(TypedDict):
    messages: list
    next: str

def agent_node(state):
    llm = ChatOpenAI(model="gpt-4")
    response = llm.invoke(state["messages"])
    return {"messages": state["messages"] + [response]}

# Build graph
workflow = StateGraph(AgentState)
workflow.add_node("agent", agent_node)
workflow.set_entry_point("agent")
workflow.add_edge("agent", END)

app = workflow.compile()

# Graph execution is fully traced
result = app.invoke({"messages": ["Hello!"], "next": ""})
```

What Gets Captured

  • Graph execution - Complete workflow run
  • Node execution - Each node in the graph
  • State transitions - State changes between nodes
  • Conditional edges - Decision points
  • LLM calls - All LLM invocations within nodes
  • Tool calls - Tool executions

Multi-Agent Workflow

```python
import tracium
from typing import TypedDict
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI

tracium.trace()

class State(TypedDict):
    messages: list
    current_agent: str

def researcher(state):
    llm = ChatOpenAI(model="gpt-4")
    response = llm.invoke([
        {"role": "system", "content": "You are a researcher."},
        *state["messages"]
    ])
    return {"messages": state["messages"] + [response]}

def writer(state):
    llm = ChatOpenAI(model="gpt-4")
    response = llm.invoke([
        {"role": "system", "content": "You are a writer."},
        *state["messages"]
    ])
    return {"messages": state["messages"] + [response]}

def router(state):
    if len(state["messages"]) < 3:
        return "research"
    return "write"

workflow = StateGraph(State)
workflow.add_node("researcher", researcher)
workflow.add_node("writer", writer)
workflow.set_entry_point("researcher")
workflow.add_conditional_edges("researcher", router, {
    "research": "researcher",
    "write": "writer"
})
workflow.add_edge("writer", END)

app = workflow.compile()

# Multi-agent workflow is traced
result = app.invoke({
    "messages": [{"role": "user", "content": "Research and write about AI"}],
    "current_agent": ""
})
```

ReAct Agent

```python
import tracium
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from langchain_core.tools import tool

tracium.trace()

@tool
def search(query: str) -> str:
    """Search for information."""
    return f"Results for: {query}"

@tool
def calculate(expression: str) -> str:
    """Calculate a math expression."""
    # NOTE: eval is used here for brevity only — never pass untrusted input to eval.
    return str(eval(expression))

llm = ChatOpenAI(model="gpt-4")
agent = create_react_agent(llm, [search, calculate])

# ReAct loop is traced with tool spans
result = agent.invoke({
    "messages": [{"role": "user", "content": "What's 15% of 200?"}]
})
```

Streaming

import tracium
from langgraph.graph import StateGraph, END

tracium.trace()

# ... define your graph ...

app = workflow.compile()

# Streaming execution is traced
for event in app.stream({"messages": ["Hello!"]}):
    print(event)