# LangGraph Integration

Automatic tracing for LangGraph workflows and agents.

## Quick Start
```python
import tracium
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI

# Enable auto-instrumentation
tracium.trace()

# Define your graph
from typing import TypedDict

class AgentState(TypedDict):
    messages: list
    next: str

def agent_node(state):
    llm = ChatOpenAI(model="gpt-4")
    response = llm.invoke(state["messages"])
    return {"messages": state["messages"] + [response]}

# Build graph
workflow = StateGraph(AgentState)
workflow.add_node("agent", agent_node)
workflow.set_entry_point("agent")
workflow.add_edge("agent", END)

app = workflow.compile()

# Graph execution is fully traced
result = app.invoke({"messages": ["Hello!"], "next": ""})
```

## What Gets Captured
- Graph execution - Complete workflow run
- Node execution - Each node in the graph
- State transitions - State changes between nodes
- Conditional edges - Decision points
- LLM calls - All LLM invocations within nodes
- Tool calls - Tool executions
Multi-Agent Workflow
```python
import tracium
from typing import TypedDict
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI

tracium.trace()

class State(TypedDict):
    messages: list
    current_agent: str

def researcher(state):
    llm = ChatOpenAI(model="gpt-4")
    response = llm.invoke([
        {"role": "system", "content": "You are a researcher."},
        *state["messages"]
    ])
    return {"messages": state["messages"] + [response]}

def writer(state):
    llm = ChatOpenAI(model="gpt-4")
    response = llm.invoke([
        {"role": "system", "content": "You are a writer."},
        *state["messages"]
    ])
    return {"messages": state["messages"] + [response]}

def router(state):
    if len(state["messages"]) < 3:
        return "research"
    return "write"

workflow = StateGraph(State)
workflow.add_node("researcher", researcher)
workflow.add_node("writer", writer)
workflow.set_entry_point("researcher")
workflow.add_conditional_edges("researcher", router, {
    "research": "researcher",
    "write": "writer"
})
workflow.add_edge("writer", END)

app = workflow.compile()

# Multi-agent workflow is traced
result = app.invoke({
    "messages": [{"role": "user", "content": "Research and write about AI"}],
    "current_agent": ""
})
```

## ReAct Agent
```python
import tracium
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from langchain_core.tools import tool

tracium.trace()

@tool
def search(query: str) -> str:
    """Search for information."""
    return f"Results for: {query}"

@tool
def calculate(expression: str) -> str:
    """Calculate a math expression."""
    return str(eval(expression))  # NOTE: eval is for demonstration only; do not use on untrusted input

llm = ChatOpenAI(model="gpt-4")
agent = create_react_agent(llm, [search, calculate])

# ReAct loop is traced with tool spans
result = agent.invoke({
    "messages": [{"role": "user", "content": "What's 15% of 200?"}]
})
```

## Streaming
```python
import tracium
from langgraph.graph import StateGraph, END

tracium.trace()

# ... define your graph ...
app = workflow.compile()

# Streaming execution is traced
for event in app.stream({"messages": ["Hello!"]}):
    print(event)
```