LangGraph Integration
Automatic tracing for LangGraph workflows and agents.
Quick Start
Prereq: set TRACIUM_API_KEY (see Installation).
import tracium

# Enable auto-instrumentation
tracium.trace()

# Import clients after enabling tracing
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI

# Define your graph
from typing import TypedDict

class AgentState(TypedDict):
    messages: list
    next: str

def agent_node(state):
    llm = ChatOpenAI(model="gpt-4")
    response = llm.invoke(state["messages"])
    return {"messages": state["messages"] + [response]}

# Build graph
workflow = StateGraph(AgentState)
workflow.add_node("agent", agent_node)
workflow.set_entry_point("agent")
workflow.add_edge("agent", END)

app = workflow.compile()

# Graph execution is fully traced
result = app.invoke({"messages": ["Hello!"], "next": ""})

What Gets Captured
- Graph execution - Complete workflow run
- Node execution - Each node in the graph
- State transitions - State changes between nodes
- Conditional edges - Decision points
- LLM calls - All LLM invocations within nodes
- Tool calls - Tool executions
Multi-Agent Workflow
import tracium

tracium.trace()

# Import clients after enabling tracing
from typing import TypedDict

from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI

class State(TypedDict):
    messages: list
    current_agent: str

def researcher(state):
    llm = ChatOpenAI(model="gpt-4")
    response = llm.invoke([
        {"role": "system", "content": "You are a researcher."},
        *state["messages"]
    ])
    return {"messages": state["messages"] + [response]}

def writer(state):
    llm = ChatOpenAI(model="gpt-4")
    response = llm.invoke([
        {"role": "system", "content": "You are a writer."},
        *state["messages"]
    ])
    return {"messages": state["messages"] + [response]}

def router(state):
    if len(state["messages"]) < 3:
        return "research"
    return "write"

workflow = StateGraph(State)
workflow.add_node("researcher", researcher)
workflow.add_node("writer", writer)
workflow.set_entry_point("researcher")
workflow.add_conditional_edges("researcher", router, {
    "research": "researcher",
    "write": "writer"
})
workflow.add_edge("writer", END)

app = workflow.compile()

# Multi-agent workflow is traced
result = app.invoke({
    "messages": [{"role": "user", "content": "Research and write about AI"}],
    "current_agent": ""
})

ReAct Agent
import tracium

tracium.trace()

# Import clients after enabling tracing
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from langchain_core.tools import tool

@tool
def search(query: str) -> str:
    """Search for information."""
    return f"Results for: {query}"

@tool
def calculate(expression: str) -> str:
    """Calculate a math expression."""
    # Demo only — never eval() untrusted input in real code.
    return str(eval(expression))

llm = ChatOpenAI(model="gpt-4")
agent = create_react_agent(llm, [search, calculate])

# ReAct loop is traced with tool spans
result = agent.invoke({
    "messages": [{"role": "user", "content": "What's 15% of 200?"}]
})

Streaming
import tracium

tracium.trace()

# Import clients after enabling tracing
from langgraph.graph import StateGraph, END

# ... define your graph ...

app = workflow.compile()

# Streaming execution is traced
for event in app.stream({"messages": ["Hello!"]}):
    print(event)