LangChain Integration
Automatic tracing for LangChain chains, agents, and tools.
Quick Start
```python
import tracium
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

# Enable auto-instrumentation
tracium.trace()

# Use LangChain normally - all operations are traced
llm = ChatOpenAI(model="gpt-4")

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "{input}")
])

chain = prompt | llm

# The entire chain execution is traced
response = chain.invoke({"input": "Hello!"})
```
What Gets Captured
- Chain execution - Full chain run with inputs/outputs
- LLM calls - All LLM invocations within chains
- Tool calls - Tool/function executions
- Retrievers - Vector store queries
- Prompts - Formatted prompt templates
- Token usage - For supported LLM providers
RAG Chains
```python
import tracium
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

tracium.trace()

# Setup retriever
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_texts(documents, embeddings)
retriever = vectorstore.as_retriever()

# Setup chain
prompt = ChatPromptTemplate.from_template(
    "Answer based on context: {context}\n\nQuestion: {question}"
)
llm = ChatOpenAI(model="gpt-4")

chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
)

# Full RAG pipeline is traced with retrieval and LLM spans
response = chain.invoke("What is machine learning?")
```
Agents
```python
import tracium
from langchain_openai import ChatOpenAI
from langchain.agents import create_openai_tools_agent, AgentExecutor
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool

tracium.trace()

@tool
def search(query: str) -> str:
    """Search the web for information."""
    return f"Results for: {query}"

@tool
def calculator(expression: str) -> str:
    """Evaluate a math expression."""
    return str(eval(expression))

llm = ChatOpenAI(model="gpt-4")

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "{input}"),
    MessagesPlaceholder("agent_scratchpad"),
])

agent = create_openai_tools_agent(llm, [search, calculator], prompt)
executor = AgentExecutor(agent=agent, tools=[search, calculator])

# Agent execution is traced with tool call spans
result = executor.invoke({"input": "What is 2+2?"})
```
LCEL (LangChain Expression Language)
```python
import tracium
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

tracium.trace()

# Complex LCEL chain
chain = (
    ChatPromptTemplate.from_template("Translate to French: {text}")
    | ChatOpenAI(model="gpt-4")
    | StrOutputParser()
    | ChatPromptTemplate.from_template("Now translate to Spanish: {text}")
    | ChatOpenAI(model="gpt-4")
    | StrOutputParser()
)

# Each step in the chain is traced
result = chain.invoke({"text": "Hello, world!"})
```
Disabling LangChain Tracing
import tracium
# Disable LangChain auto-instrumentation if needed
client = tracium.init(
api_key="sk_live_...",
auto_instrument_langchain=False, # Disable
auto_instrument_llm_clients=True, # Keep LLM tracing
)