LangChain Integration
Automatic tracing for LangChain chains, agents, and tools.
Quick Start
Prereq: set TRACIUM_API_KEY (see Installation).
import tracium

# Enable auto-instrumentation
tracium.trace()

# Import clients after enabling tracing
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

# Use LangChain normally - all operations are traced
llm = ChatOpenAI(model="gpt-4")

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "{input}"),
])

chain = prompt | llm

# The entire chain execution is traced
response = chain.invoke({"input": "Hello!"})

What Gets Captured
- Chain execution - Full chain run with inputs/outputs
- LLM calls - All LLM invocations within chains
- Tool calls - Tool/function executions
- Retrievers - Vector store queries
- Prompts - Formatted prompt templates
- Token usage - For supported LLM providers
RAG Chains
import tracium

tracium.trace()

# Import clients after enabling tracing
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

# Setup retriever
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_texts(documents, embeddings)
retriever = vectorstore.as_retriever()

# Setup chain
prompt = ChatPromptTemplate.from_template(
    "Answer based on context: {context}\n\nQuestion: {question}"
)
llm = ChatOpenAI(model="gpt-4")

chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
)

# Full RAG pipeline is traced with retrieval and LLM spans
response = chain.invoke("What is machine learning?")

Agents
import tracium

tracium.trace()

# Import clients after enabling tracing
from langchain_openai import ChatOpenAI
from langchain.agents import create_openai_tools_agent, AgentExecutor
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool

@tool
def search(query: str) -> str:
    """Search the web for information."""
    return f"Results for: {query}"

@tool
def calculator(expression: str) -> str:
    """Evaluate a math expression."""
    return str(eval(expression))

llm = ChatOpenAI(model="gpt-4")

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "{input}"),
    MessagesPlaceholder("agent_scratchpad"),
])

agent = create_openai_tools_agent(llm, [search, calculator], prompt)
executor = AgentExecutor(agent=agent, tools=[search, calculator])

# Agent execution is traced with tool call spans
result = executor.invoke({"input": "What is 2+2?"})

LCEL (LangChain Expression Language)
import tracium

tracium.trace()

# Import clients after enabling tracing
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Complex LCEL chain
chain = (
    ChatPromptTemplate.from_template("Translate to French: {text}")
    | ChatOpenAI(model="gpt-4")
    | StrOutputParser()
    | ChatPromptTemplate.from_template("Now translate to Spanish: {text}")
    | ChatOpenAI(model="gpt-4")
    | StrOutputParser()
)

# Each step in the chain is traced
result = chain.invoke({"text": "Hello, world!"})

Disabling LangChain Tracing
import tracium

# Disable LangChain auto-instrumentation if needed
client = tracium.init(
    api_key="sk_live_...",
    auto_instrument_langchain=False,  # Disable
    auto_instrument_llm_clients=True,  # Keep LLM tracing
)