LangChain Integration

Automatic tracing for LangChain chains, agents, and tools.

Quick Start

import tracium
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

# Enable auto-instrumentation
tracium.trace()

# Use LangChain normally - all operations are traced
llm = ChatOpenAI(model="gpt-4")
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "{input}")
])
chain = prompt | llm

# The entire chain execution is traced
response = chain.invoke({"input": "Hello!"})

What Gets Captured

  • Chain execution - Full chain run with inputs/outputs
  • LLM calls - All LLM invocations within chains
  • Tool calls - Tool/function executions
  • Retrievers - Vector store queries
  • Prompts - Formatted prompt templates
  • Token usage - For supported LLM providers

RAG Chains

import tracium
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

tracium.trace()

# Setup retriever ("documents" is your own list of source texts)
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_texts(documents, embeddings)
retriever = vectorstore.as_retriever()

# Setup chain
prompt = ChatPromptTemplate.from_template(
    "Answer based on context: {context}\n\nQuestion: {question}"
)
llm = ChatOpenAI(model="gpt-4")
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
)

# Full RAG pipeline is traced with retrieval and LLM spans
response = chain.invoke("What is machine learning?")

Agents

import tracium
from langchain_openai import ChatOpenAI
from langchain.agents import create_openai_tools_agent, AgentExecutor
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool

tracium.trace()

@tool
def search(query: str) -> str:
    """Search the web for information."""
    return f"Results for: {query}"

@tool
def calculator(expression: str) -> str:
    """Evaluate a math expression."""
    # NOTE: eval() is unsafe on untrusted input; in production use
    # ast.literal_eval or a dedicated math-expression parser instead.
    return str(eval(expression))

llm = ChatOpenAI(model="gpt-4")
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "{input}"),
    MessagesPlaceholder("agent_scratchpad"),
])
agent = create_openai_tools_agent(llm, [search, calculator], prompt)
executor = AgentExecutor(agent=agent, tools=[search, calculator])

# Agent execution is traced with tool call spans
result = executor.invoke({"input": "What is 2+2?"})

LCEL (LangChain Expression Language)

import tracium
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

tracium.trace()

# Complex LCEL chain. (NOTE: a single-variable prompt template coerces
# the upstream parser's string output into {"text": ...} automatically;
# verify against your langchain-core version.)
chain = (
    ChatPromptTemplate.from_template("Translate to French: {text}")
    | ChatOpenAI(model="gpt-4")
    | StrOutputParser()
    | ChatPromptTemplate.from_template("Now translate to Spanish: {text}")
    | ChatOpenAI(model="gpt-4")
    | StrOutputParser()
)

# Each step in the chain is traced
result = chain.invoke({"text": "Hello, world!"})

Disabling LangChain Tracing

import tracium

# Disable LangChain auto-instrumentation if needed
client = tracium.init(
    api_key="sk_live_...",
    auto_instrument_langchain=False,  # Disable
    auto_instrument_llm_clients=True,  # Keep LLM tracing
)