FastAPI Integration
Automatic request tracing for FastAPI applications.
Quick Start
```python
# main.py
import tracium
from fastapi import FastAPI
from openai import OpenAI

# Initialize Tracium FIRST
tracium.trace()

app = FastAPI()
openai_client = OpenAI()

@app.post("/chat")
async def chat(message: str):
    # This request is automatically traced
    # LLM calls within are captured as child spans
    response = openai_client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": message}]
    )
    return {"response": response.choices[0].message.content}
```
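Once the server is running (see "Running with Uvicorn" below), you can exercise the traced endpoint with any HTTP client. A minimal sketch using httpx, assuming the default local address; note that `message` is a plain `str` parameter, so FastAPI reads it from the query string:

```python
import httpx

# POST /chat with the message passed as a query parameter
resp = httpx.post("http://localhost:8000/chat", params={"message": "Hello!"})
print(resp.json()["response"])
```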
What Gets Captured
- Request path - URL and HTTP method
- Route name - FastAPI route name/function
- Request headers - Selected headers (configurable)
- Response status - HTTP status code
- Latency - Total request duration
- Child spans - LLM calls, DB queries, etc.
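Conceptually, the automatic instrumentation behaves like an HTTP middleware wrapped around each request. The sketch below is illustrative only (it is not Tracium's actual implementation, which `tracium.trace()` installs for you); it shows the kind of per-request data a middleware can observe:

```python
import time
from fastapi import FastAPI, Request

app = FastAPI()

@app.middleware("http")
async def request_metadata_example(request: Request, call_next):
    start = time.perf_counter()
    response = await call_next(request)
    captured = {
        "path": request.url.path,           # request path
        "method": request.method,           # HTTP method
        "headers": dict(request.headers),   # headers (Tracium records a configurable subset)
        "status": response.status_code,     # response status
        "latency_ms": (time.perf_counter() - start) * 1000,  # total request duration
    }
    # In real instrumentation, `captured` would be attached to the trace and exported.
    return response
```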
Manual Traces in Endpoints
```python
import tracium
from fastapi import FastAPI
from openai import OpenAI

tracium.trace()

app = FastAPI()
openai_client = OpenAI()
tracium_client = tracium.get_client()

@app.post("/analyze")
async def analyze(text: str):
    # Create a manual trace for more control
    with tracium_client.agent_trace(
        agent_name="analyzer",
        tags=["fastapi", "analyze"]
    ) as trace:
        # Custom span for preprocessing
        with trace.span(span_type="tool", name="preprocess") as span:
            span.record_input({"text": text})
            processed = preprocess(text)
            span.record_output({"processed": processed})

        # LLM call is captured as child span
        response = openai_client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": processed}]
        )

        trace.set_summary({"result": "success"})

    return {"analysis": response.choices[0].message.content}
```
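The `preprocess` helper above is application-specific and not part of Tracium; a trivial stand-in for running the example locally might be:

```python
def preprocess(text: str) -> str:
    # Placeholder: normalize whitespace before sending the text to the model
    return " ".join(text.split())
```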
Async Endpoints
```python
import tracium
from fastapi import FastAPI
from openai import AsyncOpenAI
import asyncio

tracium.trace()

app = FastAPI()
client = AsyncOpenAI()

@app.post("/batch")
async def batch_process(prompts: list[str]):
    # Parallel async calls are traced correctly
    tasks = [
        client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": p}]
        )
        for p in prompts
    ]
    responses = await asyncio.gather(*tasks)
    return {"results": [r.choices[0].message.content for r in responses]}
```
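Because `prompts: list[str]` has no explicit Query annotation, FastAPI reads it as a JSON array in the request body. A client-side sketch (local URL assumed):

```python
import httpx

# The list is sent as the JSON request body
resp = httpx.post(
    "http://localhost:8000/batch",
    json=["Summarize tracing", "Explain spans"],
)
print(resp.json()["results"])
```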
Streaming Responses
```python
import tracium
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from openai import OpenAI

tracium.trace()

app = FastAPI()
client = OpenAI()

@app.post("/stream")
async def stream_chat(message: str):
    def generate():
        stream = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": message}],
            stream=True
        )
        for chunk in stream:
            if chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content

    return StreamingResponse(generate(), media_type="text/plain")
```
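To consume the stream incrementally on the client side, httpx's streaming API is one option (endpoint URL and timeout choice are assumptions for local testing):

```python
import httpx

# Read the response body chunk by chunk instead of buffering it
with httpx.stream(
    "POST",
    "http://localhost:8000/stream",
    params={"message": "Tell me a story"},
    timeout=None,  # long generations can exceed the default timeout
) as resp:
    for chunk in resp.iter_text():
        print(chunk, end="", flush=True)
```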
With Background Tasks
```python
import tracium
from fastapi import FastAPI, BackgroundTasks
from openai import OpenAI

tracium.trace()

app = FastAPI()
client = OpenAI()

def process_in_background(data: str):
    # Background task is traced with context propagation
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": data}]
    )
    save_result(response)

@app.post("/async-process")
async def async_process(data: str, background_tasks: BackgroundTasks):
    background_tasks.add_task(process_in_background, data)
    return {"status": "processing"}
```
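`save_result` is likewise your own persistence function; a minimal placeholder for trying the example could simply append the completion to a file:

```python
def save_result(response) -> None:
    # Placeholder: persist the model output wherever your app stores results
    with open("results.txt", "a") as f:
        f.write(response.choices[0].message.content + "\n")
```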
Running with Uvicorn
```bash
# Run your FastAPI app
uvicorn main:app --reload
```
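You can also launch the server programmatically. As in the Quick Start, `tracium.trace()` should run before the app starts serving traffic; this sketch assumes the same `main.py` module:

```python
# run.py
import uvicorn

if __name__ == "__main__":
    # Equivalent to: uvicorn main:app --host 0.0.0.0 --port 8000
    uvicorn.run("main:app", host="0.0.0.0", port=8000)
```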