# Django Integration

Automatic request tracing for Django applications.

## Quick Start

Initialize Tracium in your Django settings or `wsgi.py`:

**wsgi.py** (python)
```python
# wsgi.py
import tracium

# Initialize before Django application loads
tracium.trace()

from django.core.wsgi import get_wsgi_application

application = get_wsgi_application()
```

[View Example]
**views.py** (python)
```python
# views.py
from django.http import JsonResponse
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from openai import OpenAI
import json

openai_client = OpenAI()

@csrf_exempt
@require_POST
def chat(request):
    # This view is automatically traced
    data = json.loads(request.body)
    message = data.get("message")

    response = openai_client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": message}]
    )

    return JsonResponse({
        "response": response.choices[0].message.content
    })
```

## What Gets Captured
- **Request path** — URL and HTTP method
- **View name** — Django view function/class
- **Response status** — HTTP status code
- **Latency** — Request duration
- **Child spans** — LLM calls, ORM queries, etc.
## Class-Based Views
```python
# views.py
from django.http import JsonResponse
from django.views import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from openai import OpenAI
import json

openai_client = OpenAI()

@method_decorator(csrf_exempt, name='dispatch')
class ChatView(View):
    def post(self, request):
        # Class-based views are traced automatically
        data = json.loads(request.body)

        response = openai_client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": data["message"]}]
        )

        return JsonResponse({
            "response": response.choices[0].message.content
        })
```

## Manual Traces
```python
# views.py
import tracium
from django.http import JsonResponse
from openai import OpenAI
import json

openai_client = OpenAI()
tracium_client = tracium.get_client()

def analyze(request):
    data = json.loads(request.body)

    with tracium_client.agent_trace(
        agent_name="django-analyzer",
        tags=["django"]
    ) as trace:
        with trace.span(span_type="retrieval", name="fetch_data") as span:
            span.record_input({"query": data["query"]})
            results = fetch_from_db(data["query"])
            span.record_output({"count": len(results)})

        response = openai_client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": f"Context: {results}"},
                {"role": "user", "content": data["query"]}
            ]
        )

    return JsonResponse({"result": response.choices[0].message.content})
```

## Django REST Framework
```python
# views.py
import tracium
from rest_framework.views import APIView
from rest_framework.response import Response
from openai import OpenAI

# Make sure tracium.trace() is called in wsgi.py
openai_client = OpenAI()


class ChatAPIView(APIView):
    def post(self, request):
        # DRF views are traced
        message = request.data.get("message")

        response = openai_client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": message}]
        )

        return Response({
            "response": response.choices[0].message.content
        })
```

## Async Views (Django 4.1+)
```python
# views.py
from django.http import JsonResponse
from openai import AsyncOpenAI
import json

openai_client = AsyncOpenAI()

async def async_chat(request):
    # Async views are traced automatically
    data = json.loads(request.body)

    response = await openai_client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": data["message"]}]
    )

    return JsonResponse({
        "response": response.choices[0].message.content
    })
```