Django Integration

Automatic request tracing for Django applications.

Quick Start

Initialize Tracium in your Django settings or wsgi.py:

wsgi.py (python)
# wsgi.py
# Tracium must be imported and initialized BEFORE the Django application
# is created, so that request handling is instrumented from the start.
import tracium
# Initialize before Django application loads
tracium.trace()
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

View Example

views.py (python)
# views.py
from django.http import JsonResponse
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from openai import OpenAI
import json

openai_client = OpenAI()


@csrf_exempt
@require_POST
def chat(request):
    """Handle a chat POST request.

    This view is traced automatically once ``tracium.trace()`` has been
    called (e.g. in wsgi.py). Expects a JSON body: {"message": "..."}.

    Returns:
        JsonResponse with the model's reply under the "response" key.
    """
    data = json.loads(request.body)
    message = data.get("message")
    response = openai_client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": message}],
    )
    return JsonResponse({
        "response": response.choices[0].message.content,
    })

What Gets Captured

  • Request path - URL and HTTP method
  • View name - Django view function/class
  • Response status - HTTP status code
  • Latency - Request duration
  • Child spans - LLM calls, ORM queries, etc.

Class-Based Views

# views.py
from django.http import JsonResponse
from django.views import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from openai import OpenAI
import json

openai_client = OpenAI()


@method_decorator(csrf_exempt, name='dispatch')
class ChatView(View):
    """Class-based chat endpoint; traced automatically by Tracium."""

    def post(self, request):
        """Send the JSON body's "message" field to the model.

        Returns:
            JsonResponse with the model's reply under the "response" key.
        """
        data = json.loads(request.body)
        response = openai_client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": data["message"]}],
        )
        return JsonResponse({
            "response": response.choices[0].message.content,
        })

Manual Traces

# views.py
import tracium
from django.http import JsonResponse
from openai import OpenAI
import json

openai_client = OpenAI()
tracium_client = tracium.get_client()


def analyze(request):
    """Run a retrieval + LLM step under an explicit (manual) Tracium trace.

    Expects a JSON body: {"query": "..."}.

    Returns:
        JsonResponse with the model's answer under the "result" key.
    """
    data = json.loads(request.body)
    with tracium_client.agent_trace(
        agent_name="django-analyzer",
        tags=["django"],
    ) as trace:
        # Record the retrieval step as a child span of the trace.
        with trace.span(span_type="retrieval", name="fetch_data") as span:
            span.record_input({"query": data["query"]})
            # NOTE: fetch_from_db is assumed to be defined elsewhere in the app.
            results = fetch_from_db(data["query"])
            span.record_output({"count": len(results)})
        # LLM call happens inside the trace so it is captured as a child span.
        response = openai_client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": f"Context: {results}"},
                {"role": "user", "content": data["query"]},
            ],
        )
    return JsonResponse({"result": response.choices[0].message.content})

Django REST Framework

# views.py
import tracium
from rest_framework.views import APIView
from rest_framework.response import Response
from openai import OpenAI

# Make sure tracium.trace() is called in wsgi.py
openai_client = OpenAI()


class ChatAPIView(APIView):
    """Django REST Framework chat endpoint; DRF views are traced too."""

    def post(self, request):
        """Send request.data["message"] to the model.

        Returns:
            DRF Response with the model's reply under the "response" key.
        """
        message = request.data.get("message")
        response = openai_client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": message}],
        )
        return Response({
            "response": response.choices[0].message.content,
        })

Async Views (Django 4.1+)

# views.py
from django.http import JsonResponse
from openai import AsyncOpenAI
import json

openai_client = AsyncOpenAI()


async def async_chat(request):
    """Async chat view (Django 4.1+); traced automatically by Tracium.

    Expects a JSON body: {"message": "..."}.

    Returns:
        JsonResponse with the model's reply under the "response" key.
    """
    data = json.loads(request.body)
    response = await openai_client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": data["message"]}],
    )
    return JsonResponse({
        "response": response.choices[0].message.content,
    })