Flask Integration
Automatic request tracing for Flask applications.
Quick Start
app.py

```python
import tracium
from flask import Flask, request
from openai import OpenAI

# Initialize Tracium FIRST
tracium.trace()

app = Flask(__name__)
openai_client = OpenAI()

@app.route("/chat", methods=["POST"])
def chat():
    # This request is automatically traced
    message = request.json["message"]
    response = openai_client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": message}]
    )
    return {"response": response.choices[0].message.content}

if __name__ == "__main__":
    app.run(debug=True)
```
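To see a trace end to end, exercise the endpoint with Flask's built-in test client (or any HTTP client). A minimal sketch, assuming the app.py above is importable and OPENAI_API_KEY is set in your environment:

```python
# Smoke test for the /chat route above; assumes app.py is on the import path
# and OPENAI_API_KEY is set so the OpenAI call succeeds.
from app import app

with app.test_client() as client:
    resp = client.post("/chat", json={"message": "Hello!"})
    print(resp.status_code)   # expect 200
    print(resp.get_json())    # {"response": "..."}
```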
What Gets Captured
- Request path - URL and HTTP method
- Endpoint name - Flask endpoint/function name
- Response status - HTTP status code
- Latency - Request duration
- Child spans - LLM calls, database queries, etc. (see the sketch after this list)
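The child-span bullet is easiest to see in code. The sketch below is illustrative only: because the automatic request trace is created behind the scenes, it uses the manual agent_trace / span API from the Manual Traces section instead, and the sqlite3 lookup and span names are our own assumptions, not part of the integration.

```python
# Illustrative sketch: record a database query as an explicit child span.
# Uses the agent_trace / span API documented under "Manual Traces" below;
# the sqlite3 query and the span names are hypothetical.
import sqlite3
import tracium

tracium.trace()
tracium_client = tracium.get_client()

def lookup_user(user_id):
    with tracium_client.agent_trace(
        agent_name="flask-analyzer", tags=["flask"]
    ) as trace:
        with trace.span(span_type="tool", name="db-lookup") as span:
            span.record_input({"user_id": user_id})
            conn = sqlite3.connect("app.db")
            try:
                row = conn.execute(
                    "SELECT name FROM users WHERE id = ?", (user_id,)
                ).fetchone()
            finally:
                conn.close()
            span.record_output({"row": row})
    return row
```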
Manual Traces
```python
import tracium
from flask import Flask, request
from openai import OpenAI

tracium.trace()

app = Flask(__name__)
openai_client = OpenAI()
tracium_client = tracium.get_client()

def preprocess(text):
    # Placeholder preprocessing step so the example is self-contained
    return text.strip()

@app.route("/analyze", methods=["POST"])
def analyze():
    text = request.json["text"]
    with tracium_client.agent_trace(
        agent_name="flask-analyzer",
        tags=["flask", "analyze"]
    ) as trace:
        with trace.span(span_type="tool", name="preprocess") as span:
            span.record_input({"text": text})
            processed = preprocess(text)
            span.record_output({"processed": processed})
        response = openai_client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": processed}]
        )
        return {"result": response.choices[0].message.content}
```
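If several routes need the same manual trace setup, one option is to factor it into a decorator. This is a minimal sketch built only on the agent_trace context manager shown above; the traced helper and the /summarize route are our own hypothetical additions, and it reuses the app, request, openai_client, and tracium_client names from the previous example.

```python
# Hypothetical helper, not part of Tracium: wrap a view function in a
# manual trace using the agent_trace context manager shown above.
import functools

def traced(agent_name, tags=None):
    def decorator(view):
        @functools.wraps(view)
        def wrapper(*args, **kwargs):
            with tracium_client.agent_trace(agent_name=agent_name, tags=tags or []):
                return view(*args, **kwargs)
        return wrapper
    return decorator

@app.route("/summarize", methods=["POST"])
@traced("flask-analyzer", tags=["flask", "summarize"])
def summarize():
    text = request.json["text"]
    response = openai_client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": f"Summarize: {text}"}]
    )
    return {"summary": response.choices[0].message.content}
```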
Blueprints

```python
import tracium
from flask import Flask, Blueprint, request
from openai import OpenAI

tracium.trace()

# Blueprint routes are traced automatically
api = Blueprint("api", __name__, url_prefix="/api")
openai_client = OpenAI()

@api.route("/chat", methods=["POST"])
def chat():
    message = request.json["message"]
    response = openai_client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": message}]
    )
    return {"response": response.choices[0].message.content}

app = Flask(__name__)
app.register_blueprint(api)
```
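Because the blueprint is registered with url_prefix="/api", the route above is served at /api/chat, and the Flask endpoint name captured for it is api.chat (blueprint name plus view function name).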
Error Handling

```python
import tracium
from flask import Flask, request, jsonify
from openai import OpenAI, RateLimitError

tracium.trace()

app = Flask(__name__)
openai_client = OpenAI()

@app.errorhandler(500)
def handle_error(error):
    # Errors are captured in the trace
    return jsonify({"error": str(error)}), 500

@app.route("/chat", methods=["POST"])
def chat():
    try:
        response = openai_client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": request.json["message"]}]
        )
        return {"response": response.choices[0].message.content}
    except RateLimitError:
        # Exception is recorded in trace
        return jsonify({"error": "Rate limited"}), 429
```
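A handler registered for a specific status code only fires for that error. If you also want unexpected exceptions to come back as JSON, Flask lets you register a handler for Exception. A minimal sketch using standard Flask APIs (not Tracium-specific; whether Tracium marks the surrounding trace as failed in this path is an assumption we have not verified):

```python
from flask import jsonify
from werkzeug.exceptions import HTTPException

@app.errorhandler(Exception)
def handle_unexpected(error):
    # Let Flask's normal HTTP errors (404, 405, ...) pass through unchanged
    if isinstance(error, HTTPException):
        return error
    # Anything else becomes a JSON 500; the exception should still surface
    # in the request's trace (assumption)
    return jsonify({"error": "internal server error"}), 500
```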
With Gunicorn

```bash
# Run with gunicorn
gunicorn -w 4 app:app
```
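A note on worker processes: without gunicorn's --preload flag, each of the four workers imports app.py after forking, so tracium.trace() runs once per worker. With --preload (gunicorn --preload -w 4 app:app), the app, and therefore the Tracium initialization, is loaded in the master process before the fork; whether Tracium's exporter is fork-safe in that configuration is worth verifying for your deployment.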