Configuration

Advanced configuration options for the Tracium SDK.

TraciumClientConfig

The configuration class for advanced client settings. The values shown below are the defaults.

from tracium import TraciumClientConfig

config = TraciumClientConfig(
    base_url="https://api.tracium.ai",   # API endpoint (str)
    timeout=30.0,                        # Request timeout in seconds (float)
    user_agent=None,                     # Custom user agent (str | None)
    retry_config=None,                   # Retry configuration (RetryConfig | None)
    fail_open=True,                      # Continue on errors (bool)

    # Queue settings
    max_queue_size=10000,                # Max events in queue (int)
    block_on_full_queue=False,           # Wait when queue is full (bool)
    queue_timeout=5.0,                   # Max wait in seconds when blocking (float)
    queue_warning_threshold=0.8,         # Warn at 80% capacity (float)
)

RetryConfig

Configure retry behavior for failed API requests. The values shown below are the defaults.

from tracium import RetryConfig

retry_config = RetryConfig(
    max_retries=3,                          # Maximum retry attempts (int)
    backoff_factor=1.0,                     # Exponential backoff multiplier (float)
    retry_statuses={500, 502, 503, 504},    # HTTP status codes to retry (set[int])
)

Usage

import tracium
from tracium import TraciumClientConfig, RetryConfig

config = TraciumClientConfig(
    retry_config=RetryConfig(
        max_retries=5,
        backoff_factor=2.0,  # Wait 2s, 4s, 8s, 16s, 32s between retries
    )
)

client = tracium.init(config=config)
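
The delays in the comment above match the common exponential-backoff formula delay = backoff_factor * 2 ** attempt. A quick sketch of that calculation (the formula is an assumption inferred from the 2s/4s/8s/16s/32s sequence above, not a documented Tracium internal):

# Assumed formula: delay = backoff_factor * 2 ** attempt,
# which reproduces the 2s, 4s, 8s, 16s, 32s sequence above.
backoff_factor = 2.0
max_retries = 5

for attempt in range(max_retries):
    delay = backoff_factor * (2 ** attempt)
    print(f"Retry {attempt + 1}: wait {delay:.0f}s")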

Environment Variables

Configure Tracium using environment variables:

Variable            Description             Default
TRACIUM_API_KEY     Your Tracium API key    None (required)
TRACIUM_BASE_URL    Custom API endpoint     https://api.tracium.ai
# .env file
TRACIUM_API_KEY=sk_live_your_api_key
TRACIUM_BASE_URL=https://api.tracium.ai
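
With these variables set (for example via the .env file above), the client can be initialized without passing the key explicitly. This assumes the SDK picks up TRACIUM_API_KEY and TRACIUM_BASE_URL at init time, as the table above describes:

import tracium

# Assumes TRACIUM_API_KEY (and optionally TRACIUM_BASE_URL) are already set
# in the process environment, e.g. exported or loaded from the .env file above.
client = tracium.init()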

Queue Management

Tracium uses a background queue to send telemetry without blocking your application.
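
Conceptually this is a bounded producer/consumer queue drained by a background worker thread. The sketch below is a stdlib-only illustration of the drop-versus-block trade-off controlled by the settings above; it is not Tracium's internal implementation, and send_to_backend is a made-up stand-in for the real network call.

import queue
import threading
import time

def send_to_backend(event):
    """Stand-in for the real network call that ships an event to the API."""
    time.sleep(0.01)

events = queue.Queue(maxsize=10_000)  # bounded, like max_queue_size

def worker():
    # A background thread drains the queue so the application never waits on I/O.
    while True:
        event = events.get()
        send_to_backend(event)
        events.task_done()

threading.Thread(target=worker, daemon=True).start()

def enqueue(event, block_on_full_queue=False, queue_timeout=5.0):
    # Mirrors the drop-vs-block choice exposed by the config options above.
    try:
        if block_on_full_queue:
            events.put(event, timeout=queue_timeout)  # wait up to queue_timeout
        else:
            events.put_nowait(event)                  # drop immediately if full
    except queue.Full:
        pass  # event dropped (Tracium tracks drops in its queue stats)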

High-Volume Configuration

import tracium
from tracium import TraciumClientConfig

# For applications generating many events
config = TraciumClientConfig(
    max_queue_size=50000,  # Increase queue size
)

client = tracium.init(config=config)

Prevent Event Loss

from tracium import TraciumClientConfig

# Enable blocking mode to prevent event loss
config = TraciumClientConfig(
    block_on_full_queue=True,   # Wait instead of dropping
    queue_timeout=30.0,         # Max wait time in seconds
)

# Warning: this may slow your application if the queue fills up

Monitoring Queue Health

import tracium

tracium.init()

# Get queue statistics
stats = tracium.get_queue_stats()
print(f"Queue: {stats['queue_size']}/{stats['max_queue_size']}")
print(f"Capacity: {stats['capacity_percent']:.1f}%")
print(f"Healthy: {stats['is_healthy']}")
print(f"Dropped: {stats['total_dropped']}")

# Alert on issues
if stats['total_dropped'] > 0:
    print(f"Warning: {stats['total_dropped']} events dropped!")
if stats['capacity_percent'] > 80:
    print("Warning: Queue approaching capacity!")

Logging Configuration

Configure Tracium's logging output for debugging.

import logging

import tracium
from tracium import configure_logging

# Enable debug logging
configure_logging(level=logging.DEBUG)

# Now initialize - you'll see detailed logs
client = tracium.init()

# Logs include:
# - API request/response details (sensitive data redacted)
# - Retry attempts
# - Instrumentation activity
# - Context propagation
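
Because configure_logging takes a standard logging level, the debug output can be captured like any other Python logs. A minimal sketch that writes it to a file via a stdlib handler on the root logger (this assumes Tracium emits records through the standard logging module; the handler setup itself is plain stdlib, not a Tracium API):

import logging

from tracium import configure_logging

# Assumption: Tracium logs via the stdlib logging module, so a root-logger
# handler will capture its records alongside the rest of the application's.
handler = logging.FileHandler("tracium-debug.log")
handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s"))
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(logging.DEBUG)

configure_logging(level=logging.DEBUG)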

Multi-Tenant Configuration

Set tenant context for multi-tenant applications.

import tracium
from tracium import set_tenant, get_current_tenant

tracium.init()

# Set tenant for current context
set_tenant("tenant_123")

# All traces/spans now include tenant_id
with tracium.start_trace(agent_name="api") as trace:
    # This trace has tenant_id="tenant_123"
    pass

# Get current tenant
current = get_current_tenant()  # "tenant_123"
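
In a request-scoped service, the usual pattern is to set the tenant at the start of each request before tracing. The sketch below uses only the calls shown above; handle_request and process are hypothetical application code, not part of the SDK:

import tracium
from tracium import set_tenant

tracium.init()

def process(payload):
    """Stand-in for the application's real business logic."""
    return payload

def handle_request(tenant_id, payload):
    # Hypothetical handler: scope all telemetry for this request to one tenant.
    set_tenant(tenant_id)

    # Everything traced inside carries tenant_id=tenant_id
    with tracium.start_trace(agent_name="api") as trace:
        return process(payload)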

Default Options

Set default values that apply to all traces.

import tracium

client = tracium.init(
    api_key="sk_live_...",
    # These defaults apply to all auto-traces
    default_agent_name="my-app",
    default_model_id="gpt-4",
    default_version="1.2.3",
    default_tags=["production", "us-east-1"],
    default_metadata={
        "environment": "production",
        "region": "us-east-1",
    },
)

# Auto-traced calls use these defaults
from openai import OpenAI

openai = OpenAI()

# This call is traced with agent_name="my-app", tags=["production", "us-east-1"]
response = openai.chat.completions.create(...)

Complete Example

import logging

import tracium
from tracium import TraciumClientConfig, RetryConfig, configure_logging

# Enable debug logging during development
configure_logging(level=logging.DEBUG)

# Create comprehensive configuration
config = TraciumClientConfig(
    base_url="https://api.tracium.ai",
    timeout=30.0,
    retry_config=RetryConfig(
        max_retries=3,
        backoff_factor=1.5,
    ),
    max_queue_size=20000,
    block_on_full_queue=False,  # Don't block in production
    queue_warning_threshold=0.9,
)

# Initialize with all options
client = tracium.init(
    api_key="sk_live_...",
    config=config,
    default_agent_name="my-service",
    default_version="2.0.0",
    default_tags=["production"],
    auto_instrument_llm_clients=True,
    auto_instrument_langchain=True,
    auto_instrument_langgraph=True,
)

# Enable auto-instrumentation
client.trace()