Advanced Usage
Advanced features and configurations
Custom Metadata & Tags
import palantyra

# Set metadata for current trace
palantyra.set_trace_metadata(
    environment="production",
    region="us-east-1",
    customer_tier="premium",
    feature_flag="new_model_v2"
)

# Set span attributes
palantyra.set_span_attributes({
    "cache_hit": True,
    "processing_time_ms": 145,
    "model_temperature": 0.7
})
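These calls attach data to whichever trace or span is currently active, so they are typically made inside a function decorated with @palantyra.observe. A minimal sketch (the classify_ticket function and its attribute names are illustrative, not part of the SDK):

import palantyra

@palantyra.observe(name="Classify Ticket")
def classify_ticket(ticket_text: str) -> str:
    # Attach request-level context to the active trace
    palantyra.set_trace_metadata(
        environment="production",
        customer_tier="premium"
    )

    label = "billing" if "invoice" in ticket_text.lower() else "general"

    # Record facts discovered while processing on the active span
    palantyra.set_span_attributes({
        "ticket_length": len(ticket_text),
        "predicted_label": label
    })
    return label

print(classify_ticket("Question about my latest invoice"))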
Error Handling & Recording
import palantyra
from openai import OpenAI

client = OpenAI()

@palantyra.observe(name="AI Task")
def risky_ai_operation():
    try:
        response = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": "Complex query"}]
        )
        return response
    except Exception as e:
        # Error is automatically recorded in span
        palantyra.set_span_attributes({
            "error_type": type(e).__name__,
            "error_handled": True
        })
        raise  # Re-raise to maintain error flow
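Because the exception is re-raised, the caller handles it as usual while the failed span is still exported. A sketch of a call site (the fallback behavior is illustrative):

try:
    result = risky_ai_operation()
except Exception:
    # The "AI Task" span has already captured the error details;
    # the application can fall back or retry as needed.
    result = None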
Async Support
import palantyra
import asyncio
from openai import AsyncOpenAI

async_client = AsyncOpenAI()

@palantyra.observe(name="Async AI Task")
async def async_ai_call():
    # Works with async LLM clients
    response = await async_client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello"}]
    )
    return response

# Run async
result = asyncio.run(async_ai_call())
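Decorated coroutines can also be scheduled concurrently; assuming the decorator creates one span per invocation (as is typical for tracing decorators), each call below produces its own "Async AI Task" span. A sketch using asyncio.gather:

import asyncio

async def run_batch():
    # Three concurrent calls, each traced separately
    return await asyncio.gather(
        async_ai_call(),
        async_ai_call(),
        async_ai_call()
    )

results = asyncio.run(run_batch())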
Batched Export Configuration
import palantyra

palantyra.initialize(
    project_api_key="your-api-key",
    use_batching=True,   # Enable batching
    batch_size=100,      # Spans per batch
    flush_interval=5,    # Seconds between flushes
    timeout=30           # Request timeout
)
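The same parameters can be tuned for the workload; for example, a low-traffic service might prefer smaller, more frequent flushes so spans show up sooner. A sketch (the values are illustrative, not recommendations):

import palantyra

palantyra.initialize(
    project_api_key="your-api-key",
    use_batching=True,
    batch_size=20,       # Smaller batches for low-traffic services
    flush_interval=2,    # Flush more often so spans appear sooner
    timeout=10
)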
Cost Tracking
Automatic cost calculation for LLM calls:
from palantyra.utils import CostCalculator

# Manual cost calculation
cost = CostCalculator.calculate_cost(
    model="gpt-4",
    input_tokens=100,
    output_tokens=50,
    provider="openai"
)
print(f"Estimated cost: ${cost:.4f}")

# Costs are automatically added to traces
# when using auto-instrumentation
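When you already have a response object in hand, its token counts can be fed straight into the calculator. This sketch assumes an OpenAI chat completion whose usage field is populated:

from openai import OpenAI
from palantyra.utils import CostCalculator

client = OpenAI()
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Summarize tracing in one line."}]
)

# prompt_tokens / completion_tokens come from the API response
cost = CostCalculator.calculate_cost(
    model="gpt-4",
    input_tokens=response.usage.prompt_tokens,
    output_tokens=response.usage.completion_tokens,
    provider="openai"
)
print(f"Estimated cost: ${cost:.4f}")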