Python SDK

The official Ants Platform Python SDK for monitoring AI agents and LLM applications.

Installation

pip install ants-platform
 
# With framework integrations (quote the extras to avoid shell globbing)
pip install "ants-platform[langchain]"
pip install "ants-platform[openai]"

Quick Start

from ants_platform import AntsPlatform
import os
 
# Initialize
client = AntsPlatform(
    public_key=os.getenv('ANTS_PLATFORM_PUBLIC_KEY'),
    secret_key=os.getenv('ANTS_PLATFORM_SECRET_KEY'),
    host=os.getenv('ANTS_PLATFORM_HOST', 'https://api.agenticants.ai')
)
 
# Create a trace using context manager
with client.start_as_current_span(
    name='customer-support-agent',
    input={'user_input': 'Help me with my order'},
    metadata={'agent': 'customer-support'}
) as trace_span:
    # Your agent logic
    result = process_query('Help me with my order')
    
    # Update trace with output
    trace_span.update(output={'result': result})
    client.update_current_trace(
        status='success',
        output_data={'result': result}
    )

Core Features

Tracing

from ants_platform import AntsPlatform
import os
 
client = AntsPlatform(
    public_key=os.getenv('ANTS_PLATFORM_PUBLIC_KEY'),
    secret_key=os.getenv('ANTS_PLATFORM_SECRET_KEY')
)
 
# Start a trace using context manager
with client.start_as_current_span(
    name='agent-execution',
    input={'user_query': user_query},
    metadata={
        'user_id': 'user_123',
        'session_id': 'session_abc',
        'agent': 'my-agent'
    }
) as trace_span:
    # Update trace with user and tags
    client.update_current_trace(
        user_id='user_123',
        tags=['agent:my-agent', 'service:my-service', 'version:1.0.0']
    )
    
    # Create spans for detailed tracking
    validation_span = client.create_span(
        trace_id=None,  # Uses current trace context
        name='request_validation',
        input_data={'raw_input': user_query},
        output_data={'validated_input': user_query, 'input_length': len(user_query)},
        metadata={'step': 'validation'}
    )
    
    # Create generation span for LLM calls
    generation = client.create_generation(
        trace_id=None,  # Uses current trace context
        name='gpt-4_llm_call',
        model='gpt-4',
        prompt=user_query,
        completion=llm_response,
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        metadata={
            'step': 'llm_call',
            'status': 'success',
            'agent': 'my-agent'
        }
    )
    
    # Update trace with final results
    client.update_current_trace(
        status='success',
        output_data={'result': llm_response},
        metadata={
            'input_tokens': input_tokens,
            'output_tokens': output_tokens,
            'total_tokens': input_tokens + output_tokens,
            'status': 'success'
        }
    )
    
    trace_span.update(output={'result': llm_response})

Error Handling

try:
    with client.start_as_current_span(
        name='agent-process',
        input={'query': query}
    ) as trace_span:
        result = agent.process(query)
        
        client.update_current_trace(
            status='success',
            output_data={'result': result}
        )
        trace_span.update(output={'result': result})
        
except Exception as error:
    # Log error span
    client.create_span(
        trace_id=None,
        name='error_handling',
        input_data={'error': str(error)},
        output_data={'error_response': str(error)},
        metadata={
            'error_type': type(error).__name__,
            'step': 'error_handling'
        }
    )
    
    client.update_current_trace(
        status='error',
        output_data={'error': str(error)}
    )
    raise
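
If this try/except pattern shows up in several places, it can be folded into a small reusable context manager. The sketch below is illustrative, not part of the SDK: the traced helper is hypothetical and only wraps the start_as_current_span and update_current_trace calls documented above.

from contextlib import contextmanager
 
@contextmanager
def traced(client, name, input_data=None, metadata=None):
    # Open a trace, hand the span to the caller, and record the outcome on exit
    try:
        with client.start_as_current_span(
            name=name,
            input=input_data or {},
            metadata=metadata or {}
        ) as span:
            yield span
            client.update_current_trace(status='success')
    except Exception as error:
        client.update_current_trace(status='error', output_data={'error': str(error)})
        raise
 
# Usage
with traced(client, 'agent-process', {'query': query}) as span:
    result = agent.process(query)
    span.update(output={'result': result})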

Using Helper Functions

For a cleaner API, you can wrap the client in small helper functions, following the same pattern as the QA agent example:

# Helper functions (create these or import from shared utilities)
def create_trace(name, input_data, metadata=None):
    return client.start_as_current_span(
        name=name,
        input=input_data,
        metadata=metadata or {}
    )
 
def create_span(name, input_data, output_data, metadata=None):
    return client.create_span(
        trace_id=None,  # Uses current trace context
        name=name,
        input_data=input_data,
        output_data=output_data,
        metadata=metadata or {}
    )
 
def create_generation(name, model, prompt, completion, input_tokens, output_tokens, metadata=None):
    return client.create_generation(
        trace_id=None,  # Uses current trace context
        name=name,
        model=model,
        prompt=prompt,
        completion=completion,
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        metadata=metadata or {}
    )
 
def update_trace(status=None, output_data=None, metadata=None):
    return client.update_current_trace(
        status=status,
        output_data=output_data,
        metadata=metadata
    )
 
# Usage
with create_trace('my-agent', {'input': 'query'}, {'agent': 'my-agent'}) as trace:
    span = create_span('validation', {'input': 'query'}, {'validated': True})
    gen = create_generation('llm-call', 'gpt-4', 'prompt', 'response', 10, 20)
    update_trace(status='success', output_data={'result': 'response'})

Configuration

from ants_platform import AntsPlatform
import os
 
# Custom configuration
client = AntsPlatform(
    public_key=os.getenv('ANTS_PLATFORM_PUBLIC_KEY'),
    secret_key=os.getenv('ANTS_PLATFORM_SECRET_KEY'),
    host=os.getenv('ANTS_PLATFORM_HOST', 'https://api.agenticants.ai'),
    environment=os.getenv('ANTS_PLATFORM_ENVIRONMENT', 'production')
)
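
For anything beyond local experiments, it helps to fail fast when credentials are missing rather than discovering it on the first trace. The build_client helper below is a sketch, not part of the SDK; it only wraps the constructor shown above.

def build_client() -> AntsPlatform:
    # Read credentials from the environment and fail early if they are absent
    public_key = os.getenv('ANTS_PLATFORM_PUBLIC_KEY')
    secret_key = os.getenv('ANTS_PLATFORM_SECRET_KEY')
    if not public_key or not secret_key:
        raise RuntimeError('Set ANTS_PLATFORM_PUBLIC_KEY and ANTS_PLATFORM_SECRET_KEY')
    return AntsPlatform(
        public_key=public_key,
        secret_key=secret_key,
        host=os.getenv('ANTS_PLATFORM_HOST', 'https://api.agenticants.ai'),
        environment=os.getenv('ANTS_PLATFORM_ENVIRONMENT', 'production')
    )
 
client = build_client()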

Using the observe() Decorator

from ants_platform import observe
 
@observe()
def handle_customer_query(query: str) -> str:
    context = get_context(query)
    response = generate_response(query, context)
    return response
 
# Automatically traced!
result = handle_customer_query("Help with my order")

Framework Integrations

LangChain

from ants_platform import AntsPlatform
# The LangChain integration is installed via the ants-platform[langchain] extra.
# See the SDK documentation for the dedicated integration pattern;
# a manual tracing approach is sketched below.
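
Until you adopt a dedicated integration, LangChain runs can be traced manually with the client methods documented on this page. The sketch below assumes the langchain-openai package is installed; the prompt, model name, and span names are placeholders, and this is not the official integration.

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
 
prompt = ChatPromptTemplate.from_template('Answer the customer question: {question}')
llm = ChatOpenAI(model='gpt-4o-mini')
chain = prompt | llm
 
question = 'Where is my order?'
with client.start_as_current_span(
    name='langchain-chain',
    input={'question': question},
    metadata={'framework': 'langchain'}
) as trace_span:
    response = chain.invoke({'question': question})
    trace_span.update(output={'result': response.content})
    client.update_current_trace(status='success', output_data={'result': response.content})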

API Reference

AntsPlatform Class

from typing import Any, ContextManager, Dict, List, Optional
 
class AntsPlatform:
    def __init__(
        self,
        public_key: str,
        secret_key: str,
        host: str = 'https://api.agenticants.ai',
        environment: str = 'production',
        **kwargs
    ): ...
    
    def start_as_current_span(
        self,
        name: str,
        input: Optional[Dict[str, Any]] = None,
        output: Optional[Dict[str, Any]] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> ContextManager: ...
    
    def create_span(
        self,
        trace_id: Optional[str],
        name: str,
        input_data: Dict[str, Any],
        output_data: Dict[str, Any],
        metadata: Optional[Dict[str, Any]] = None
    ) -> None: ...
    
    def create_generation(
        self,
        trace_id: Optional[str],
        name: str,
        model: str,
        prompt: str,
        completion: str,
        input_tokens: int,
        output_tokens: int,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None: ...
    
    def update_current_trace(
        self,
        status: Optional[str] = None,
        user_id: Optional[str] = None,
        tags: Optional[List[str]] = None,
        output_data: Optional[Dict[str, Any]] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None: ...

Best Practices

# 1. Use context managers for traces
with client.start_as_current_span(name='operation', input={'data': data}) as trace:
    result = do_work()
    trace.update(output={'result': result})
 
# 2. Add rich metadata
client.update_current_trace(
    user_id='user_123',
    tags=['agent:my-agent', 'service:my-service', 'version:1.0.0'],
    metadata={
        'user_id': 'user_123',
        'model': 'gpt-4',
        'version': '1.0.0'
    }
)
 
# 3. Handle errors properly
try:
    with client.start_as_current_span(name='process', input={'query': query}) as trace:
        result = process()
        client.update_current_trace(status='success', output_data={'result': result})
except Exception as e:
    client.create_span(
        trace_id=None,
        name='error_handling',
        input_data={'error': str(e)},
        output_data={'error_response': str(e)},
        metadata={'error_type': type(e).__name__}
    )
    client.update_current_trace(status='error', output_data={'error': str(e)})
    raise
 
# 4. Use helper functions for cleaner code (recommended pattern)
# See "Using Helper Functions" section above

Next Steps