View on PyPI netra-sdk on Python Package Index
Source on GitHub View source code and contribute
Installation
Netra Class
The Netra class is the main entry point for all SDK operations. All methods are static and can be called directly on the class.
init
Initialize the Netra SDK with configuration options. Call this once at the start of your application.
from netra import Netra
from netra.instrumentation.instruments import InstrumentSet
Netra.init(
app_name = "my-ai-app" ,
headers = f "x-api-key= { os.getenv( 'NETRA_API_KEY' ) } " ,
environment = "production" ,
trace_content = True ,
instruments = {InstrumentSet. OPENAI , InstrumentSet. LANGCHAIN },
)
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `app_name` | `Optional[str]` | Name of your application for identification in the dashboard |
| `headers` | `Optional[str]` | Headers for API requests (e.g., `x-api-key=your-key`) |
| `disable_batch` | `Optional[bool]` | Disable batch processing of spans (default: `False`) |
| `trace_content` | `Optional[bool]` | Enable tracing of prompt/completion content (default: `True`) |
| `resource_attributes` | `Optional[Dict[str, Any]]` | Additional OpenTelemetry resource attributes |
| `environment` | `Optional[str]` | Application environment (e.g., "production", "staging") |
| `instruments` | `Optional[Set[InstrumentSet]]` | Set of instrumentations to enable (enables only these) |
| `block_instruments` | `Optional[Set[InstrumentSet]]` | Set of instrumentations to disable |
| `enable_root_span` | `Optional[bool]` | Create a root span wrapping all traces |
| `debug_mode` | `Optional[bool]` | Enable debug logging |
| `enable_scrubbing` | `Optional[bool]` | Enable PII scrubbing |
| `blocked_spans` | `Optional[List[str]]` | Span names to filter out globally (supports wildcards) |
| `enable_metrics` | `Optional[bool]` | Enable metrics for dashboard |
| `metrics_export_interval_ms` | `Optional[int]` | Interval for exporting metrics |
| `export_auto_metrics` | `Optional[bool]` | *(description truncated in source — verify against SDK reference)* |
Returns: None
start_span
Create a new span for manual tracing. Use as a context manager for automatic span lifecycle management.
from netra import Netra, SpanType
# Basic usage
with Netra.start_span( "process-document" ) as span:
result = process_document(doc)
span.set_attribute( "pages" , result.page_count)
# With span type and attributes
with Netra.start_span(
"generate-response" ,
as_type = SpanType. GENERATION ,
attributes = { "model" : "gpt-4" },
module_name = "chat"
) as span:
response = generate_response(prompt)
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `name` | `str` | Name of the span (required) |
| `as_type` | `Optional[SpanType]` | Type of span for categorization |
| `attributes` | `Optional[Dict[str, Any]]` | Initial attributes to set |
| `module_name` | `Optional[str]` | Module name for organization |
Returns: SpanWrapper instance
set_session_id
Set the session ID for the current context. All subsequent spans will be associated with this session.
Netra.set_session_id( "session-abc123" )
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `session_id` | `str` | Unique identifier for the session |
Returns: None
set_user_id
Set the user ID for the current context.
Netra.set_user_id( "user-456" )
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `user_id` | `str` | Unique identifier for the user |
Returns: None
set_tenant_id
Set the tenant ID for multi-tenant applications.
Netra.set_tenant_id( "tenant-org-789" )
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `tenant_id` | `str` | Unique identifier for the tenant |
Returns: None
set_custom_attributes
Add custom key-value attributes to the current context.
Netra.set_custom_attributes( key = "customer_tier" , value = "premium" )
Netra.set_custom_attributes( key = "region" , value = "us-east" )
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `key` | `str` | Attribute key |
| `value` | `Any` | Attribute value |
Returns: None
set_custom_event
Record a custom event in the current context.
Netra.set_custom_event(
event_name = "user_feedback" ,
attributes = {
"rating" : 5 ,
"comment" : "Great response!" ,
"category" : "positive"
}
)
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `event_name` | `str` | Name of the event |
| `attributes` | `Any` | Event attributes |
Returns: None
add_conversation
Append a conversation entry to the current span. Useful for tracking multi-turn conversations.
from netra import Netra
from netra.session_manager import ConversationType
Netra.add_conversation(
conversation_type = ConversationType. INPUT ,
role = "user" ,
value = "What is the weather today?"
)
Netra.add_conversation(
conversation_type = ConversationType. OUTPUT ,
role = "assistant" ,
value = "The weather is sunny with a high of 72°F."
)
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `conversation_type` | `ConversationType` | Type of message (INPUT, OUTPUT) |
| `role` | `str` | Role name (e.g., "user", "assistant", "system") |
| `value` | `Any` | Message content |
Returns: None
If you’re using Netra’s auto-instrumentation for LLM calls, conversation messages are captured automatically. Using this method may result in duplicate messages.
shutdown
Gracefully shutdown the SDK, flushing any pending spans.
Returns: None
SpanWrapper Class
The SpanWrapper class provides methods for enriching spans with additional context. It’s returned by Netra.start_span() and supports method chaining.
set_attribute
Add a custom attribute to the span.
span.set_attribute( "query" , user_query)
span.set_attribute( "results.count" , len (results))
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `key` | `str` | Attribute key |
| `value` | `Any` | Attribute value |
Returns: SpanWrapper (for chaining)
add_event
Record a timestamped event within the span.
span.add_event( "validation-started" )
span.add_event( "validation-completed" , { "valid" : True , "errors" : 0 })
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `name` | `str` | Event name |
| `attributes` | `Optional[Dict[str, Any]]` | Event attributes |
Returns: SpanWrapper (for chaining)
set_prompt
Set the input prompt for LLM spans.
span.set_prompt( "Summarize this article: ..." )
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `prompt` | `str` | The prompt text |
Returns: SpanWrapper (for chaining)
set_negative_prompt
Set the negative prompt (commonly used for image generation).
span.set_negative_prompt( "blurry, low quality, distorted" )
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `prompt` | `str` | The negative prompt text |
Returns: SpanWrapper (for chaining)
set_model
Set the model name used in the operation.
span.set_model( "gpt-4-turbo" )
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `model` | `str` | Model name |
Returns: SpanWrapper (for chaining)
set_llm_system
Set the LLM provider/system name.
span.set_llm_system( "openai" )
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `system` | `str` | LLM system name |
Returns: SpanWrapper (for chaining)
set_usage
Record token usage and cost metrics.
from netra import UsageModel
span.set_usage([
UsageModel(
model = "gpt-4" ,
cost_in_usd = 0.006 ,
usage_type = "chat" ,
units_used = 1
)
])
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `usage_data` | `List[UsageModel]` | List of usage records |
Returns: SpanWrapper (for chaining)
set_action
Track actions or tool calls within the span.
from netra import ActionModel
span.set_action([
ActionModel(
action = "DB" ,
action_type = "INSERT" ,
affected_records = [
{ "record_id" : "123" , "record_type" : "user" }
],
metadata = { "table" : "users" },
success = True
)
])
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `actions` | `List[ActionModel]` | List of action records |
Returns: SpanWrapper (for chaining)
set_success
Mark the span as successful.
Returns: SpanWrapper (for chaining)
set_error
Mark the span as failed with an error message.
span.set_error( "Failed to connect to database" )
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `message` | `str` | Error message |
Returns: SpanWrapper (for chaining)
Models
SpanType
Enum for categorizing spans.
from netra import SpanType
SpanType. SPAN # General operations (default)
SpanType. GENERATION # LLM completions, image generation
SpanType. EMBEDDING # Vector embedding operations
SpanType. TOOL # Function calls, API requests
SpanType. AGENT # AI agent operations
UsageModel
Model for tracking token usage and costs.
from netra import UsageModel
usage = UsageModel(
model = "gpt-4" ,
cost_in_usd = 0.0045 ,
usage_type = "chat" ,
units_used = 1
)
Fields:
| Field | Type | Description |
|---|---|---|
| `model` | `str` | Model name |
| `cost_in_usd` | `Optional[float]` | Cost in USD |
| `usage_type` | `Optional[str]` | Type of usage |
| `units_used` | `Optional[int]` | Units consumed |
ActionModel
Model for tracking actions and tool calls.
from netra import ActionModel
action = ActionModel(
action = "API" ,
action_type = "CALL" ,
affected_records = [
{ "record_id" : "order-123" , "record_type" : "order" }
],
metadata = {
"endpoint" : "/api/orders" ,
"method" : "POST" ,
"status_code" : "201"
},
success = True
)
Fields:
| Field | Type | Description |
|---|---|---|
| `action` | `str` | Action category (e.g., "DB", "API", "CACHE") |
| `action_type` | `str` | Action subtype (e.g., "INSERT", "CALL") |
| `affected_records` | `Optional[List[Dict]]` | Records affected by the action |
| `metadata` | `Optional[Dict[str, str]]` | Additional metadata |
| `success` | `bool` | Whether the action succeeded |
ConversationType
Enum for conversation entry types.
from netra.session_manager import ConversationType
ConversationType. INPUT # User input messages
ConversationType. OUTPUT # Model output messages
Decorators
The SDK provides decorators for easy function instrumentation.
@agent
Mark a function or class as an AI agent.
from netra.decorators import agent
@agent
def research_agent ( query : str ):
return perform_research(query)
@agent ( name = "customer-support" )
def support_agent ( request : dict ):
return handle_request(request)
@task
Mark a function or class as a task or tool.
from netra.decorators import task
@task
def fetch_data ( query : str ):
return database.query(query)
@task ( name = "web-search" )
def search_web ( query : str ):
return search_api.search(query)
@workflow
Mark a function or class as a workflow.
from netra.decorators import workflow
@workflow
def process_order ( order : dict ):
validate_order(order)
process_payment(order)
fulfill_order(order)
Dashboard Client
The dashboard client provides methods to query dashboard data, session summaries, and session statistics programmatically.
query_data
Fetch dashboard data with customizable metrics, dimensions, and filters.
from netra import Netra
from netra.dashboard import *
Netra.init( app_name = "my-app" )
result = Netra.dashboard.query_data(
scope = Scope. SPANS ,
chart_type = ChartType. LINE_TIME_SERIES ,
metrics = Metrics(
measure = Measure. TOTAL_COST ,
aggregation = Aggregation. TOTAL_COUNT ,
),
dimension = Dimension( field = DimensionField. SERVICE ),
filter = FilterConfig(
start_time = "2026-01-01T00:00:00.000Z" ,
end_time = "2026-01-31T23:59:59.000Z" ,
group_by = GroupBy. DAY ,
),
)
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `scope` | `Scope` | Data scope (SPANS or TRACES) |
| `chart_type` | `ChartType` | Visualization type (LINE_TIME_SERIES, BAR_TIME_SERIES, PIE, etc.) |
| `metrics` | `Metrics` | What to measure and how to aggregate |
| `filter` | `FilterConfig` | Time range, grouping, and filter conditions |
| `dimension` | `Optional[Dimension]` | Optional grouping dimension |
Returns: dict | Any
get_session_summary
Retrieve aggregated session metrics including total sessions, costs, and latency.
from netra import Netra
from netra.dashboard import *
result = Netra.dashboard.get_session_summary(
filter = SessionFilterConfig(
start_time = "2026-01-01T00:00:00.000Z" ,
end_time = "2026-01-31T23:59:59.000Z" ,
filters = [
SessionFilter(
field = SessionFilterField. TENANT_ID ,
operator = SessionFilterOperator. ANY_OF ,
type = SessionFilterType. ARRAY ,
value = [ "TenantA" , "TenantB" ]
)
]
)
)
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `filter` | `SessionFilterConfig` | Filter configuration with time range and optional filters |
Returns: dict | Any
get_session_stats
Fetch a paginated list of sessions with individual metrics.
from netra import Netra
from netra.dashboard import *
stats = Netra.dashboard.get_session_stats(
start_time = "2026-01-01T00:00:00.000Z" ,
end_time = "2026-01-31T23:59:59.000Z" ,
limit = 10 ,
sort_field = SortField. TOTAL_COST ,
sort_order = SortOrder. DESC
)
for session in stats.data:
print (session)
# Handle pagination
if stats.has_next_page:
next_stats = Netra.dashboard.get_session_stats(
start_time = "2026-01-01T00:00:00.000Z" ,
end_time = "2026-01-31T23:59:59.000Z" ,
cursor = stats.next_cursor
)
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `start_time` | `str` | Start of time window (ISO 8601 UTC) |
| `end_time` | `str` | End of time window (ISO 8601 UTC) |
| `filters` | `Optional[List[SessionFilter]]` | Optional filter conditions |
| `limit` | `Optional[int]` | Maximum results per page |
| `cursor` | `Optional[str]` | Pagination cursor |
| `sort_field` | `Optional[SortField]` | Sort field (SESSION_ID, START_TIME, TOTAL_REQUESTS, TOTAL_COST) |
| `sort_order` | `Optional[SortOrder]` | Sort direction (ASC, DESC) |
Returns: SessionStatsResult
iter_session_stats
Iterator that automatically handles pagination for session stats.
from netra import Netra
from netra.dashboard import *
for session in Netra.dashboard.iter_session_stats(
start_time = "2026-01-01T00:00:00.000Z" ,
end_time = "2026-01-31T23:59:59.000Z" ,
sort_field = SortField. TOTAL_COST ,
sort_order = SortOrder. DESC
):
print (session)
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `start_time` | `str` | Start of time window (ISO 8601 UTC) |
| `end_time` | `str` | End of time window (ISO 8601 UTC) |
| `filters` | `Optional[List[SessionFilter]]` | Optional filter conditions |
| `sort_field` | `Optional[SortField]` | Sort field |
| `sort_order` | `Optional[SortOrder]` | Sort direction |
Returns: Iterator[SessionStatsResult]
For detailed documentation on all dashboard enums, types, and filtering options, see Dashboard Query .
Usage Client
The usage client provides methods to query usage metrics, list traces, and fetch span data.
get_session_usage
Fetch usage metrics for a single session.
from netra import Netra
Netra.init( app_name = "my-app" )
session_usage = Netra.usage.get_session_usage(
session_id = "session-123" ,
start_time = "2026-01-01T00:00:00.000Z" ,
end_time = "2026-01-31T23:59:59.000Z" ,
)
print (session_usage)
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `session_id` | `str` | Unique session identifier |
| `start_time` | `str` | Start of time window (ISO 8601 UTC) |
| `end_time` | `str` | End of time window (ISO 8601 UTC) |
Returns: SessionUsageData | Any
get_tenant_usage
Fetch aggregated usage metrics for a tenant.
from netra import Netra
tenant_usage = Netra.usage.get_tenant_usage(
tenant_id = "AceTech" ,
start_time = "2026-01-01T00:00:00.000Z" ,
end_time = "2026-01-31T23:59:59.000Z" ,
)
print (tenant_usage)
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `tenant_id` | `str` | Tenant identifier |
| `start_time` | `str` | Start of time window (ISO 8601 UTC) |
| `end_time` | `str` | End of time window (ISO 8601 UTC) |
Returns: TenantUsageData | Any
list_traces
Query traces for a time range with optional filtering and pagination.
from netra import Netra
traces_page = Netra.usage.list_traces(
start_time = "2026-01-01T00:00:00.000Z" ,
end_time = "2026-01-31T23:59:59.000Z" ,
tenant_id = "AceTech" ,
limit = 10 ,
sort_field = "start_time" ,
sort_order = "desc" ,
)
for trace in traces_page.traces:
print (trace)
# Handle pagination
if traces_page.has_next_page:
next_page = Netra.usage.list_traces(
start_time = "2026-01-01T00:00:00.000Z" ,
end_time = "2026-01-31T23:59:59.000Z" ,
cursor = traces_page.next_cursor
)
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `start_time` | `str` | Start of time range (ISO 8601 UTC) |
| `end_time` | `str` | End of time range (ISO 8601 UTC) |
| `tenant_id` | `Optional[str]` | Filter by tenant |
| `user_id` | `Optional[str]` | Filter by user |
| `session_id` | `Optional[str]` | Filter by session |
| `limit` | `Optional[int]` | Maximum traces per page |
| `cursor` | `Optional[str]` | Pagination cursor |
| `direction` | `Optional[Literal["up", "down"]]` | Pagination direction |
| `sort_field` | `Optional[str]` | Field to sort by |
| `sort_order` | `Optional[Literal["asc", "desc"]]` | Sort order |
Returns: TracesPage | Any
iter_traces
Iterator that automatically handles pagination for traces.
from netra import Netra
for trace in Netra.usage.iter_traces(
start_time = "2026-01-01T00:00:00.000Z" ,
end_time = "2026-01-31T23:59:59.000Z" ,
tenant_id = "AceTech" ,
sort_field = "start_time" ,
sort_order = "desc" ,
):
print (trace)
Returns: Iterator[TraceSummary]
list_spans_by_trace_id
Fetch spans within a single trace.
from netra import Netra
spans_page = Netra.usage.list_spans_by_trace_id(
trace_id = "728c1de6fa4c53de143a7d7fef33ff91" ,
limit = 10 ,
span_name = "openai.chat" ,
)
for span in spans_page.spans:
print (span)
Parameters:
| Parameter | Type | Description |
|---|---|---|
| `trace_id` | `str` | ID of the trace |
| `cursor` | `Optional[str]` | Pagination cursor |
| `direction` | `Optional[Literal["up", "down"]]` | Pagination direction |
| `limit` | `Optional[int]` | Maximum spans per page |
| `span_name` | `Optional[str]` | Filter by span name |
Returns: SpansPage | Any
iter_spans_by_trace_id
Iterator that automatically handles pagination for spans.
from netra import Netra
for span in Netra.usage.iter_spans_by_trace_id(
trace_id = "728c1de6fa4c53de143a7d7fef33ff91" ,
span_name = "generation_pipeline" ,
):
print (span)
Returns: Iterator[TraceSpan]
Instruments
Available instrumentations for auto-tracing.
from netra.instrumentation.instruments import InstrumentSet
# LLM Providers
InstrumentSet. OPENAI
InstrumentSet. ANTHROPIC
InstrumentSet. COHERE
InstrumentSet. GOOGLE_GENERATIVEAI
InstrumentSet. MISTRALAI
InstrumentSet. GROQ
InstrumentSet. BEDROCK
InstrumentSet. VERTEXAI
InstrumentSet. OLLAMA
InstrumentSet. REPLICATE
InstrumentSet. TOGETHER
InstrumentSet. TRANSFORMERS
InstrumentSet. LITELLM
# AI Frameworks
InstrumentSet. LANGCHAIN
InstrumentSet. LANGGRAPH
InstrumentSet. LLAMAINDEX
InstrumentSet. HAYSTACK
InstrumentSet. CREWAI
InstrumentSet. DSPY
InstrumentSet. PYDANTIC_AI
# Vector Databases
InstrumentSet. PINECONE
InstrumentSet. WEAVIATE
InstrumentSet. QDRANT
InstrumentSet. CHROMADB
InstrumentSet. MILVUS
InstrumentSet. LANCEDB
InstrumentSet. MARQO
# HTTP Clients
InstrumentSet. HTTPX
InstrumentSet. AIOHTTP
InstrumentSet. REQUESTS
InstrumentSet. URLLIB
InstrumentSet. URLLIB3
# Web Frameworks
InstrumentSet. FASTAPI
InstrumentSet. FLASK
InstrumentSet. DJANGO
InstrumentSet. STARLETTE
# Databases
InstrumentSet. SQLALCHEMY
InstrumentSet. SQLITE3
InstrumentSet. PSYCOPG
InstrumentSet. PSYCOPG2
InstrumentSet. PYMYSQL
InstrumentSet. PYMONGO
InstrumentSet. REDIS
InstrumentSet. ELASTICSEARCH
# Message Queues
InstrumentSet. CELERY
InstrumentSet. PIKA
InstrumentSet. AIO_PIKA
InstrumentSet. KAFKA_PYTHON
InstrumentSet. CONFLUENT_KAFKA
Complete Example
import os
from netra import Netra, SpanType, UsageModel
from netra.instrumentation.instruments import InstrumentSet
from openai import OpenAI
# Initialize SDK
Netra.init(
app_name = "my-ai-app" ,
headers = f "x-api-key= { os.getenv( 'NETRA_API_KEY' ) } " ,
environment = "production" ,
instruments = {InstrumentSet. OPENAI },
)
# Set context
Netra.set_user_id( "user-123" )
Netra.set_session_id( "session-abc" )
# Create a traced operation
def chat_with_ai ( user_message : str ) -> str :
with Netra.start_span( "chat-completion" , as_type = SpanType. GENERATION ) as span:
span.set_prompt(user_message)
span.set_model( "gpt-4" )
span.set_llm_system( "openai" )
client = OpenAI()
response = client.chat.completions.create(
model = "gpt-4" ,
messages = [{ "role" : "user" , "content" : user_message}]
)
# Calculate cost based on token usage
prompt_cost = (response.usage.prompt_tokens / 1000 ) * 0.03
completion_cost = (response.usage.completion_tokens / 1000 ) * 0.06
total_cost = prompt_cost + completion_cost
span.set_usage([
UsageModel(
model = "gpt-4" ,
cost_in_usd = total_cost,
usage_type = "chat" ,
units_used = 1
)
])
span.set_success()
return response.choices[ 0 ].message.content
# Use the function
result = chat_with_ai( "What is the capital of France?" )
print (result)
# Shutdown gracefully
Netra.shutdown()
Next Steps