
Installation

Install both the Netra SDK and DSPy:
pip install netra-sdk dspy-ai

Usage

Initialize the Netra SDK to automatically trace all DSPy operations:
from netra import Netra
import dspy
import os

# Initialize Netra
Netra.init(
    headers=f"x-api-key={os.environ.get('NETRA_API_KEY')}",
    trace_content=True
)

# Configure DSPy - automatically traced
lm = dspy.OpenAI(model="gpt-3.5-turbo")
dspy.settings.configure(lm=lm)

# Use DSPy as normal
qa = dspy.Predict("question -> answer")
result = qa(question="What is DSPy?")
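Once Netra.init has run, no per-call instrumentation is needed; every LM call DSPy makes is traced automatically. As a quick illustration, building on the qa predictor defined above:

# Each call below produces a traced LLM invocation without extra code
for q in ["What is DSPy?", "What is prompt optimization?"]:
    print(qa(question=q).answer)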

Getting Started

Trace DSPy signatures and modules:
from netra import task, SpanWrapper
import dspy

# Define a DSPy signature for question answering
class QA(dspy.Signature):
    """Answer questions with short factual answers."""
    question = dspy.InputField()
    answer = dspy.OutputField(desc="short answer")

@task()
def simple_qa(question: str) -> str:
    span = SpanWrapper("dspy-qa", {
        "question": question
    }).start()
    
    predictor = dspy.Predict(QA)
    result = predictor(question=question)
    
    span.set_attribute("answer", result.answer)
    span.end()
    
    return result.answer

result = simple_qa("What is machine learning?")
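One caveat with the sketch above: if the predictor raises, span.end() is never reached and the span stays open. A hedged variant of the same function, using a plain try/finally around the SpanWrapper shown above, closes the span either way:

@task()
def simple_qa_safe(question: str) -> str:
    span = SpanWrapper("dspy-qa", {"question": question}).start()
    try:
        result = dspy.Predict(QA)(question=question)
        span.set_attribute("answer", result.answer)
        return result.answer
    finally:
        # end() runs whether the prediction succeeds or raises
        span.end()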

Advanced Features

Trace composed DSPy modules, such as a RAG pipeline:
from netra import workflow, task, SpanWrapper
import dspy

class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate = dspy.ChainOfThought("context, question -> answer")
    
    @task()
    def forward(self, question):
        context = self.retrieve(question).passages
        answer = self.generate(context=context, question=question)
        return answer

@workflow()
def rag_pipeline(question: str):
    span = SpanWrapper("rag-pipeline", {
        "question": question
    }).start()
    
    rag = RAG()
    result = rag.forward(question=question)
    
    span.set_attribute("answer", result.answer)
    span.end()
    
    return result
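Running the pipeline requires both a language model and a retrieval model to be configured, since dspy.Retrieve reads the retrieval backend from dspy.settings. A minimal sketch; the ColBERTv2 URL below is a placeholder you would replace with your own retrieval endpoint:

# Placeholder retrieval backend for dspy.Retrieve
rm = dspy.ColBERTv2(url="http://your-colbert-endpoint:8893/api/search")
dspy.settings.configure(lm=dspy.OpenAI(model="gpt-3.5-turbo"), rm=rm)

result = rag_pipeline("What is retrieval-augmented generation?")
print(result.answer)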

Chain of Thought

Trace reasoning chains:
from netra import task, SpanWrapper
import dspy

@task()
def reasoning_task(question: str):
    span = SpanWrapper("chain-of-thought", {
        "question": question
    }).start()
    
    # Use Chain of Thought
    cot = dspy.ChainOfThought("question -> answer")
    result = cot(question=question)
    
    span.set_attribute("reasoning", result.rationale)
    span.set_attribute("answer", result.answer)
    span.end()
    
    return result
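Calling the task returns the full prediction, so both span attributes can be inspected directly; rationale is the intermediate reasoning that legacy DSPy's ChainOfThought exposes:

result = reasoning_task("Why is the sky blue?")
print(result.rationale)  # recorded on the span as "reasoning"
print(result.answer)     # recorded on the span as "answer"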

Optimization

Trace DSPy optimizers:
from netra import workflow, SpanWrapper
import dspy
from dspy.teleprompt import BootstrapFewShot

@workflow()
def optimize_module(module, trainset):
    opt_span = SpanWrapper("dspy-optimization", {
        "trainset.size": len(trainset)
    }).start()
    
    # Configure optimizer (the metric receives example, prediction, and an optional trace)
    optimizer = BootstrapFewShot(
        metric=lambda example, pred, trace=None: example.answer == pred.answer
    )
    
    # Compile the module
    compiled = optimizer.compile(module, trainset=trainset)
    
    opt_span.set_attribute("optimization.status", "completed")
    opt_span.end()
    
    return compiled
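A minimal sketch of calling the workflow, assuming a toy one-step module and a couple of dspy.Example items as the training set (the questions and answers are illustrative only):

class SimpleQA(dspy.Module):
    """Toy program to optimize: a single Predict step."""
    def __init__(self):
        super().__init__()
        self.predict = dspy.Predict("question -> answer")

    def forward(self, question):
        return self.predict(question=question)

# Illustrative training examples; with_inputs marks which fields are inputs
trainset = [
    dspy.Example(question="What is DSPy?",
                 answer="A framework for programming language models").with_inputs("question"),
    dspy.Example(question="What does RAG stand for?",
                 answer="Retrieval-augmented generation").with_inputs("question"),
]

compiled_qa = optimize_module(SimpleQA(), trainset)
print(compiled_qa(question="What is DSPy?").answer)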

Configuration

Configure DSPy tracing options:
from netra import Netra
from netra.instrumentation.instruments import InstrumentSet
import os

# Enable specific instruments for DSPy's underlying LLM
Netra.init(
    headers=f"x-api-key={os.environ.get('NETRA_API_KEY')}",
    trace_content=True,
    instruments={InstrumentSet.OPENAI}  # DSPy uses OpenAI
)
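If prompt and completion text should stay out of the trace, and assuming trace_content accepts False as well as True, content capture can be turned off while spans are still emitted, using the same parameters shown above:

# Spans are still created, but message content is not recorded
Netra.init(
    headers=f"x-api-key={os.environ.get('NETRA_API_KEY')}",
    trace_content=False,
    instruments={InstrumentSet.OPENAI}
)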
