Python SDK
The Python SDK lets you instrument AI agents built with LangChain, CrewAI, or custom Python code. It supports Python 3.9 and above.
Installation
pip install nodeloom-sdk

Quick Start
from nodeloom import NodeLoom, SpanType, TraceStatus
client = NodeLoom(api_key="sdk_...")
# Start a trace for your agent
trace = client.trace("customer-support-agent", input={"query": "How do I reset my password?"})
# Track an LLM call
with trace.span("openai-chat", type=SpanType.LLM) as span:
result = openai.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "How do I reset my password?"}]
)
span.set_output({"response": result.choices[0].message.content})
span.set_token_usage(
prompt_tokens=result.usage.prompt_tokens,
completion_tokens=result.usage.completion_tokens,
model="gpt-4o"
)
# Track a tool call
with trace.span("lookup-user", type=SpanType.TOOL) as span:
span.set_input({"email": "[email protected]"})
user = lookup_user("[email protected]")
span.set_output({"user_id": user.id})
# End the trace
trace.end(status=TraceStatus.SUCCESS, output={"answer": "You can reset your password from..."})
# Always shut down before exit
client.shutdown()

Client Configuration
| Parameter | Type | Default | Description |
|---|---|---|---|
| api_key | str | (required) | Your SDK token (starts with sdk_) |
| endpoint | str | "https://api.nodeloom.io" | NodeLoom API URL |
| batch_size | int | 100 | Maximum events per batch |
| flush_interval | float | 5.0 | Seconds between automatic flushes |
| max_queue_size | int | 10000 | Maximum events held in memory before dropping |
client = NodeLoom(
api_key="sdk_...",
batch_size=50,
flush_interval=3.0,
max_queue_size=5000
)

Traces
A trace represents one run of your agent. Create a trace at the start and end it when the agent finishes.
# Basic trace
trace = client.trace("my-agent")
# ... agent work ...
trace.end(status=TraceStatus.SUCCESS)
# Trace with input, output, and metadata
trace = client.trace(
"my-agent",
input={"query": "Tell me about AI"},
metadata={"user_id": "u123", "session_id": "s456"},
environment="production" # or "development", "staging"
)
# ... agent work ...
trace.end(
status=TraceStatus.SUCCESS,
output={"answer": "AI is..."}
)
# Trace with error
trace = client.trace("my-agent", input={"query": "..."})
try:
# ... agent work ...
trace.end(status=TraceStatus.SUCCESS, output=result)
except Exception as e:
trace.end(status=TraceStatus.ERROR, error=str(e))

Spans
Spans track individual operations within a trace. Use the context manager pattern to automatically handle timing and errors.
# Context manager (recommended)
with trace.span("my-llm-call", type=SpanType.LLM) as span:
span.set_input({"prompt": "Hello"})
result = call_llm("Hello")
span.set_output({"response": result})
# Manual span management
span = trace.span("my-tool-call", type=SpanType.TOOL)
span.set_input({"query": "search term"})
result = search_database("search term")
span.set_output({"results": result})
span.end()  # or span.end(error="something went wrong")

Span Types
| Constant | Value | Use Case |
|---|---|---|
| SpanType.LLM | llm | LLM API calls |
| SpanType.TOOL | tool | Tool or function calls |
| SpanType.RETRIEVAL | retrieval | Vector search, document retrieval |
| SpanType.AGENT | agent | Sub-agent invocations |
| SpanType.CHAIN | chain | Chain or pipeline steps |
| SpanType.CUSTOM | custom | Any other operation |
Token Usage
with trace.span("llm-call", type=SpanType.LLM) as span:
result = openai.chat.completions.create(...)
span.set_token_usage(
prompt_tokens=result.usage.prompt_tokens,
completion_tokens=result.usage.completion_tokens,
model="gpt-4o"
)

LangChain Integration
The SDK includes a callback handler that automatically instruments LangChain chains and agents. It captures LLM calls, tool invocations, and chain steps as spans.
from nodeloom import NodeLoom
from nodeloom.integrations.langchain import NodeLoomCallbackHandler
client = NodeLoom(api_key="sdk_...")
handler = NodeLoomCallbackHandler(client)
# Use with any LangChain chain or agent
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
llm = ChatOpenAI(model="gpt-4o")
prompt = ChatPromptTemplate.from_template("Tell me about {topic}")
chain = prompt | llm
# The handler automatically creates traces and spans
result = chain.invoke(
{"topic": "quantum computing"},
config={"callbacks": [handler]}
)
client.shutdown()

CrewAI Integration
For CrewAI, use the dedicated handler that maps crews, agents, and tasks to NodeLoom traces and spans.
from nodeloom import NodeLoom
from nodeloom.integrations.crewai import NodeLoomCrewHandler
client = NodeLoom(api_key="sdk_...")
handler = NodeLoomCrewHandler(client)
from crewai import Agent, Task, Crew
researcher = Agent(role="Researcher", goal="...", backstory="...")
task = Task(description="Research AI trends", agent=researcher)
crew = Crew(agents=[researcher], tasks=[task])
# Attach the handler to instrument the crew
result = crew.kickoff(callbacks=[handler])
client.shutdown()

Custom Metrics
Record custom metrics to track agent performance, latency, token usage, or any numeric value you want to monitor over time.
# Record a custom metric
client.metric("response_latency", 1.23, unit="seconds", tags={"model": "gpt-4o"})
client.metric("tokens_per_request", 450, unit="tokens")
client.metric("user_satisfaction", 4.5, unit="score", tags={"agent": "support"})

Feedback
Submit feedback for a trace to track user satisfaction, flag issues, or label data for fine-tuning.
# Submit feedback for a trace
client.feedback(trace_id=trace.trace_id, rating=5, comment="Accurate and helpful")
client.feedback(trace_id=trace.trace_id, rating=1, comment="Hallucinated facts", tags=["inaccurate"])

Session Tracking
Group related traces into a conversation session to track multi-turn interactions with a single user or workflow.
# Group traces into a conversation session
from uuid import uuid4

session_id = "conv-" + str(uuid4())
trace1 = client.trace("support-agent", session_id=session_id, input={"query": "What's my balance?"})
# ... agent logic ...
trace1.end(status=TraceStatus.SUCCESS, output={"answer": "$1,234.56"})
trace2 = client.trace("support-agent", session_id=session_id, input={"query": "Transfer $100"})
# ... agent logic ...
trace2.end(status=TraceStatus.SUCCESS, output={"answer": "Transfer initiated"})

Prompt Templates
Register and version prompt templates to track which prompts are used across your agents. Link templates to spans for full traceability.
# Register a prompt template
client.set_prompt(
"customer-support-system",
content="You are a helpful customer support agent for {{company_name}}. Always be polite and concise.",
variables=["company_name"],
model_hint="gpt-4o"
)
# Track prompt version in spans
with trace.span("llm-call", type=SpanType.LLM) as span:
span.set_prompt_info(template="customer-support-system", version=2)
    # ... LLM call ...

Callback URL
Register a callback URL for your agent so NodeLoom can invoke it during red team testing and batch evaluations.
# Register callback for red team testing and batch evaluation
client.register_callback("my-agent", "https://my-agent.example.com/callback")
# Your callback endpoint should handle:
# POST with {"prompt": "...", "category": "redteam|eval"}
# Return: {"response": "agent's response text"}

Guardrail Configuration
Read the guardrail configuration for your agent. Guardrails are configured per-agent in the NodeLoom UI and accessed read-only via the SDK.
# Read guardrail config (configured in NodeLoom UI)
config = client.get_guardrail_config("my-agent")
if config and config.get("detectPromptInjection"):
print("Prompt injection detection is enabled")
# Guardrails are configured per-agent in the NodeLoom UI:
# Workflows → click SDK agent → Guardrails tab

Read-only access
Guardrail settings cannot be changed from the SDK — they can only be read with get_guardrail_config().

Shutdown
Always call shutdown() before your application exits. This flushes any remaining events in the queue and waits for pending requests to complete.
# Blocks until all events are flushed (up to 10s timeout)
client.shutdown()
# With custom timeout
client.shutdown(timeout=30.0)

Unflushed events