Basic Usage
Basic Usage for the Nora Observability Python SDK
OpenAI Basic Usage
```python
import nora
from openai import OpenAI

nora_client = nora.init(api_key="your-nora-api-key", environment="production")
client = OpenAI(api_key="your-openai-key")

response = client.responses.create(
    model="gpt-5",
    input=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"}
    ]
)

print(response.output_text)
```
Anthropic Basic Usage
```python
import nora
from anthropic import Anthropic

nora_client = nora.init(api_key="your-nora-api-key")
client = Anthropic(api_key="your-anthropic-key")

response = client.messages.create(
    model="claude-opus-4-5-20251101",
    max_tokens=1024,
    messages=[
        {"role": "user", "content": "Explain quantum computing"}
    ]
)

print(response.content[0].text)
```
Google Gemini Basic Usage
```python
import nora
import google.generativeai as genai

nora_client = nora.init(api_key="your-nora-api-key")
genai.configure(api_key="your-gemini-key")

model = genai.GenerativeModel("gemini-2.5-pro")
response = model.generate_content("Write a poem about AI")

print(response.text)
```
Understanding Trace Groups
A trace group collects multiple operations into a single logical unit. All operations within a trace group share the same trace identifier.
Key Concepts:
- All traces inside a trace group belong to that group
- Multiple trace groups create separate, independent traces
- Streaming requires trace groups due to SSE behavior (see the sketch below)
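Streamed responses arrive incrementally over SSE, so wrap streaming calls in a trace group so all chunks are attributed to a single trace. A minimal sketch, assuming the nora_client and OpenAI client initialized above; the workflow name and model are illustrative:
```python
# Streaming inside a trace group: the streamed chunks are attributed
# to the "streaming_workflow" trace (name is illustrative).
with nora_client.trace_group(name="streaming_workflow"):
    stream = client.chat.completions.create(
        model="gpt-5",
        messages=[{"role": "user", "content": "Tell me a short story"}],
        stream=True,
    )
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            print(delta, end="")
```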
```python
# Two independent traces are created
with nora_client.trace_group(name="first_workflow"):
    # All calls belong to the "first_workflow" trace
    response1 = client.chat.completions.create(...)
    response2 = client.chat.completions.create(...)

with nora_client.trace_group(name="second_workflow"):
    # All calls belong to the separate "second_workflow" trace
    response3 = client.chat.completions.create(...)
```
Trace Group Context Manager
```python
import nora
from openai import OpenAI

nora_client = nora.init(api_key="your-nora-api-key")
client = OpenAI(api_key="your-openai-key")

with nora_client.trace_group(name="customer_support_flow"):
    intent_response = client.responses.create(
        model="gpt-5",
        input=[
            {
                "role": "system",
                "content": "You are an intent classification assistant."
            },
            {
                "role": "user",
                "content": "I want to cancel my subscription"
            }
        ]
    )

    final_response = client.responses.create(
        model="gpt-5",
        input=[
            {
                "role": "system",
                "content": "You are a polite customer support agent."
            },
            {
                "role": "user",
                "content": "Generate a polite cancellation response"
            }
        ]
    )

print(final_response.output_text)
```
Trace Group Decorator
```python
import nora
from openai import OpenAI

nora_client = nora.init(api_key="your-nora-api-key")
client = OpenAI(api_key="your-openai-key")

@nora_client.trace_group(name="analyze_sentiment")
def analyze_and_respond(text):
    sentiment_response = client.responses.create(
        model="gpt-5",
        input=[
            {
                "role": "system",
                "content": "Analyze sentiment and return one of: positive, negative, neutral."
            },
            {
                "role": "user",
                "content": text
            }
        ]
    )
    sentiment = sentiment_response.output_text.strip()

    final_response = client.responses.create(
        model="gpt-5",
        input=[
            {
                "role": "system",
                "content": "Generate an appropriate response based on the given sentiment."
            },
            {
                "role": "user",
                "content": f"Sentiment: {sentiment}"
            }
        ]
    )
    return final_response.output_text

result = analyze_and_respond("I love this product!")
print(result)
```
Custom Function Tracking
Functions decorated with @nora_client.trace() are tracked when called inside a trace group.
```python
import nora
from openai import OpenAI

nora_client = nora.init(api_key="your-nora-api-key")
client = OpenAI(api_key="your-openai-key")

@nora_client.trace(span_type="retrieval")
def fetch_user_context(user_id):
    return {
        "user_id": user_id,
        "preferences": ["tech", "science"],
        "history": ["AI article", "ML tutorial"]
    }

@nora_client.trace_group(name="personalized_recommendation")
def generate_recommendation(user_id):
    context = fetch_user_context(user_id)
    response = client.responses.create(
        model="gpt-5",
        input=[
            {
                "role": "system",
                "content": f"User preferences: {context['preferences']}"
            },
            {
                "role": "user",
                "content": "Recommend an article"
            }
        ]
    )
    return response.output_text

recommendation = generate_recommendation("user_123")
print(recommendation)
```
Outside a trace group, decorated functions execute normally but are not recorded.
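For example, calling the retrieval helper above on its own still returns its result; it simply does not emit a span (the user id here is illustrative):
```python
# Called outside any trace group: the function runs normally,
# but nothing is recorded by Nora.
context = fetch_user_context("user_456")
print(context["preferences"])
```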