Custom Orchestration
Instrument a pure Python agent without a framework using @span, trace(), and custom tool attributes.
This guide shows how to instrument a custom Python agent — one that doesn't use a framework for tool dispatch. Framework tools are auto-instrumented; custom tools need explicit spans and attributes.
Setup
# Credentials come from the environment so keys are never hard-coded.
import os
import neatlogs
from openai import OpenAI
# Initialize the SDK once at process startup. Listing "openai" in
# instrumentations auto-captures every OpenAI client call made below;
# workflow_name groups all spans from this app in the dashboard.
neatlogs.init(
api_key=os.environ["NEATLOGS_API_KEY"],
endpoint=os.environ["NEATLOGS_ENDPOINT"],
workflow_name="research-agent",
instrumentations=["openai"],
)
client = OpenAI()

Framework Tools vs. Custom Tools
If you use a supported framework like LangChain, add "langchain" to instrumentations and tool calls are captured automatically — no manual spans needed:
from langchain_core.tools import tool
# Captured automatically via langchain instrumentation
@tool
def lookup_order(order_id: str) -> str:
"""Look up order status by order ID."""
...

For a custom Python function not wrapped by a framework, use @neatlogs.span(kind="TOOL"). The decorator wraps the function call and records its arguments as input.value and its return value as output.value on the span:
@neatlogs.span(kind="TOOL", tool_name="web_search", description="Search the web for recent articles")
def web_search(query: str) -> list:
# Your search implementation
return [{"title": "Result", "snippet": "..."}]

When using with neatlogs.trace() for inline tool logic, the span has no function wrapper — set neatlogs.tool.* attributes and output.value yourself:
import json
import requests
import neatlogs
def call_external_api(endpoint: str, payload: dict) -> dict:
with neatlogs.trace("external_api_call", kind="TOOL") as span:
span.set_attribute("neatlogs.tool.name", "external_api")
span.set_attribute("neatlogs.tool.description", "Call an external REST API")
span.set_attribute("neatlogs.tool.parameters", json.dumps({"endpoint": endpoint}))
span.set_attribute("neatlogs.tool.json_schema", json.dumps({
"type": "object",
"properties": {
"endpoint": {"type": "string"},
"payload": {"type": "object"},
},
}))
result = requests.post(endpoint, json=payload).json()
span.set_attribute("output.value", json.dumps(result))
return result

Define the Agent
Use @neatlogs.span(kind="AGENT") on the reasoning loop. Place both compile() calls and the LLM call inside with neatlogs.trace(kind="LLM", ...) so the template, compiled values, and LLM response are all captured on the same span:
import json
import neatlogs
from neatlogs import PromptTemplate, UserPromptTemplate
system_tpl = PromptTemplate([
{"role": "system", "content": "You are a research assistant. Use available tools to answer the user's question."},
])
user_tpl = UserPromptTemplate([
{"role": "user", "content": "Question: {{question}}"},
])
client = OpenAI()
@neatlogs.span(kind="AGENT", name="research_agent", role="Researcher")
def research_agent(question: str) -> str:
messages = []
for _ in range(5): # max iterations
with neatlogs.trace("reason", kind="LLM",
prompt_template=system_tpl,
user_prompt_template=user_tpl):
system_msgs = system_tpl.compile()
user_msgs = user_tpl.compile(question=question)
if not messages:
messages = system_msgs + user_msgs
response = client.chat.completions.create(
model="gpt-4o",
messages=messages,
tools=[
{
"type": "function",
"function": {
"name": "web_search",
"parameters": {
"type": "object",
"properties": {"query": {"type": "string"}},
"required": ["query"],
},
},
}
],
)
msg = response.choices[0].message
if not msg.tool_calls:
return msg.content # final answer
messages.append(msg)
for call in msg.tool_calls:
args = json.loads(call.function.arguments)
if call.function.name == "web_search":
result = web_search(**args)
messages.append({
"role": "tool",
"tool_call_id": call.id,
"content": json.dumps(result),
})
return "Could not complete research."

Define the Workflow
Wrap the entry point with @neatlogs.span(kind="WORKFLOW"):
@neatlogs.span(kind="WORKFLOW", name="research_workflow")
def run(question: str) -> str:
return research_agent(question)

Run It
result = run("What are the latest advances in transformer architectures?")
print(result)
neatlogs.flush()
neatlogs.shutdown()

What You'll See in the Dashboard
A WORKFLOW span containing:
An AGENT span for research_agent
LLM spans for each reasoning step — prompt template, compiled variables, token counts
TOOL spans for web_search — neatlogs.tool.name, neatlogs.tool.description, input.value (arguments), output.value (return value)