Guides
Multi-Agent LangGraph
Instrument a LangGraph multi-agent workflow with per-node AGENT spans.
This guide shows how to add observability to a LangGraph workflow where a supervisor routes to specialist agents.
Setup
import os

# Initialize neatlogs FIRST so its instrumentation patches are in place
# before any instrumented library (LangChain, OpenAI) is imported.
import neatlogs

neatlogs.init(
    api_key=os.environ["NEATLOGS_API_KEY"],        # project API key
    endpoint=os.environ["NEATLOGS_ENDPOINT"],      # collector URL
    workflow_name="customer-support",              # groups traces in the dashboard
    instrumentations=["langchain", "openai"],      # auto-capture spans from both SDKs
)

# Import LangChain/LangGraph AFTER init()
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, SystemMessage
from langgraph.graph import END, START, StateGraph

Define State and Agents
Decorate each LangGraph node function with @neatlogs.span(kind="AGENT"). The LLM calls inside each agent are captured automatically via the openai and langchain instrumentations.
Use with neatlogs.trace(kind="LLM", prompt_template=...) inside each agent to also capture the prompt template and variable values:
from typing import Annotated, Sequence, TypedDict, Literal
from langchain_core.messages import BaseMessage
# Fix: add_messages is a LangGraph reducer; it lives in langgraph.graph.message,
# not in langchain_core.messages (importing it from there raises ImportError).
from langgraph.graph.message import add_messages
import neatlogs


class WorkflowState(TypedDict):
    """Shared state passed between graph nodes."""

    # Conversation history; the add_messages reducer appends new messages
    # to the list instead of overwriting it on each node update.
    messages: Annotated[Sequence[BaseMessage], add_messages]
    # Routing label written by the supervisor: "knowledge" or "orders".
    intent: str


# One shared chat model for all agents; low temperature keeps routing stable.
llm = ChatOpenAI(model="gpt-4o", temperature=0.3)
# System prompt for the supervisor: constrains the model to a one-word
# intent classification so routing can key off the reply text.
supervisor_prompt = neatlogs.PromptTemplate(
    "Classify the intent of the user's query as either 'knowledge' or 'orders'. "
    "Respond with only the intent word."
)
# User prompt template; {{query}} is filled at compile() time and the
# variable value is captured on the LLM span.
supervisor_user_prompt = neatlogs.UserPromptTemplate("Query: {{query}}")
@neatlogs.span(kind="AGENT", name="supervisor")
def supervisor(state: WorkflowState) -> dict:
    """Classify the latest user message and record the routing intent."""
    latest_query = state["messages"][-1].content
    with neatlogs.trace("classify_intent", kind="LLM",
                        prompt_template=supervisor_prompt,
                        user_prompt_template=supervisor_user_prompt):
        # Compile the templates inside the trace so the rendered values
        # are attached to the LLM span.
        prompt_messages = [
            SystemMessage(content=supervisor_prompt.compile()),
            HumanMessage(content=supervisor_user_prompt.compile(query=latest_query)),
        ]
        response = llm.invoke(prompt_messages)
    # Normalize the free-text reply into one of the two routing labels.
    if "order" in response.content.lower():
        intent = "orders"
    else:
        intent = "knowledge"
    return {"intent": intent, "messages": [response]}
# System prompt for the knowledge specialist.
knowledge_prompt = neatlogs.PromptTemplate(
    "You are a helpful assistant. Answer the user's question about our products and policies."
)
# {{question}} is filled at compile() time and captured on the LLM span.
knowledge_user_prompt = neatlogs.UserPromptTemplate("{{question}}")
@neatlogs.span(kind="AGENT", name="knowledge_agent")
def knowledge_agent(state: WorkflowState) -> dict:
    """Answer a product/policy question with a traced LLM call."""
    original_question = state["messages"][0].content
    with neatlogs.trace("answer_question", kind="LLM",
                        prompt_template=knowledge_prompt,
                        user_prompt_template=knowledge_user_prompt):
        # Compile inside the trace so the rendered prompt and the
        # {{question}} value land on the LLM span.
        response = llm.invoke([
            SystemMessage(content=knowledge_prompt.compile()),
            HumanMessage(content=knowledge_user_prompt.compile(question=original_question)),
        ])
    return {"messages": [response]}
# System prompt for the order-management specialist.
orders_prompt = neatlogs.PromptTemplate(
    "You are an order management assistant. Help the user with their order inquiry."
)
# {{question}} is filled at compile() time and captured on the LLM span.
orders_user_prompt = neatlogs.UserPromptTemplate("{{question}}")
@neatlogs.span(kind="AGENT", name="orders_agent")
def orders_agent(state: WorkflowState) -> dict:
    """Handle an order inquiry with a traced LLM call."""
    inquiry = state["messages"][0].content
    with neatlogs.trace("handle_order", kind="LLM",
                        prompt_template=orders_prompt,
                        user_prompt_template=orders_user_prompt):
        # Render both templates inside the trace so their values are
        # attached to the LLM span.
        system_text = orders_prompt.compile()
        user_text = orders_user_prompt.compile(question=inquiry)
        response = llm.invoke([SystemMessage(content=system_text),
                               HumanMessage(content=user_text)])
    return {"messages": [response]}

Build and Run the Graph
Wrap the graph.invoke() call with with neatlogs.trace(kind="WORKFLOW"): to create a top-level WORKFLOW span that contains all agent spans:
def route(state: WorkflowState) -> Literal["knowledge_agent", "orders_agent"]:
    """Pick the specialist node based on the supervisor's stored intent."""
    if state["intent"] == "orders":
        return "orders_agent"
    return "knowledge_agent"
# Assemble the graph: supervisor runs first, then routes to one specialist.
graph = StateGraph(WorkflowState)
graph.add_node("supervisor", supervisor)
graph.add_node("knowledge_agent", knowledge_agent)
graph.add_node("orders_agent", orders_agent)
graph.add_edge(START, "supervisor")
# route's Literal return type tells LangGraph the possible target nodes.
graph.add_conditional_edges("supervisor", route)
graph.add_edge("knowledge_agent", END)
graph.add_edge("orders_agent", END)
app = graph.compile()

# Top-level WORKFLOW span: all AGENT/LLM spans created during invoke()
# nest under it in the dashboard.
with neatlogs.trace("support_request", kind="WORKFLOW"):
    result = app.invoke({
        "messages": [HumanMessage(content="What is your return policy?")],
        "intent": "",
    })

print(result["messages"][-1].content)

# Export any buffered spans before the process exits.
neatlogs.flush()
neatlogs.shutdown()

What You'll See in the Dashboard
A WORKFLOW span containing:
- An AGENT span for supervisor with the classification LLM call inside
- An AGENT span for knowledge_agent or orders_agent with the LLM call inside
- LangChain framework spans captured automatically