Multi-Agent LangGraph

Instrument a LangGraph multi-agent workflow with Neatlogs.

This guide shows how to add observability to a LangGraph workflow in which a supervisor node classifies each user query and routes it to a specialist agent. Alongside the Neatlogs SDK you'll need the langgraph and langchain-openai packages, plus an OPENAI_API_KEY in your environment for the ChatOpenAI calls.

Setup

import os
import neatlogs

neatlogs.init(
    api_key=os.environ["NEATLOGS_API_KEY"],
    endpoint=os.environ.get("NEATLOGS_ENDPOINT"),
    workflow_name="customer-support",
    instrumentations=["langchain"],
)

# Import LangChain/LangGraph AFTER init() so instrumentation hooks are in place
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, SystemMessage
from langgraph.graph import END, START, StateGraph

Define State and Agents

from typing import Annotated, Sequence, TypedDict, Literal
from langchain_core.messages import BaseMessage
from langgraph.graph.message import add_messages
import neatlogs
from neatlogs import PromptTemplate, UserPromptTemplate

class WorkflowState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], add_messages]
    intent: str

llm = ChatOpenAI(model="gpt-4o", temperature=0.3)

supervisor_prompt = PromptTemplate([{
    "role": "system",
    "content": "Classify the intent of the user's query as either 'knowledge' or 'orders'. Respond with only the intent word.",
}])
supervisor_user_prompt = UserPromptTemplate([{"role": "user", "content": "Query: {{query}}"}])


def supervisor(state: WorkflowState) -> dict:
    query = state["messages"][-1].content
    with neatlogs.trace("classify_intent", kind="LLM",
                        prompt_template=supervisor_prompt,
                        user_prompt_template=supervisor_user_prompt):
        msgs = supervisor_prompt.compile() + supervisor_user_prompt.compile(query=query)
        response = llm.invoke(msgs)
    intent = "orders" if "order" in response.content.lower() else "knowledge"
    return {"intent": intent, "messages": [response]}


knowledge_prompt = PromptTemplate([{
    "role": "system",
    "content": "You are a helpful assistant. Answer the user's question about our products and policies.",
}])
knowledge_user_prompt = UserPromptTemplate([{"role": "user", "content": "{{question}}"}])


def knowledge_agent(state: WorkflowState) -> dict:
    # The user's query is always the first message; the supervisor's
    # classification reply was appended after it.
    question = state["messages"][0].content
    with neatlogs.trace("answer_question", kind="LLM",
                        prompt_template=knowledge_prompt,
                        user_prompt_template=knowledge_user_prompt):
        msgs = knowledge_prompt.compile() + knowledge_user_prompt.compile(question=question)
        response = llm.invoke(msgs)
    return {"messages": [response]}


orders_prompt = PromptTemplate([{
    "role": "system",
    "content": "You are an order management assistant. Help the user with their order inquiry.",
}])
orders_user_prompt = UserPromptTemplate([{"role": "user", "content": "{{question}}"}])


def orders_agent(state: WorkflowState) -> dict:
    question = state["messages"][0].content  # first message is the user's query
    with neatlogs.trace("handle_order", kind="LLM",
                        prompt_template=orders_prompt,
                        user_prompt_template=orders_user_prompt):
        msgs = orders_prompt.compile() + orders_user_prompt.compile(question=question)
        response = llm.invoke(msgs)
    return {"messages": [response]}

Build and Run the Graph

def route(state: WorkflowState) -> Literal["knowledge_agent", "orders_agent"]:
    return "orders_agent" if state["intent"] == "orders" else "knowledge_agent"

graph = StateGraph(WorkflowState)
graph.add_node("supervisor", supervisor)
graph.add_node("knowledge_agent", knowledge_agent)
graph.add_node("orders_agent", orders_agent)
graph.add_edge(START, "supervisor")
graph.add_conditional_edges("supervisor", route)
graph.add_edge("knowledge_agent", END)
graph.add_edge("orders_agent", END)
app = graph.compile()
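
# Optional: render the wiring as a Mermaid diagram to verify the graph
# shape. Assumes a LangGraph version that exposes get_graph().draw_mermaid().
print(app.get_graph().draw_mermaid())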


@neatlogs.span(kind="WORKFLOW", name="support_request")
def run_workflow(query: str) -> str:
    result = app.invoke({
        "messages": [HumanMessage(content=query)],
        "intent": "",
    })
    return result["messages"][-1].content


print(run_workflow("What is your return policy?"))
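
# Exercise the other branch: a query mentioning an order should be
# classified as "orders" and routed to orders_agent.
print(run_workflow("Where is my order?"))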

# Export any spans still buffered in the SDK before the process exits.
neatlogs.flush()
neatlogs.shutdown()

What You'll See in the Dashboard

Each request produces a single WORKFLOW span (support_request) containing, as sketched below:

  • Spans for each graph node (supervisor, knowledge_agent, orders_agent)
  • LLM spans for each provider call with prompt templates attached
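
As a rough sketch (exact names and nesting depend on how your Neatlogs version renders node spans), a request that routes to the knowledge agent looks like:

support_request (WORKFLOW)
├── supervisor
│   └── classify_intent (LLM)
└── knowledge_agent
    └── answer_question (LLM)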
