Skip to content

Commit a84b2f1

Browse files
authored
feat(core): Instrument langgraph createReactAgent (#20344)
This PR adds instrumentation for LangGraph's `createReactAgent` API. ## createReactAgent wrapping - Extracts agent name, LLM model, and tools from params - Wraps compiled graph's `invoke()` with `invoke_agent` span - Wraps tool `invoke()` with `execute_tool` spans (name, type, description, arguments, result) - Injects LangChain callback handler + `lc_agent_name` + `__sentry_langgraph__` metadata at invoke level for chat span creation and agent name propagation to all child spans - Suppresses `StateGraph.compile` instrumentation inside `createReactAgent` to avoid duplicate spans ## LangChain callback handler improvements - Reads `gen_ai.agent.name` from `metadata.lc_agent_name` (convention from newer LangGraph `createAgent`, adopted for our supported versions) - Suppresses chain and tool callback spans inside agent context (based on `metadata.__sentry_langgraph__` presence) to avoid duplicates with our direct instrumentation - Extracts tool definitions from `extraParams` in `handleChatModelStart` and sets `gen_ai.request.available_tools` on chat spans - Uses `runName` for tool name in `handleToolStart` (set by LangChain's `StructuredTool.call()`) — fixes `unknown_tool` issue - Adds `gen_ai.operation.name` to tool spans - Extracts `.content` from ToolMessage objects in `handleToolEnd` instead of serializing the full wrapper - `addToolCallsAttributes` now prefers `message.tool_calls` (LangChain's normalized format) over scanning `message.content` for Anthropic-style `tool_use` items, fixing duplicate tool calls on Anthropic chat spans. Falls back to `message.content` scanning for older LangChain versions. ## OTel module patching - Patches `@langchain/langgraph/prebuilt` for `createReactAgent` (ESM + CJS file patches for `dist/prebuilt/index.cjs`) ## Exports - `instrumentCreateReactAgent` from core, browser, cloudflare Closes: #19372
1 parent 225751a commit a84b2f1

16 files changed

Lines changed: 933 additions & 54 deletions

File tree

Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
import { ChatAnthropic } from '@langchain/anthropic';
2+
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
3+
import { createReactAgent } from '@langchain/langgraph/prebuilt';
4+
import * as Sentry from '@sentry/node';
5+
import express from 'express';
6+
7+
/**
 * Spins up a local Express server that mimics the Anthropic Messages API.
 * Every POST /v1/messages request receives the same canned text completion,
 * echoing back the requested model, so the test never touches the network.
 *
 * @returns {Promise<import('http').Server>} resolves with the server once it is listening
 */
function startMockAnthropicServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    // Static reply; `model` is echoed back from the request body.
    const payload = {
      id: 'msg_react_agent_123',
      type: 'message',
      role: 'assistant',
      content: [{ type: 'text', text: 'Paris is the capital of France.' }],
      model: req.body.model,
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: { input_tokens: 20, output_tokens: 10 },
    };
    res.json(payload);
  });

  // Port 0 lets the OS pick a free port; resolve with the live server handle.
  return new Promise(resolve => {
    const server = app.listen(0, () => resolve(server));
  });
}
40+
41+
/**
 * Boots the mock Anthropic server, invokes a tool-less createReactAgent
 * inside a root Sentry span, then flushes buffered events and shuts the
 * server down.
 */
async function run() {
  const server = await startMockAnthropicServer();
  const baseUrl = `http://localhost:${server.address().port}`;

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      const llm = new ChatAnthropic({
        model: 'claude-3-5-sonnet-20241022',
        apiKey: 'mock-api-key',
        clientOptions: {
          baseURL: baseUrl,
        },
      });

      const agent = createReactAgent({ llm, tools: [], name: 'helpful_assistant' });

      await agent.invoke({
        messages: [new SystemMessage('You are a helpful assistant.'), new HumanMessage('What is the capital of France?')],
      });
    });

    // Give Sentry up to 2s to deliver buffered events before teardown.
    await Sentry.flush(2000);
  } finally {
    // Always release the port, even if the scenario above throws.
    server.close();
  }
}

// Surface failures instead of leaving the promise floating.
run().catch(err => {
  // eslint-disable-next-line no-console
  console.error(err);
  process.exitCode = 1;
});
Lines changed: 122 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,122 @@
1+
import { tool } from '@langchain/core/tools';
2+
import { ChatAnthropic } from '@langchain/anthropic';
3+
import { createReactAgent } from '@langchain/langgraph/prebuilt';
4+
import { HumanMessage } from '@langchain/core/messages';
5+
import * as Sentry from '@sentry/node';
6+
import express from 'express';
7+
import { z } from 'zod';
8+
9+
/**
 * Starts a mock Anthropic Messages API server that scripts a three-turn
 * ReAct conversation:
 *   call 1  -> the model requests the "add" tool with { a: 3, b: 5 }
 *   call 2  -> the model requests the "multiply" tool with { a: 8, b: 4 }
 *   call 3+ -> the model returns the final text answer
 *
 * @returns {Promise<import('http').Server>} resolves with the server once it is listening
 */
function startMockAnthropicServer() {
  // Keep the turn counter in the closure (instead of module level) so every
  // server instance scripts its conversation independently of any other.
  let callCount = 0;

  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    callCount++;
    const model = req.body.model;

    if (callCount === 1) {
      // First call: model decides to call the "add" tool
      res.json({
        id: 'msg_1',
        type: 'message',
        role: 'assistant',
        content: [
          {
            type: 'tool_use',
            id: 'toolu_add_1',
            name: 'add',
            input: { a: 3, b: 5 },
          },
        ],
        model: model,
        stop_reason: 'tool_use',
        usage: { input_tokens: 20, output_tokens: 10 },
      });
    } else if (callCount === 2) {
      // Second call: model sees add result=8, calls "multiply"
      res.json({
        id: 'msg_2',
        type: 'message',
        role: 'assistant',
        content: [
          {
            type: 'tool_use',
            id: 'toolu_mul_1',
            name: 'multiply',
            input: { a: 8, b: 4 },
          },
        ],
        model: model,
        stop_reason: 'tool_use',
        usage: { input_tokens: 30, output_tokens: 10 },
      });
    } else {
      // Third call: model returns final answer
      res.json({
        id: 'msg_3',
        type: 'message',
        role: 'assistant',
        content: [{ type: 'text', text: 'The result is 32.' }],
        model: model,
        stop_reason: 'end_turn',
        usage: { input_tokens: 40, output_tokens: 10 },
      });
    }
  });

  return new Promise(resolve => {
    const server = app.listen(0, () => resolve(server));
  });
}
73+
74+
/**
 * Drives a createReactAgent equipped with two math tools against the
 * scripted mock server: the agent should call add(3, 5), then
 * multiply(8, 4), then produce the final answer. Flushes Sentry events
 * and shuts the server down afterwards.
 */
async function run() {
  const server = await startMockAnthropicServer();
  const baseUrl = `http://localhost:${server.address().port}`;

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      const llm = new ChatAnthropic({
        model: 'claude-3-5-sonnet-20241022',
        apiKey: 'mock-api-key',
        clientOptions: { baseURL: baseUrl },
      });

      const addTool = tool(
        async ({ a, b }) => {
          return String(a + b);
        },
        {
          name: 'add',
          description: 'Add two numbers',
          schema: z.object({ a: z.number(), b: z.number() }),
        },
      );

      const multiplyTool = tool(
        async ({ a, b }) => {
          return String(a * b);
        },
        {
          name: 'multiply',
          description: 'Multiply two numbers',
          schema: z.object({ a: z.number(), b: z.number() }),
        },
      );

      const agent = createReactAgent({
        llm,
        tools: [addTool, multiplyTool],
        name: 'math_assistant',
      });

      await agent.invoke({
        messages: [new HumanMessage('Calculate (3 + 5) * 4')],
      });
    });

    // Give Sentry up to 2s to deliver buffered events before teardown.
    await Sentry.flush(2000);
  } finally {
    // Always release the port, even if the scenario above throws.
    server.close();
  }
}

// Surface failures instead of leaving the promise floating.
run().catch(err => {
  // eslint-disable-next-line no-console
  console.error(err);
  process.exitCode = 1;
});
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
import * as Sentry from '@sentry/node';
2+
import { loggingTransport } from '@sentry-internal/node-integration-tests';
3+
4+
// Initialize the SDK against a dummy DSN; events are routed through the
// logging transport so the integration-test runner can assert on them.
Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  beforeSendTransaction: event => {
    // Filter out mock express server transactions
    return event.transaction?.includes('/v1/messages') ? null : event;
  },
});
Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
import { ChatAnthropic } from '@langchain/anthropic';
2+
import { END, MessagesAnnotation, START, StateGraph } from '@langchain/langgraph';
3+
import * as Sentry from '@sentry/node';
4+
import express from 'express';
5+
6+
/**
 * Launches a minimal Express server standing in for the Anthropic Messages
 * API. Every POST /v1/messages request gets the same short text reply with
 * the requested model echoed back.
 *
 * @returns {Promise<import('http').Server>} resolves with the server once it is listening
 */
function startMockAnthropicServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    const reply = {
      id: 'msg_stategraph_chat_1',
      type: 'message',
      role: 'assistant',
      content: [{ type: 'text', text: 'Hello from mock.' }],
      model: req.body.model,
      stop_reason: 'end_turn',
      usage: { input_tokens: 5, output_tokens: 3 },
    };
    res.json(reply);
  });

  // Listen on an OS-assigned free port and hand back the server instance.
  return new Promise(resolve => {
    const server = app.listen(0, () => resolve(server));
  });
}
26+
27+
/**
 * Runs a plain StateGraph chat (no createReactAgent) against the mock
 * Anthropic server inside a root Sentry span, then flushes events and
 * shuts the server down.
 */
async function run() {
  const server = await startMockAnthropicServer();
  const baseUrl = `http://localhost:${server.address().port}`;

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      const llm = new ChatAnthropic({
        model: 'claude-3-5-sonnet-20241022',
        apiKey: 'mock-api-key',
        clientOptions: { baseURL: baseUrl },
      });

      // Single node that performs one LLM call and appends the response.
      const callLlm = async state => {
        const response = await llm.invoke(state.messages);
        return { messages: [response] };
      };

      // Minimal graph: START -> agent -> END.
      const graph = new StateGraph(MessagesAnnotation)
        .addNode('agent', callLlm)
        .addEdge(START, 'agent')
        .addEdge('agent', END)
        .compile({ name: 'plain_assistant' });

      await graph.invoke({ messages: [{ role: 'user', content: 'Hi.' }] });
    });

    // Give Sentry up to 2s to deliver buffered events before teardown.
    await Sentry.flush(2000);
  } finally {
    // Always release the port, even if the scenario above throws.
    server.close();
  }
}

// Surface failures instead of leaving the promise floating.
run().catch(err => {
  // eslint-disable-next-line no-console
  console.error(err);
  process.exitCode = 1;
});

dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts

Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ import {
1313
GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
1414
GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
1515
GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE,
16+
GEN_AI_TOOL_NAME_ATTRIBUTE,
1617
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
1718
GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
1819
GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
@@ -445,4 +446,111 @@ describe('LangGraph integration', () => {
445446
});
446447
},
447448
);
449+
450+
// createReactAgent tests
451+
const EXPECTED_TRANSACTION_REACT_AGENT = {
452+
transaction: 'main',
453+
spans: [
454+
expect.objectContaining({
455+
data: expect.objectContaining({
456+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
457+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
458+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
459+
[GEN_AI_AGENT_NAME_ATTRIBUTE]: 'helpful_assistant',
460+
[GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'helpful_assistant',
461+
}),
462+
description: 'invoke_agent helpful_assistant',
463+
op: 'gen_ai.invoke_agent',
464+
origin: 'auto.ai.langgraph',
465+
status: 'ok',
466+
}),
467+
expect.objectContaining({ op: 'http.client' }),
468+
expect.objectContaining({
469+
data: expect.objectContaining({
470+
[GEN_AI_AGENT_NAME_ATTRIBUTE]: 'helpful_assistant',
471+
}),
472+
op: 'gen_ai.chat',
473+
}),
474+
],
475+
};
476+
477+
createEsmAndCjsTests(__dirname, 'agent-scenario.mjs', 'instrument-agent.mjs', (createRunner, test) => {
478+
test('should instrument createReactAgent with agent and chat spans', { timeout: 30000 }, async () => {
479+
await createRunner()
480+
.ignore('event')
481+
.expect({ transaction: EXPECTED_TRANSACTION_REACT_AGENT })
482+
.start()
483+
.completed();
484+
});
485+
});
486+
487+
// createReactAgent with tools - verifies tool execution spans
488+
const EXPECTED_TRANSACTION_REACT_AGENT_TOOLS = {
489+
transaction: 'main',
490+
spans: [
491+
expect.objectContaining({
492+
data: expect.objectContaining({
493+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
494+
[GEN_AI_AGENT_NAME_ATTRIBUTE]: 'math_assistant',
495+
}),
496+
op: 'gen_ai.invoke_agent',
497+
status: 'ok',
498+
}),
499+
expect.objectContaining({ op: 'http.client' }),
500+
expect.objectContaining({ op: 'gen_ai.chat' }),
501+
expect.objectContaining({
502+
data: expect.objectContaining({
503+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
504+
[GEN_AI_TOOL_NAME_ATTRIBUTE]: 'add',
505+
'gen_ai.tool.type': 'function',
506+
}),
507+
description: 'execute_tool add',
508+
op: 'gen_ai.execute_tool',
509+
status: 'ok',
510+
}),
511+
expect.objectContaining({ op: 'http.client' }),
512+
expect.objectContaining({ op: 'gen_ai.chat' }),
513+
expect.objectContaining({
514+
data: expect.objectContaining({
515+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
516+
[GEN_AI_TOOL_NAME_ATTRIBUTE]: 'multiply',
517+
'gen_ai.tool.type': 'function',
518+
}),
519+
description: 'execute_tool multiply',
520+
op: 'gen_ai.execute_tool',
521+
status: 'ok',
522+
}),
523+
expect.objectContaining({ op: 'http.client' }),
524+
expect.objectContaining({ op: 'gen_ai.chat' }),
525+
],
526+
};
527+
528+
createEsmAndCjsTests(__dirname, 'agent-tools-scenario.mjs', 'instrument-agent.mjs', (createRunner, test) => {
529+
test('should create tool execution spans for createReactAgent with tools', { timeout: 30000 }, async () => {
530+
await createRunner()
531+
.ignore('event')
532+
.expect({ transaction: EXPECTED_TRANSACTION_REACT_AGENT_TOOLS })
533+
.start()
534+
.completed();
535+
});
536+
});
537+
538+
createEsmAndCjsTests(__dirname, 'scenario-stategraph-chat.mjs', 'instrument-agent.mjs', (createRunner, test) => {
539+
test('auto-injects langchain handler for plain StateGraph and emits chat spans', { timeout: 30000 }, async () => {
540+
await createRunner()
541+
.ignore('event')
542+
.expect({
543+
transaction: event => {
544+
const spans = event.spans ?? [];
545+
const chatSpans = spans.filter(s => s.op === 'gen_ai.chat');
546+
expect(chatSpans).toHaveLength(1);
547+
expect(chatSpans[0]?.data).toMatchObject({
548+
[GEN_AI_AGENT_NAME_ATTRIBUTE]: 'plain_assistant',
549+
});
550+
},
551+
})
552+
.start()
553+
.completed();
554+
});
555+
});
448556
});

packages/browser/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,7 @@ export {
7272
instrumentOpenAiClient,
7373
instrumentGoogleGenAIClient,
7474
instrumentLangGraph,
75+
instrumentCreateReactAgent,
7576
createLangChainCallbackHandler,
7677
instrumentLangChainEmbeddings,
7778
logger,

packages/cloudflare/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,7 @@ export {
110110
withStreamedSpan,
111111
spanStreamingIntegration,
112112
instrumentLangGraph,
113+
instrumentCreateReactAgent,
113114
} from '@sentry/core';
114115

115116
export { withSentry } from './withSentry';

0 commit comments

Comments
 (0)