Skip to content

Commit 1f8b4bd

Browse files
committed
add more tests
1 parent 77d24eb commit 1f8b4bd

File tree

6 files changed

+219
-133
lines changed

6 files changed

+219
-133
lines changed
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
import * as Sentry from '@sentry/node';
2+
import { loggingTransport } from '@sentry-internal/node-integration-tests';
3+
4+
Sentry.init({
5+
dsn: 'https://[email protected]/1337',
6+
release: '1.0',
7+
tracesSampleRate: 1.0,
8+
transport: loggingTransport,
9+
});
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
import * as Sentry from '@sentry/node';
2+
import { startExpressServerAndSendPortToRunner } from '@sentry-internal/node-integration-tests';
3+
import express from 'express';
4+
import http from 'http';
5+
6+
const app = express();
7+
8+
app.get('/test', (_req, res) => {
9+
http.get(`http://localhost:${app.port}/test2`, httpRes => {
10+
httpRes.on('data', () => {
11+
setTimeout(() => {
12+
res.send({ response: 'response 1' });
13+
}, 200);
14+
});
15+
});
16+
});
17+
18+
app.get('/test2', (_req, res) => {
19+
res.send({ response: 'response 2' });
20+
});
21+
22+
app.get('/test3', (_req, res) => {
23+
res.send({ response: 'response 3' });
24+
});
25+
26+
Sentry.setupExpressErrorHandler(app);
27+
28+
startExpressServerAndSendPortToRunner(app);
Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
import { afterAll, describe, test } from 'vitest';
2+
import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';
3+
4+
describe('express with http import', () => {
5+
afterAll(() => {
6+
cleanupChildProcesses();
7+
});
8+
9+
createEsmAndCjsTests(
10+
__dirname,
11+
'scenario.mjs',
12+
'instrument.mjs',
13+
createRunner => {
14+
test('it works when importing the http module', async () => {
15+
const runner = createRunner()
16+
.expect({
17+
transaction: {
18+
transaction: 'GET /test2',
19+
},
20+
})
21+
.expect({
22+
transaction: {
23+
transaction: 'GET /test',
24+
},
25+
})
26+
.expect({
27+
transaction: {
28+
transaction: 'GET /test3',
29+
},
30+
})
31+
.start();
32+
await runner.makeRequest('get', '/test');
33+
await runner.makeRequest('get', '/test3');
34+
await runner.completed();
35+
});
36+
// TODO: This is failing on ESM because importing http is triggering the http spans twice :(
37+
// We need to fix this!
38+
},
39+
{ skipEsm: true },
40+
);
41+
});
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
import * as Sentry from '@sentry/node';
2+
import { loggingTransport } from '@sentry-internal/node-integration-tests';
3+
4+
Sentry.init({
5+
dsn: 'https://[email protected]/1337',
6+
release: '1.0',
7+
tracesSampleRate: 1.0,
8+
transport: loggingTransport,
9+
});

dev-packages/node-integration-tests/suites/tracing/ai/scenario.js renamed to dev-packages/node-integration-tests/suites/tracing/ai/scenario.mjs

Lines changed: 3 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,6 @@
1-
const { loggingTransport } = require('@sentry-internal/node-integration-tests');
2-
const Sentry = require('@sentry/node');
3-
4-
Sentry.init({
5-
dsn: 'https://[email protected]/1337',
6-
release: '1.0',
7-
tracesSampleRate: 1.0,
8-
transport: loggingTransport,
9-
});
10-
11-
const { generateText } = require('ai');
12-
const { MockLanguageModelV1 } = require('ai/test');
1+
import * as Sentry from '@sentry/node';
2+
import { generateText } from 'ai';
3+
import { MockLanguageModelV1 } from 'ai/test';
134

145
async function run() {
156
await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
Lines changed: 129 additions & 121 deletions
Original file line numberDiff line numberDiff line change
@@ -1,131 +1,139 @@
11
import { afterAll, describe, expect, test } from 'vitest';
2-
import { cleanupChildProcesses, createRunner } from '../../../utils/runner';
2+
import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';
33

44
// `ai` SDK only support Node 18+
55
describe('ai', () => {
66
afterAll(() => {
77
cleanupChildProcesses();
88
});
99

10-
test('creates ai related spans', async () => {
11-
const EXPECTED_TRANSACTION = {
12-
transaction: 'main',
13-
spans: expect.arrayContaining([
14-
expect.objectContaining({
15-
data: expect.objectContaining({
16-
'ai.completion_tokens.used': 20,
17-
'ai.model.id': 'mock-model-id',
18-
'ai.model.provider': 'mock-provider',
19-
'ai.model_id': 'mock-model-id',
20-
'ai.operationId': 'ai.generateText',
21-
'ai.pipeline.name': 'generateText',
22-
'ai.prompt_tokens.used': 10,
23-
'ai.response.finishReason': 'stop',
24-
'ai.settings.maxRetries': 2,
25-
'ai.settings.maxSteps': 1,
26-
'ai.streaming': false,
27-
'ai.total_tokens.used': 30,
28-
'ai.usage.completionTokens': 20,
29-
'ai.usage.promptTokens': 10,
30-
'operation.name': 'ai.generateText',
31-
'sentry.op': 'ai.pipeline.generateText',
32-
'sentry.origin': 'auto.vercelai.otel',
33-
}),
34-
description: 'generateText',
35-
op: 'ai.pipeline.generateText',
36-
origin: 'auto.vercelai.otel',
37-
status: 'ok',
38-
}),
39-
expect.objectContaining({
40-
data: expect.objectContaining({
41-
'sentry.origin': 'auto.vercelai.otel',
42-
'sentry.op': 'ai.run.doGenerate',
43-
'operation.name': 'ai.generateText.doGenerate',
44-
'ai.operationId': 'ai.generateText.doGenerate',
45-
'ai.model.provider': 'mock-provider',
46-
'ai.model.id': 'mock-model-id',
47-
'ai.settings.maxRetries': 2,
48-
'gen_ai.system': 'mock-provider',
49-
'gen_ai.request.model': 'mock-model-id',
50-
'ai.pipeline.name': 'generateText.doGenerate',
51-
'ai.model_id': 'mock-model-id',
52-
'ai.streaming': false,
53-
'ai.response.finishReason': 'stop',
54-
'ai.response.model': 'mock-model-id',
55-
'ai.usage.promptTokens': 10,
56-
'ai.usage.completionTokens': 20,
57-
'gen_ai.response.finish_reasons': ['stop'],
58-
'gen_ai.usage.input_tokens': 10,
59-
'gen_ai.usage.output_tokens': 20,
60-
'ai.completion_tokens.used': 20,
61-
'ai.prompt_tokens.used': 10,
62-
'ai.total_tokens.used': 30,
63-
}),
64-
description: 'generateText.doGenerate',
65-
op: 'ai.run.doGenerate',
66-
origin: 'auto.vercelai.otel',
67-
status: 'ok',
68-
}),
69-
expect.objectContaining({
70-
data: expect.objectContaining({
71-
'ai.completion_tokens.used': 20,
72-
'ai.model.id': 'mock-model-id',
73-
'ai.model.provider': 'mock-provider',
74-
'ai.model_id': 'mock-model-id',
75-
'ai.prompt': '{"prompt":"Where is the second span?"}',
76-
'ai.operationId': 'ai.generateText',
77-
'ai.pipeline.name': 'generateText',
78-
'ai.prompt_tokens.used': 10,
79-
'ai.response.finishReason': 'stop',
80-
'ai.input_messages': '{"prompt":"Where is the second span?"}',
81-
'ai.settings.maxRetries': 2,
82-
'ai.settings.maxSteps': 1,
83-
'ai.streaming': false,
84-
'ai.total_tokens.used': 30,
85-
'ai.usage.completionTokens': 20,
86-
'ai.usage.promptTokens': 10,
87-
'operation.name': 'ai.generateText',
88-
'sentry.op': 'ai.pipeline.generateText',
89-
'sentry.origin': 'auto.vercelai.otel',
90-
}),
91-
description: 'generateText',
92-
op: 'ai.pipeline.generateText',
93-
origin: 'auto.vercelai.otel',
94-
status: 'ok',
95-
}),
96-
expect.objectContaining({
97-
data: expect.objectContaining({
98-
'sentry.origin': 'auto.vercelai.otel',
99-
'sentry.op': 'ai.run.doGenerate',
100-
'operation.name': 'ai.generateText.doGenerate',
101-
'ai.operationId': 'ai.generateText.doGenerate',
102-
'ai.model.provider': 'mock-provider',
103-
'ai.model.id': 'mock-model-id',
104-
'ai.settings.maxRetries': 2,
105-
'gen_ai.system': 'mock-provider',
106-
'gen_ai.request.model': 'mock-model-id',
107-
'ai.pipeline.name': 'generateText.doGenerate',
108-
'ai.model_id': 'mock-model-id',
109-
'ai.streaming': false,
110-
'ai.response.finishReason': 'stop',
111-
'ai.response.model': 'mock-model-id',
112-
'ai.usage.promptTokens': 10,
113-
'ai.usage.completionTokens': 20,
114-
'gen_ai.response.finish_reasons': ['stop'],
115-
'gen_ai.usage.input_tokens': 10,
116-
'gen_ai.usage.output_tokens': 20,
117-
'ai.completion_tokens.used': 20,
118-
'ai.prompt_tokens.used': 10,
119-
'ai.total_tokens.used': 30,
120-
}),
121-
description: 'generateText.doGenerate',
122-
op: 'ai.run.doGenerate',
123-
origin: 'auto.vercelai.otel',
124-
status: 'ok',
125-
}),
126-
]),
127-
};
10+
createEsmAndCjsTests(
11+
__dirname,
12+
'scenario.mjs',
13+
'instrument.mjs',
14+
createRunner => {
15+
test('creates ai related spans', async () => {
16+
const EXPECTED_TRANSACTION = {
17+
transaction: 'main',
18+
spans: expect.arrayContaining([
19+
expect.objectContaining({
20+
data: expect.objectContaining({
21+
'ai.completion_tokens.used': 20,
22+
'ai.model.id': 'mock-model-id',
23+
'ai.model.provider': 'mock-provider',
24+
'ai.model_id': 'mock-model-id',
25+
'ai.operationId': 'ai.generateText',
26+
'ai.pipeline.name': 'generateText',
27+
'ai.prompt_tokens.used': 10,
28+
'ai.response.finishReason': 'stop',
29+
'ai.settings.maxRetries': 2,
30+
'ai.settings.maxSteps': 1,
31+
'ai.streaming': false,
32+
'ai.total_tokens.used': 30,
33+
'ai.usage.completionTokens': 20,
34+
'ai.usage.promptTokens': 10,
35+
'operation.name': 'ai.generateText',
36+
'sentry.op': 'ai.pipeline.generateText',
37+
'sentry.origin': 'auto.vercelai.otel',
38+
}),
39+
description: 'generateText',
40+
op: 'ai.pipeline.generateText',
41+
origin: 'auto.vercelai.otel',
42+
status: 'ok',
43+
}),
44+
expect.objectContaining({
45+
data: expect.objectContaining({
46+
'sentry.origin': 'auto.vercelai.otel',
47+
'sentry.op': 'ai.run.doGenerate',
48+
'operation.name': 'ai.generateText.doGenerate',
49+
'ai.operationId': 'ai.generateText.doGenerate',
50+
'ai.model.provider': 'mock-provider',
51+
'ai.model.id': 'mock-model-id',
52+
'ai.settings.maxRetries': 2,
53+
'gen_ai.system': 'mock-provider',
54+
'gen_ai.request.model': 'mock-model-id',
55+
'ai.pipeline.name': 'generateText.doGenerate',
56+
'ai.model_id': 'mock-model-id',
57+
'ai.streaming': false,
58+
'ai.response.finishReason': 'stop',
59+
'ai.response.model': 'mock-model-id',
60+
'ai.usage.promptTokens': 10,
61+
'ai.usage.completionTokens': 20,
62+
'gen_ai.response.finish_reasons': ['stop'],
63+
'gen_ai.usage.input_tokens': 10,
64+
'gen_ai.usage.output_tokens': 20,
65+
'ai.completion_tokens.used': 20,
66+
'ai.prompt_tokens.used': 10,
67+
'ai.total_tokens.used': 30,
68+
}),
69+
description: 'generateText.doGenerate',
70+
op: 'ai.run.doGenerate',
71+
origin: 'auto.vercelai.otel',
72+
status: 'ok',
73+
}),
74+
expect.objectContaining({
75+
data: expect.objectContaining({
76+
'ai.completion_tokens.used': 20,
77+
'ai.model.id': 'mock-model-id',
78+
'ai.model.provider': 'mock-provider',
79+
'ai.model_id': 'mock-model-id',
80+
'ai.prompt': '{"prompt":"Where is the second span?"}',
81+
'ai.operationId': 'ai.generateText',
82+
'ai.pipeline.name': 'generateText',
83+
'ai.prompt_tokens.used': 10,
84+
'ai.response.finishReason': 'stop',
85+
'ai.input_messages': '{"prompt":"Where is the second span?"}',
86+
'ai.settings.maxRetries': 2,
87+
'ai.settings.maxSteps': 1,
88+
'ai.streaming': false,
89+
'ai.total_tokens.used': 30,
90+
'ai.usage.completionTokens': 20,
91+
'ai.usage.promptTokens': 10,
92+
'operation.name': 'ai.generateText',
93+
'sentry.op': 'ai.pipeline.generateText',
94+
'sentry.origin': 'auto.vercelai.otel',
95+
}),
96+
description: 'generateText',
97+
op: 'ai.pipeline.generateText',
98+
origin: 'auto.vercelai.otel',
99+
status: 'ok',
100+
}),
101+
expect.objectContaining({
102+
data: expect.objectContaining({
103+
'sentry.origin': 'auto.vercelai.otel',
104+
'sentry.op': 'ai.run.doGenerate',
105+
'operation.name': 'ai.generateText.doGenerate',
106+
'ai.operationId': 'ai.generateText.doGenerate',
107+
'ai.model.provider': 'mock-provider',
108+
'ai.model.id': 'mock-model-id',
109+
'ai.settings.maxRetries': 2,
110+
'gen_ai.system': 'mock-provider',
111+
'gen_ai.request.model': 'mock-model-id',
112+
'ai.pipeline.name': 'generateText.doGenerate',
113+
'ai.model_id': 'mock-model-id',
114+
'ai.streaming': false,
115+
'ai.response.finishReason': 'stop',
116+
'ai.response.model': 'mock-model-id',
117+
'ai.usage.promptTokens': 10,
118+
'ai.usage.completionTokens': 20,
119+
'gen_ai.response.finish_reasons': ['stop'],
120+
'gen_ai.usage.input_tokens': 10,
121+
'gen_ai.usage.output_tokens': 20,
122+
'ai.completion_tokens.used': 20,
123+
'ai.prompt_tokens.used': 10,
124+
'ai.total_tokens.used': 30,
125+
}),
126+
description: 'generateText.doGenerate',
127+
op: 'ai.run.doGenerate',
128+
origin: 'auto.vercelai.otel',
129+
status: 'ok',
130+
}),
131+
]),
132+
};
128133

129-
await createRunner(__dirname, 'scenario.js').expect({ transaction: EXPECTED_TRANSACTION }).start().completed();
130-
});
134+
await createRunner().expect({ transaction: EXPECTED_TRANSACTION }).start().completed();
135+
});
136+
},
137+
{ skipEsm: true },
138+
);
131139
});

0 commit comments

Comments
 (0)