Skip to content

Commit 57deae1

Browse files
committed
fix(core): backfill OpenAI responses model for streaming responses
1 parent 7e3614c commit 57deae1

File tree

2 files changed

+55
-0
lines changed

2 files changed

+55
-0
lines changed

packages/core/src/tracing/openai/streaming.ts

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,14 @@
11
import { captureException } from '../../exports';
22
import { SPAN_STATUS_ERROR } from '../../tracing';
33
import type { Span } from '../../types-hoist/span';
4+
import { updateSpanName } from '../../utils/spanUtils';
45
import {
6+
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
57
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
68
GEN_AI_RESPONSE_STREAMING_ATTRIBUTE,
79
GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
810
GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
11+
OPENAI_OPERATIONS,
912
} from '../ai/gen-ai-attributes';
1013
import { RESPONSE_EVENT_TYPES } from './constants';
1114
import type {
@@ -246,6 +249,12 @@ export async function* instrumentStream<T>(
246249
}
247250
} finally {
248251
setCommonResponseAttributes(span, state.responseId, state.responseModel, state.responseTimestamp);
252+
if (state.responseModel) {
253+
span.setAttributes({
254+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: state.responseModel,
255+
});
256+
updateSpanName(span, `${OPENAI_OPERATIONS.CHAT} ${state.responseModel}`);
257+
}
249258
setTokenUsageAttributes(span, state.promptTokens, state.completionTokens, state.totalTokens);
250259

251260
span.setAttributes({
Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
import { describe, expect, it, vi } from 'vitest';
2+
import { instrumentStream } from '../../../src/tracing/openai/streaming';
3+
import type { ResponseStreamingEvent } from '../../../src/tracing/openai/types';
4+
5+
async function collectStream<T>(stream: AsyncIterable<T>): Promise<T[]> {
6+
const events: T[] = [];
7+
for await (const event of stream) {
8+
events.push(event);
9+
}
10+
return events;
11+
}
12+
13+
describe('openai-streaming', () => {
14+
it('should backfill the request model and span name for streamed Responses API events', async () => {
15+
async function* createStream(): AsyncGenerator<ResponseStreamingEvent> {
16+
yield {
17+
type: 'response.completed',
18+
response: {
19+
object: 'response',
20+
id: 'resp_123',
21+
model: 'gpt-4.1-mini',
22+
created_at: 1704067200,
23+
status: 'completed',
24+
},
25+
} as ResponseStreamingEvent;
26+
}
27+
28+
const span = {
29+
setAttributes: vi.fn(),
30+
setStatus: vi.fn(),
31+
updateName: vi.fn(),
32+
end: vi.fn(),
33+
};
34+
35+
const events = await collectStream(
36+
instrumentStream(createStream(), span as unknown as Parameters<typeof instrumentStream>[1], false),
37+
);
38+
39+
expect(events).toHaveLength(1);
40+
expect(span.setAttributes).toHaveBeenCalledWith({
41+
'gen_ai.request.model': 'gpt-4.1-mini',
42+
});
43+
expect(span.updateName).toHaveBeenCalledWith('chat gpt-4.1-mini');
44+
expect(span.end).toHaveBeenCalled();
45+
});
46+
});

0 commit comments

Comments (0)