Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changeset/metal-baboons-provide.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
'@ai-sdk/amazon-bedrock': patch
---

fix(bedrock): map reasoningConfig to reasoning_effort for openai models
18 changes: 18 additions & 0 deletions examples/ai-core/src/generate-text/bedrock-openai-reasoning.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
import { bedrock } from '@ai-sdk/amazon-bedrock';
import { generateText } from 'ai';
import { run } from '../lib/run';

// Example: reasoning with an OpenAI model hosted on Amazon Bedrock.
// The provider translates `reasoningConfig.maxReasoningEffort` into the
// flat `reasoning_effort` request field that openai.* model ids expect.
run(async () => {
  const { reasoningText, text } = await generateText({
    model: bedrock('openai.gpt-oss-120b-1:0'),
    prompt: 'What is 2 + 2 equal to?',
    providerOptions: {
      bedrock: {
        reasoningConfig: { type: 'enabled', maxReasoningEffort: 'medium' },
      },
    },
  });

  // Print the model's reasoning trace first, then the final answer.
  console.log(reasoningText);
  console.log(text);
});
48 changes: 48 additions & 0 deletions packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,11 @@ const novaGenerateUrl = `${baseUrl}/model/${encodeURIComponent(
novaModelId,
)}/converse`;

// Fixture ids for the OpenAI-on-Bedrock test model; the Converse endpoint URL
// embeds the (URL-encoded) model id, mirroring the anthropic/nova fixtures above.
const openaiModelId = 'openai.gpt-oss-120b-1:0';
const openaiGenerateUrl = `${baseUrl}/model/${encodeURIComponent(
openaiModelId,
)}/converse`;

const server = createTestServer({
[generateUrl]: {},
[streamUrl]: {
Expand All @@ -98,6 +103,7 @@ const server = createTestServer({
// Configure the server for the Anthropic model from the start
[anthropicGenerateUrl]: {},
[novaGenerateUrl]: {},
[openaiGenerateUrl]: {},
});

function prepareJsonFixtureResponse(filename: string) {
Expand Down Expand Up @@ -151,6 +157,13 @@ const novaModel = new BedrockChatLanguageModel(novaModelId, {
generateId: () => 'test-id',
});

// Model instance targeting an OpenAI model id on Bedrock; used to verify that
// maxReasoningEffort is mapped to `reasoning_effort` rather than `reasoningConfig`.
const openaiModel = new BedrockChatLanguageModel(openaiModelId, {
baseUrl: () => baseUrl,
headers: {},
fetch: fakeFetchWithAuth,
generateId: () => 'test-id',
});

let mockOptions: { success: boolean; errorValue?: any } = { success: true };

describe('doStream', () => {
Expand Down Expand Up @@ -3595,6 +3608,41 @@ describe('doGenerate', () => {
expect(requestBody.additionalModelRequestFields?.thinking).toBeUndefined();
});

it('maps maxReasoningEffort to reasoning_effort for OpenAI models (generate)', async () => {
// Stub a minimal successful Converse response so doGenerate completes.
server.urls[openaiGenerateUrl].response = {
type: 'json-value',
body: {
output: {
message: { content: [{ text: 'Hello' }], role: 'assistant' },
},
stopReason: 'stop_sequence',
usage: { inputTokens: 1, outputTokens: 1, totalTokens: 2 },
},
};

await openaiModel.doGenerate({
prompt: TEST_PROMPT,
providerOptions: {
bedrock: {
reasoningConfig: {
maxReasoningEffort: 'medium',
},
},
},
});

// The outgoing request must carry the flat OpenAI-style field...
const requestBody = await server.calls[0].requestBodyJson;
expect(requestBody).toMatchObject({
additionalModelRequestFields: {
reasoning_effort: 'medium',
},
});
// ...and neither the nested reasoningConfig (Nova style) nor the
// `thinking` field (Anthropic style) should be present.
expect(
requestBody.additionalModelRequestFields?.reasoningConfig,
).toBeUndefined();
expect(requestBody.additionalModelRequestFields?.thinking).toBeUndefined();
});

it('should warn when Anthropic model receives maxReasoningEffort (generate)', async () => {
prepareJsonResponse({});

Expand Down
29 changes: 20 additions & 9 deletions packages/amazon-bedrock/src/bedrock-chat-language-model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -215,16 +215,27 @@ export class BedrockChatLanguageModel implements LanguageModelV3 {

const maxReasoningEffort =
bedrockOptions.reasoningConfig?.maxReasoningEffort;
const isOpenAIModel = this.modelId.startsWith('openai.');
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

OpenAI models should use reasoning_effort directly, not reasoningConfig, when handling maxReasoningEffort

Fix on Vercel


// Route maxReasoningEffort into the request-field shape each model family
// expects. Anthropic models are handled separately (see the else-if below,
// which emits an "unsupported" warning for them).
if (maxReasoningEffort != null && !isAnthropicModel) {
if (isOpenAIModel) {
// OpenAI models on Bedrock expect `reasoning_effort` as a flat value
// NOTE(review): reasoningConfig.type is intentionally dropped here —
// confirm OpenAI model ids have no equivalent toggle.
bedrockOptions.additionalModelRequestFields = {
...bedrockOptions.additionalModelRequestFields,
reasoning_effort: maxReasoningEffort,
};
} else {
// other models (such as Nova 2) use reasoningConfig format
bedrockOptions.additionalModelRequestFields = {
...bedrockOptions.additionalModelRequestFields,
reasoningConfig: {
// Only forward `type` when the caller supplied one.
...(bedrockOptions.reasoningConfig?.type != null && {
type: bedrockOptions.reasoningConfig.type,
}),
maxReasoningEffort,
},
};
}
}
} else if (maxReasoningEffort != null && isAnthropicModel) {
warnings.push({
type: 'unsupported',
Expand Down
Loading