diff --git a/.changeset/stale-berries-push.md b/.changeset/stale-berries-push.md
new file mode 100644
index 000000000000..8df3c9599375
--- /dev/null
+++ b/.changeset/stale-berries-push.md
@@ -0,0 +1,5 @@
+---
+"@ai-sdk/open-responses": patch
+---
+
+feat(open-responses): add option to pass reasoning summary for OpenResponses
diff --git a/examples/ai-functions/src/generate-text/open-responses/reasoning.ts b/examples/ai-functions/src/generate-text/open-responses/reasoning.ts
new file mode 100644
index 000000000000..dfb87385b8ae
--- /dev/null
+++ b/examples/ai-functions/src/generate-text/open-responses/reasoning.ts
@@ -0,0 +1,32 @@
+import { createOpenResponses } from '@ai-sdk/open-responses';
+import { generateText } from 'ai';
+import { run } from '../../lib/run';
+
+const lmstudio = createOpenResponses({
+  name: 'lmstudio',
+  url: 'https://api.openai.com/v1/responses',
+  apiKey: process.env.OPENAI_API_KEY,
+});
+
+run(async () => {
+  const result = await generateText({
+    model: lmstudio('gpt-5'),
+    prompt: 'Invent a new holiday and describe its traditions.',
+    reasoning: 'high',
+    maxOutputTokens: 100,
+    providerOptions: {
+      lmstudio: {
+        reasoningSummary: 'detailed',
+      },
+    },
+  });
+
+  console.log(JSON.stringify(result.response.body, null, 2));
+
+  console.log('Reasoning:', result.reasoning);
+  console.log(result.text);
+  console.log();
+  console.log('Token usage:', result.usage);
+  console.log('Finish reason:', result.finishReason);
+  console.log('Request:', JSON.stringify(result.request, null, 2));
+});
diff --git a/packages/open-responses/src/index.ts b/packages/open-responses/src/index.ts
index 2fc4297e543b..0266f39c37ce 100644
--- a/packages/open-responses/src/index.ts
+++ b/packages/open-responses/src/index.ts
@@ -1,2 +1,3 @@
 export { VERSION } from './version';
 export { createOpenResponses } from './open-responses-provider';
+export type { OpenResponsesOptions } from './responses/open-responses-options';
diff --git a/packages/open-responses/src/open-responses-provider.ts b/packages/open-responses/src/open-responses-provider.ts
index 2956f1948bba..5e37d5f4cb1e 100644
--- a/packages/open-responses/src/open-responses-provider.ts
+++ b/packages/open-responses/src/open-responses-provider.ts
@@ -65,6 +65,7 @@ export function createOpenResponses(
   const createResponsesModel = (modelId: string) => {
     return new OpenResponsesLanguageModel(modelId, {
       provider: `${providerName}.responses`,
+      providerOptionsName: providerName,
       headers: getHeaders,
       url: options.url,
       fetch: options.fetch,
diff --git a/packages/open-responses/src/responses/open-responses-config.ts b/packages/open-responses/src/responses/open-responses-config.ts
index d8acd68138b3..291869830963 100644
--- a/packages/open-responses/src/responses/open-responses-config.ts
+++ b/packages/open-responses/src/responses/open-responses-config.ts
@@ -2,6 +2,7 @@ import { FetchFunction } from '@ai-sdk/provider-utils';
 
 export type OpenResponsesConfig = {
   provider: string;
+  providerOptionsName: string;
   url: string;
   headers: () => Record;
   fetch?: FetchFunction;
diff --git a/packages/open-responses/src/responses/open-responses-language-model.test.ts b/packages/open-responses/src/responses/open-responses-language-model.test.ts
index 0c3fad854df9..9f0210408e1f 100644
--- a/packages/open-responses/src/responses/open-responses-language-model.test.ts
+++ b/packages/open-responses/src/responses/open-responses-language-model.test.ts
@@ -25,6 +25,7 @@ describe('OpenResponsesLanguageModel', () => {
   function createModel(modelId: string = 'gemma-7b-it') {
     return new OpenResponsesLanguageModel(modelId, {
       provider: 'lmstudio',
+      providerOptionsName: 'lmstudio',
       url: URL,
       headers: () => ({}),
       generateId: mockId(),
@@ -210,6 +211,131 @@ describe('OpenResponsesLanguageModel', () => {
     });
   });
 
+  describe('providerOptions reasoning', () => {
+    beforeEach(() => {
+      prepareJsonFixtureResponse('lmstudio-basic.1');
+    });
+
+    it('should send reasoning.summary via providerOptions', async () => {
+      await createModel().doGenerate({
+        prompt: TEST_PROMPT,
+        providerOptions: {
+          lmstudio: { reasoningSummary: 'detailed' },
+        },
+      });
+
+      expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
+        {
+          "input": [
+            {
+              "content": [
+                {
+                  "text": "Hello",
+                  "type": "input_text",
+                },
+              ],
+              "role": "user",
+              "type": "message",
+            },
+          ],
+          "model": "gemma-7b-it",
+          "reasoning": {
+            "summary": "detailed",
+          },
+        }
+      `);
+    });
+
+    it('should combine top-level reasoning effort with providerOptions summary', async () => {
+      await createModel().doGenerate({
+        prompt: TEST_PROMPT,
+        reasoning: 'high',
+        providerOptions: {
+          lmstudio: { reasoningSummary: 'auto' },
+        },
+      });
+
+      expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
+        {
+          "input": [
+            {
+              "content": [
+                {
+                  "text": "Hello",
+                  "type": "input_text",
+                },
+              ],
+              "role": "user",
+              "type": "message",
+            },
+          ],
+          "model": "gemma-7b-it",
+          "reasoning": {
+            "effort": "high",
+            "summary": "auto",
+          },
+        }
+      `);
+    });
+
+    it('should send reasoning.summary concise via providerOptions', async () => {
+      await createModel().doGenerate({
+        prompt: TEST_PROMPT,
+        providerOptions: {
+          lmstudio: { reasoningSummary: 'concise' },
+        },
+      });
+
+      expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
+        {
+          "input": [
+            {
+              "content": [
+                {
+                  "text": "Hello",
+                  "type": "input_text",
+                },
+              ],
+              "role": "user",
+              "type": "message",
+            },
+          ],
+          "model": "gemma-7b-it",
+          "reasoning": {
+            "summary": "concise",
+          },
+        }
+      `);
+    });
+
+    it('should not set reasoning when providerOptions has no reasoning fields', async () => {
+      await createModel().doGenerate({
+        prompt: TEST_PROMPT,
+        providerOptions: {
+          lmstudio: {},
+        },
+      });
+
+      expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
+        {
+          "input": [
+            {
+              "content": [
+                {
+                  "text": "Hello",
+                  "type": "input_text",
+                },
+              ],
+              "role": "user",
+              "type": "message",
+            },
+          ],
+          "model": "gemma-7b-it",
+        }
+      `);
+    });
+  });
+
   describe('tool call parsing', () => {
     let result: LanguageModelV4GenerateResult;
 
diff --git a/packages/open-responses/src/responses/open-responses-language-model.ts b/packages/open-responses/src/responses/open-responses-language-model.ts
index 744d099b68dd..82aada4fa2fe 100644
--- a/packages/open-responses/src/responses/open-responses-language-model.ts
+++ b/packages/open-responses/src/responses/open-responses-language-model.ts
@@ -17,6 +17,7 @@ import {
   isCustomReasoning,
   jsonSchema,
   mapReasoningToProviderEffort,
+  parseProviderOptions,
   ParseResult,
   postJsonToApi,
 } from '@ai-sdk/provider-utils';
@@ -32,6 +33,7 @@ import {
 } from './open-responses-api';
 import { mapOpenResponsesFinishReason } from './map-open-responses-finish-reason';
 import { OpenResponsesConfig } from './open-responses-config';
+import { openResponsesOptionsSchema } from './open-responses-options';
 
 export class OpenResponsesLanguageModel implements LanguageModelV4 {
   readonly specificationVersion = 'v4';
@@ -130,6 +132,12 @@ export class OpenResponsesLanguageModel implements LanguageModelV4 {
         }
       : undefined;
+    const openResponsesOptions = await parseProviderOptions({
+      provider: this.config.providerOptionsName,
+      providerOptions,
+      schema: openResponsesOptionsSchema,
+    });
+
     const resolvedReasoningEffort = isCustomReasoning(reasoning)
       ? reasoning === 'none'
         ? 'none'
         :
@@ -157,8 +165,16 @@
       presence_penalty: presencePenalty,
       frequency_penalty: frequencyPenalty,
       reasoning:
-        resolvedReasoningEffort != null
-          ? { effort: resolvedReasoningEffort }
+        resolvedReasoningEffort != null ||
+        openResponsesOptions?.reasoningSummary != null
+          ? {
+              ...(resolvedReasoningEffort != null && {
+                effort: resolvedReasoningEffort,
+              }),
+              ...(openResponsesOptions?.reasoningSummary != null && {
+                summary: openResponsesOptions.reasoningSummary,
+              }),
+            }
           : undefined,
       tools: functionTools?.length ? functionTools : undefined,
       tool_choice: convertedToolChoice,
diff --git a/packages/open-responses/src/responses/open-responses-options.ts b/packages/open-responses/src/responses/open-responses-options.ts
new file mode 100644
index 000000000000..5899ae7103a6
--- /dev/null
+++ b/packages/open-responses/src/responses/open-responses-options.ts
@@ -0,0 +1,18 @@
+import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
+import { z } from 'zod/v4';
+
+export const openResponsesOptionsSchema = lazySchema(() =>
+  zodSchema(
+    z.object({
+      /**
+       * Controls reasoning summary output from the model.
+       * Valid values: 'concise', 'detailed', 'auto'.
+       */
+      reasoningSummary: z.enum(['concise', 'detailed', 'auto']).nullish(),
+    }),
+  ),
+);
+
+export type OpenResponsesOptions = InferSchema<
+  typeof openResponsesOptionsSchema
+>;