5 changes: 5 additions & 0 deletions .changeset/stale-berries-push.md
@@ -0,0 +1,5 @@
---
"@ai-sdk/open-responses": patch
---

feat(open-responses): add option to pass reasoning summary for OpenResponses
@@ -0,0 +1,32 @@
import { createOpenResponses } from '@ai-sdk/open-responses';
import { generateText } from 'ai';
import { run } from '../../lib/run';

const lmstudio = createOpenResponses({
name: 'lmstudio',
url: 'https://api.openai.com/v1/responses',
apiKey: process.env.OPENAI_API_KEY,
});

run(async () => {
const result = await generateText({
model: lmstudio('gpt-5'),
prompt: 'Invent a new holiday and describe its traditions.',
reasoning: 'high',
maxOutputTokens: 100,
providerOptions: {
lmstudio: {
reasoningSummary: 'detailed',
},
},
});

console.log(JSON.stringify(result.response.body, null, 2));

console.log('Reasoning:', result.reasoning);
console.log(result.text);
console.log();
console.log('Token usage:', result.usage);
console.log('Finish reason:', result.finishReason);
console.log('Request:', JSON.stringify(result.request, null, 2));
});
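
With these settings, the request body sent to the Responses endpoint should carry both the top-level effort and the provider-scoped summary. A hedged expectation, inferred from the snapshot tests later in this diff rather than from captured output:

// Expected `reasoning` portion of the outgoing request body
// (inferred from the unit tests below, not from captured output):
const expectedReasoning = {
  effort: 'high', // from the top-level `reasoning: 'high'` call option
  summary: 'detailed', // from providerOptions.lmstudio.reasoningSummary
};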
1 change: 1 addition & 0 deletions packages/open-responses/src/index.ts
@@ -1,2 +1,3 @@
export { VERSION } from './version';
export { createOpenResponses } from './open-responses-provider';
export type { OpenResponsesOptions } from './responses/open-responses-options';
1 change: 1 addition & 0 deletions packages/open-responses/src/open-responses-provider.ts
@@ -65,6 +65,7 @@ export function createOpenResponses(
const createResponsesModel = (modelId: string) => {
return new OpenResponsesLanguageModel(modelId, {
provider: `${providerName}.responses`,
providerOptionsName: providerName,
headers: getHeaders,
url: options.url,
fetch: options.fetch,
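
Note on providerOptionsName: this value keys the per-call options lookup, so the name chosen in createOpenResponses must match the key used under providerOptions, or the options are not picked up. A minimal sketch, assuming a hypothetical local endpoint (the URL is illustrative, not from this PR):

import { createOpenResponses } from '@ai-sdk/open-responses';
import { generateText } from 'ai';

const provider = createOpenResponses({
  name: 'lmstudio', // becomes providerOptionsName for the model
  url: 'http://localhost:1234/v1/responses', // hypothetical endpoint
});

await generateText({
  model: provider('gemma-7b-it'),
  prompt: 'Hello',
  providerOptions: {
    // key must match the `name` above, or the options are ignored
    lmstudio: { reasoningSummary: 'auto' },
  },
});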
@@ -2,6 +2,7 @@ import { FetchFunction } from '@ai-sdk/provider-utils';

export type OpenResponsesConfig = {
provider: string;
providerOptionsName: string;
url: string;
headers: () => Record<string, string | undefined>;
fetch?: FetchFunction;
@@ -25,6 +25,7 @@ describe('OpenResponsesLanguageModel', () => {
function createModel(modelId: string = 'gemma-7b-it') {
return new OpenResponsesLanguageModel(modelId, {
provider: 'lmstudio',
providerOptionsName: 'lmstudio',
url: URL,
headers: () => ({}),
generateId: mockId(),
@@ -210,6 +211,131 @@ describe('OpenResponsesLanguageModel', () => {
});
});

describe('providerOptions reasoning', () => {
beforeEach(() => {
prepareJsonFixtureResponse('lmstudio-basic.1');
});

it('should send reasoning.summary via providerOptions', async () => {
await createModel().doGenerate({
prompt: TEST_PROMPT,
providerOptions: {
lmstudio: { reasoningSummary: 'detailed' },
},
});

expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
{
"input": [
{
"content": [
{
"text": "Hello",
"type": "input_text",
},
],
"role": "user",
"type": "message",
},
],
"model": "gemma-7b-it",
"reasoning": {
"summary": "detailed",
},
}
`);
});

it('should combine top-level reasoning effort with providerOptions summary', async () => {
await createModel().doGenerate({
prompt: TEST_PROMPT,
reasoning: 'high',
providerOptions: {
lmstudio: { reasoningSummary: 'auto' },
},
});

expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
{
"input": [
{
"content": [
{
"text": "Hello",
"type": "input_text",
},
],
"role": "user",
"type": "message",
},
],
"model": "gemma-7b-it",
"reasoning": {
"effort": "high",
"summary": "auto",
},
}
`);
});

it('should send reasoning.summary concise via providerOptions', async () => {
await createModel().doGenerate({
prompt: TEST_PROMPT,
providerOptions: {
lmstudio: { reasoningSummary: 'concise' },
},
});

expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
{
"input": [
{
"content": [
{
"text": "Hello",
"type": "input_text",
},
],
"role": "user",
"type": "message",
},
],
"model": "gemma-7b-it",
"reasoning": {
"summary": "concise",
},
}
`);
});

it('should not set reasoning when providerOptions has no reasoning fields', async () => {
await createModel().doGenerate({
prompt: TEST_PROMPT,
providerOptions: {
lmstudio: {},
},
});

expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
{
"input": [
{
"content": [
{
"text": "Hello",
"type": "input_text",
},
],
"role": "user",
"type": "message",
},
],
"model": "gemma-7b-it",
}
`);
});
});

describe('tool call parsing', () => {
let result: LanguageModelV4GenerateResult;

@@ -17,6 +17,7 @@ import {
isCustomReasoning,
jsonSchema,
mapReasoningToProviderEffort,
parseProviderOptions,
ParseResult,
postJsonToApi,
} from '@ai-sdk/provider-utils';
@@ -32,6 +33,7 @@ import {
} from './open-responses-api';
import { mapOpenResponsesFinishReason } from './map-open-responses-finish-reason';
import { OpenResponsesConfig } from './open-responses-config';
import { openResponsesOptionsSchema } from './open-responses-options';

export class OpenResponsesLanguageModel implements LanguageModelV4 {
readonly specificationVersion = 'v4';
@@ -130,6 +132,12 @@ export class OpenResponsesLanguageModel implements LanguageModelV4 {
}
: undefined;

const openResponsesOptions = await parseProviderOptions({
provider: this.config.providerOptionsName,
providerOptions,
schema: openResponsesOptionsSchema,
});

const resolvedReasoningEffort = isCustomReasoning(reasoning)
? reasoning === 'none'
? 'none'
@@ -157,8 +165,16 @@ export class OpenResponsesLanguageModel implements LanguageModelV4 {
presence_penalty: presencePenalty,
frequency_penalty: frequencyPenalty,
reasoning:
resolvedReasoningEffort != null
? { effort: resolvedReasoningEffort }
resolvedReasoningEffort != null ||
openResponsesOptions?.reasoningSummary != null
? {
...(resolvedReasoningEffort != null && {
effort: resolvedReasoningEffort,
}),
...(openResponsesOptions?.reasoningSummary != null && {
summary: openResponsesOptions.reasoningSummary,
}),
}
: undefined,
tools: functionTools?.length ? functionTools : undefined,
tool_choice: convertedToolChoice,
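
The spread-based construction above only emits a reasoning object when at least one field is present, so requests without effort or summary omit the key entirely. A standalone sketch of the same logic (an assumed helper for illustration, not part of this PR):

type ReasoningSummary = 'concise' | 'detailed' | 'auto';

function buildReasoning(
  effort: string | undefined,
  summary: ReasoningSummary | undefined,
): { effort?: string; summary?: ReasoningSummary } | undefined {
  if (effort == null && summary == null) return undefined; // key omitted from body
  return {
    ...(effort != null && { effort }),
    ...(summary != null && { summary }),
  };
}

buildReasoning('high', 'auto'); // { effort: 'high', summary: 'auto' }
buildReasoning(undefined, 'detailed'); // { summary: 'detailed' }
buildReasoning(undefined, undefined); // undefined, so no `reasoning` field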
18 changes: 18 additions & 0 deletions packages/open-responses/src/responses/open-responses-options.ts
@@ -0,0 +1,18 @@
import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

export const openResponsesOptionsSchema = lazySchema(() =>
zodSchema(
z.object({
/**
* Controls reasoning summary output from the model.
* Valid values: 'concise', 'detailed', 'auto'.
*/
reasoningSummary: z.enum(['concise', 'detailed', 'auto']).nullish(),
}),
),
);

export type OpenResponsesOptions = InferSchema<
typeof openResponsesOptionsSchema
>;
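
Because this schema is handed to parseProviderOptions, values outside the enum should be rejected before the request is built. A hedged sketch of the validation the schema implies, exercising the same zod shape directly:

import { z } from 'zod/v4';

const options = z.object({
  reasoningSummary: z.enum(['concise', 'detailed', 'auto']).nullish(),
});

options.parse({ reasoningSummary: 'detailed' }); // ok
options.parse({}); // ok: the field is optional
// options.parse({ reasoningSummary: 'full' }); // throws: not in the enum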