Skip to content

Commit 73ee587

Browse files
authored
fix: #563 enable explicit model override for prompt (#589)
1 parent 3023dc0 commit 73ee587

File tree

6 files changed

+87
-1
lines changed

6 files changed

+87
-1
lines changed

.changeset/blue-symbols-kiss.md

Lines changed: 6 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -0,0 +1,6 @@
1+
---
2+
'@openai/agents-openai': patch
3+
'@openai/agents-core': patch
4+
---
5+
6+
fix: #563 enable explicit model override for prompt

packages/agents-core/src/model.ts

Lines changed: 7 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -269,6 +269,13 @@ export type ModelRequest = {
269269
* The prompt template to use for the model, if any.
270270
*/
271271
prompt?: Prompt;
272+
273+
/**
274+
* When true, the resolved model should override the model configured in the prompt template.
275+
* Providers that support prompt templates should include the explicit model name in the request
276+
* even when a prompt is supplied.
277+
*/
278+
overridePromptModel?: boolean;
272279
};
273280

274281
export type ModelResponse = {

packages/agents-core/src/run.ts

Lines changed: 4 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -558,6 +558,8 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
558558
state._context,
559559
),
560560
prompt: await state._currentAgent.getPrompt(state._context),
561+
// Explicit agent/run config models should take precedence over prompt defaults.
562+
...(explictlyModelSet ? { overridePromptModel: true } : {}),
561563
input: turnInput,
562564
previousResponseId,
563565
conversationId,
@@ -957,6 +959,8 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
957959
result.state._context,
958960
),
959961
prompt: await currentAgent.getPrompt(result.state._context),
962+
// Streaming requests should also honor explicitly chosen models.
963+
...(explictlyModelSet ? { overridePromptModel: true } : {}),
960964
input: turnInput,
961965
previousResponseId,
962966
conversationId,

packages/agents-core/test/run.test.ts

Lines changed: 33 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -157,6 +157,39 @@ describe('Runner.run', () => {
157157
await expect(run(agent, 'fail')).rejects.toThrow('No response found');
158158
});
159159

160+
it('sets overridePromptModel when agent supplies a prompt and explicit model', async () => {
161+
class CapturingModel implements Model {
162+
lastRequest?: ModelRequest;
163+
async getResponse(request: ModelRequest): Promise<ModelResponse> {
164+
this.lastRequest = request;
165+
return {
166+
output: [fakeModelMessage('override')],
167+
usage: new Usage(),
168+
};
169+
}
170+
async *getStreamedResponse(
171+
_request: ModelRequest,
172+
): AsyncIterable<protocol.StreamEvent> {
173+
yield* [];
174+
throw new Error('Not implemented');
175+
}
176+
}
177+
178+
const capturingModel = new CapturingModel();
179+
180+
const agent = new Agent({
181+
name: 'Prompted',
182+
instructions: 'Use the prompt.',
183+
model: capturingModel,
184+
prompt: { promptId: 'prompt_123' },
185+
});
186+
187+
await run(agent, 'hello');
188+
189+
expect(capturingModel.lastRequest?.prompt).toBeDefined();
190+
expect(capturingModel.lastRequest?.overridePromptModel).toBe(true);
191+
});
192+
160193
it('emits agent_end lifecycle event for non-streaming agents', async () => {
161194
const agent = new Agent({
162195
name: 'TestAgent',

packages/agents-openai/src/openaiResponsesModel.ts

Lines changed: 6 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -877,8 +877,13 @@ export class OpenAIResponsesModel implements Model {
877877
parallelToolCalls = request.modelSettings.parallelToolCalls;
878878
}
879879

880+
// When a prompt template already declares a model, skip sending the agent's default model.
881+
// If the caller explicitly requests an override, include the resolved model name in the request.
882+
const shouldSendModel =
883+
!request.prompt || request.overridePromptModel === true;
884+
880885
const requestData = {
881-
...(!request.prompt ? { model: this.#model } : {}),
886+
...(shouldSendModel ? { model: this.#model } : {}),
882887
instructions: normalizeInstructions(request.systemInstructions),
883888
input,
884889
include,

packages/agents-openai/test/openaiResponsesModel.test.ts

Lines changed: 31 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -104,6 +104,37 @@ describe('OpenAIResponsesModel', () => {
104104
});
105105
});
106106

107+
it('includes model when overridePromptModel is true', async () => {
108+
await withTrace('test', async () => {
109+
const fakeResponse = { id: 'res-prompt-override', usage: {}, output: [] };
110+
const createMock = vi.fn().mockResolvedValue(fakeResponse);
111+
const fakeClient = {
112+
responses: { create: createMock },
113+
} as unknown as OpenAI;
114+
const model = new OpenAIResponsesModel(fakeClient, 'gpt-override');
115+
116+
const request = {
117+
systemInstructions: undefined,
118+
prompt: { promptId: 'pmpt_456' },
119+
input: 'hello',
120+
modelSettings: {},
121+
tools: [],
122+
outputType: 'text',
123+
handoffs: [],
124+
tracing: false,
125+
signal: undefined,
126+
overridePromptModel: true,
127+
};
128+
129+
await model.getResponse(request as any);
130+
131+
expect(createMock).toHaveBeenCalledTimes(1);
132+
const [args] = createMock.mock.calls[0];
133+
expect(args.model).toBe('gpt-override');
134+
expect(args.prompt).toMatchObject({ id: 'pmpt_456' });
135+
});
136+
});
137+
107138
it('normalizes systemInstructions so empty strings are omitted', async () => {
108139
await withTrace('test', async () => {
109140
const fakeResponse = {

0 commit comments

Comments (0)