Skip to content

Commit 12f55c6

Browse files
committed
fix(openai_responses): OpenAIResponsesObject is not complete
1 parent 27d6bec commit 12f55c6

File tree

2 files changed

+179
-13
lines changed

2 files changed

+179
-13
lines changed

llama_stack/apis/agents/openai_responses.py

Lines changed: 98 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -4,17 +4,22 @@
44
# This source code is licensed under the terms described in the LICENSE file in
55
# the root directory of this source tree.
66

7-
from typing import Annotated, Any, Literal
7+
from typing import Annotated, Any, Literal, Optional, TypeAlias, Union
88

99
from pydantic import BaseModel, Field
1010
from typing_extensions import TypedDict
1111

12+
from llama_stack.apis.tools.openai_tool_choice import (
13+
ToolChoiceAllowed,
14+
ToolChoiceCustom,
15+
ToolChoiceFunction,
16+
ToolChoiceMcp,
17+
ToolChoiceTypes
18+
)
1219
from llama_stack.apis.vector_io import SearchRankingOptions as FileSearchRankingOptions
1320
from llama_stack.schema_utils import json_schema_type, register_schema
1421

15-
# NOTE(ashwin): this file is literally a copy of the OpenAI responses API schema. We should probably
16-
# take their YAML and generate this file automatically. Their YAML is available.
17-
# Union of the tool-choice object forms accepted by the OpenAI Responses API:
# a hosted tool type, an allowed-tools set, a function, an MCP tool, or a custom tool.
OpenAIResponsesToolChoice: TypeAlias = Union[ToolChoiceTypes, ToolChoiceAllowed, ToolChoiceFunction, ToolChoiceMcp, ToolChoiceCustom]
1823

1924
@json_schema_type
2025
class OpenAIResponseError(BaseModel):
@@ -316,21 +321,90 @@ class OpenAIResponseText(BaseModel):
316321
:param format: (Optional) Text format configuration specifying output format requirements
317322
"""
318323

319-
format: OpenAIResponseTextFormat | None = None
324+
# Default to text format to avoid breaking the loading of old responses
325+
# before the field was added. New responses will have this set always.
326+
format: OpenAIResponseTextFormat | None = Field(default_factory=lambda: OpenAIResponseTextFormat(type="text"))
327+
328+
329+
@json_schema_type
class OpenAIResponseIncompleteDetails(BaseModel):
    """Details about why a response stopped before completing.

    :param reason: Reason for the response being incomplete
    """

    reason: str
337+
338+
339+
@json_schema_type
class OpenAIResponsePrompt(BaseModel):
    """Reference to a prompt template and its variables.

    :param id: The unique identifier of the prompt template to use.
    :param variables: (Optional) Map of values to substitute in for variables in your prompt. The substitution values can either be strings, or other Response input types like images or files.
    :param version: (Optional) Version of the prompt template.
    """

    id: str
    variables: dict[str, Any] | None = None
    version: str | None = None
351+
352+
353+
@json_schema_type
class OpenAIResponseReasoning(BaseModel):
    """Configuration options for reasoning models.

    :param effort: (Optional) The effort level to use for reasoning.
    :param generate_summary: Deprecated. Use the generate_summary_text field instead. (Optional) Whether to generate a summary of the reasoning process.
    :param summary: (Optional) Reasoning summary setting; mirrors the OpenAI Responses API `reasoning.summary` field.
    """

    effort: Literal["low", "medium", "high", "minimal"] | None = None
    generate_summary: str | None = None
    summary: str | None = None
364+
365+
366+
@json_schema_type
class OpenAIResponsesTool(BaseModel):
    """Function tool definition attached to a response.

    :param description: (Optional) The description of the function, including guidance on
        when and how to call it, and guidance about what to tell the user when calling
        (if anything).
    :param name: (Optional) The name of the function.
    :param parameters: (Optional) Parameters of the function in JSON Schema.
    :param type: (Optional) The type of the tool, i.e. `function`.
    """

    description: str | None = None
    name: str | None = None
    parameters: object | None = None
    type: Literal["function"] | None = None
320382

321383

322384
@json_schema_type
323385
class OpenAIResponseObject(BaseModel):
324386
"""Complete OpenAI response object containing generation results and metadata.
325387
388+
Based on OpenAI Responses API schema: https://github.com/openai/openai-python/blob/34014aedbb8946c03e97e5c8d72e03ad2259cd7c/src/openai/types/responses/response.py#L38
389+
326390
:param created_at: Unix timestamp when the response was created
327391
:param error: (Optional) Error details if the response generation failed
328392
:param id: Unique identifier for this response
393+
:param incomplete_details: (Optional) Incomplete details if the response is incomplete
394+
:param instructions: (Optional) A system (or developer) message inserted into the model's context.
395+
:param max_output_tokens: (Optional) An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens.
396+
:param max_tool_calls: (Optional) The maximum number of total calls to built-in tools that can be processed in a response.
397+
:param metadata: (Optional) Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.
329398
:param model: Model identifier used for generation
330399
:param object: Object type identifier, always "response"
331400
:param output: List of generated output items (messages, tool calls, etc.)
332401
:param parallel_tool_calls: Whether tool calls can be executed in parallel
333402
:param previous_response_id: (Optional) ID of the previous response in a conversation
403+
:param prompt: (Optional) Reference to a prompt template and its variables.
404+
:param prompt_cache_key: (Optional)Used to cache responses for similar requests to optimize your cache hit rates. Replaces the user field.
405+
:param reasoning: (Optional) Configuration options for reasoning models.
406+
:param safety_identifier: (Optional) A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.
407+
:param service_tier: (Optional) Specifies the processing type used for serving the request.
334408
:param status: Current status of the response generation
335409
:param temperature: (Optional) Sampling temperature used for generation
336410
:param text: Text formatting configuration for the response
@@ -340,21 +414,32 @@ class OpenAIResponseObject(BaseModel):
340414
"""
341415

342416
created_at: int
343-
error: OpenAIResponseError | None = None
417+
error: Optional[OpenAIResponseError] = None
344418
id: str
419+
incomplete_details: Optional[OpenAIResponseIncompleteDetails] = None # TODO: unimplemented
420+
instructions: Optional[str | list[str]] = None # TODO: unimplemented
421+
max_output_tokens: Optional[int] = None # TODO: unimplemented
422+
max_tool_calls: Optional[int] = None # TODO: unimplemented
423+
metadata: Optional[dict[str, str]] = None # TODO: unimplemented
345424
model: str
346425
object: Literal["response"] = "response"
347426
output: list[OpenAIResponseOutput]
348427
parallel_tool_calls: bool = False
349-
previous_response_id: str | None = None
428+
previous_response_id: Optional[str] = None
429+
prompt: Optional[OpenAIResponsePrompt] = None
430+
prompt_cache_key: Optional[str] = None
431+
reasoning: Optional[OpenAIResponseReasoning] = None
432+
safety_identifier: Optional[str] = None
433+
service_tier: Optional[str] = None # TODO: unimplemented
350434
status: str
351435
temperature: float | None = None
352-
# Default to text format to avoid breaking the loading of old responses
353-
# before the field was added. New responses will have this set always.
354-
text: OpenAIResponseText = OpenAIResponseText(format=OpenAIResponseTextFormat(type="text"))
355-
top_p: float | None = None
356-
truncation: str | None = None
357-
user: str | None = None
436+
text: Optional[OpenAIResponseText] = None
437+
tool_choice: Optional[OpenAIResponsesToolChoice] = None # TODO: unimplemented
438+
tools: Optional[list[OpenAIResponsesTool]] = None # TODO: unimplemented
439+
top_logprobs: Optional[int] = None # TODO: unimplemented
440+
top_p: Optional[float] = None
441+
user: Optional[str] = None # Deprecated: This field is being replaced by safety_identifier and prompt_cache_key
442+
truncation: Optional[str] = None
358443

359444

360445
@json_schema_type
Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,81 @@
1+
from typing import Dict, List, Literal, Optional, TypeAlias
2+
3+
from pydantic import BaseModel
4+
5+
# Global tool-choice mode accepted by the Responses API ("none" | "auto" | "required"),
# as opposed to the object forms modeled by the classes below.
ToolChoiceOptions: TypeAlias = Literal["none", "auto", "required"]
7+
8+
class ToolChoiceTypes(BaseModel):
    type: Literal[
        "file_search",
        "web_search_preview",
        "computer_use_preview",
        "web_search_preview_2025_03_11",
        "image_generation",
        "code_interpreter",
    ]
    """The type of hosted tool the model should use.

    Allowed values are:

    - `file_search`
    - `web_search_preview`
    - `computer_use_preview`
    - `web_search_preview_2025_03_11`
    - `image_generation`
    - `code_interpreter`
    """
27+
28+
29+
class ToolChoiceAllowed(BaseModel):
    mode: Literal["auto", "required"]
    """Constrains the tools available to the model to a pre-defined set.

    With `auto`, the model may pick from among the allowed tools or simply
    generate a message; with `required`, it must call one or more of the
    allowed tools.
    """

    tools: List[Dict[str, object]]
    """Tool definitions the model is permitted to call.

    For the Responses API, the list of tool definitions might look like:

    ```json
    [
      { "type": "function", "name": "get_weather" },
      { "type": "mcp", "server_label": "deepwiki" },
      { "type": "image_generation" }
    ]
    ```
    """

    type: Literal["allowed_tools"]
    """Allowed tool configuration type. Always `allowed_tools`."""
55+
56+
57+
class ToolChoiceFunction(BaseModel):
    name: str
    """Name of the function the model is required to call."""

    type: Literal["function"]
    """Discriminator for function tool choices; always `function`."""
63+
64+
65+
class ToolChoiceMcp(BaseModel):
    server_label: str
    """Label of the MCP server whose tool should be used."""

    type: Literal["mcp"]
    """Discriminator for MCP tool choices; always `mcp`."""

    name: Optional[str] = None
    """Name of the tool to call on that server, if restricted to one."""
74+
75+
76+
class ToolChoiceCustom(BaseModel):
    name: str
    """Name of the custom tool the model is required to call."""

    type: Literal["custom"]
    """Discriminator for custom tool choices; always `custom`."""

0 commit comments

Comments
 (0)