
Commit ac534dc

feat(api): gpt 5.1
Parent: 139b376


60 files changed: +1486, -239 lines


.stats.yml

Lines changed: 3 additions & 3 deletions
@@ -1,4 +1,4 @@
 configured_endpoints: 136
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-eeba8addf3a5f412e5ce8d22031e60c61650cee3f5d9e587a2533f6818a249ea.yml
-openapi_spec_hash: 0a4d8ad2469823ce24a3fd94f23f1c2b
-config_hash: 630eea84bb3067d25640419af058ed56
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-ca24bc4d8125b5153514ce643c4e3220f25971b7d67ca384d56d493c72c0d977.yml
+openapi_spec_hash: c6f048c7b3d29f4de48fde0e845ba33f
+config_hash: b876221dfb213df9f0a999e75d38a65e

api.md

Lines changed: 9 additions & 0 deletions
@@ -732,12 +732,16 @@ Types:

 ```python
 from openai.types.responses import (
+    ApplyPatchTool,
     ComputerTool,
     CustomTool,
     EasyInputMessage,
     FileSearchTool,
+    FunctionShellTool,
     FunctionTool,
     Response,
+    ResponseApplyPatchToolCall,
+    ResponseApplyPatchToolCallOutput,
     ResponseAudioDeltaEvent,
     ResponseAudioDoneEvent,
     ResponseAudioTranscriptDeltaEvent,
@@ -774,6 +778,9 @@ from openai.types.responses import (
     ResponseFunctionCallArgumentsDoneEvent,
     ResponseFunctionCallOutputItem,
     ResponseFunctionCallOutputItemList,
+    ResponseFunctionShellCallOutputContent,
+    ResponseFunctionShellToolCall,
+    ResponseFunctionShellToolCallOutput,
     ResponseFunctionToolCall,
     ResponseFunctionToolCallItem,
     ResponseFunctionToolCallOutputItem,
@@ -836,10 +843,12 @@ from openai.types.responses import (
     ResponseWebSearchCallSearchingEvent,
     Tool,
     ToolChoiceAllowed,
+    ToolChoiceApplyPatch,
     ToolChoiceCustom,
     ToolChoiceFunction,
     ToolChoiceMcp,
     ToolChoiceOptions,
+    ToolChoiceShell,
     ToolChoiceTypes,
     WebSearchPreviewTool,
     WebSearchTool,
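
A minimal sketch (not part of the commit) of how the new item types exported from `openai.types.responses` might be used. The request itself, including the model name and the assumption that the server actually returns shell/apply_patch items, is illustrative:

```python
# Hedged sketch: isinstance checks against the new response item types.
# Whether these items appear depends on the tools enabled for the request,
# which is not shown here.
from openai import OpenAI
from openai.types.responses import (
    ResponseApplyPatchToolCall,
    ResponseFunctionShellToolCall,
)

client = OpenAI()

response = client.responses.create(
    model="gpt-5.1",  # model name taken from the commit message
    input="List the files in the repository root.",
)

for item in response.output:
    if isinstance(item, ResponseFunctionShellToolCall):
        print("shell tool call:", item)
    elif isinstance(item, ResponseApplyPatchToolCall):
        print("apply_patch tool call:", item)
    else:
        print(item.type)
```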

src/openai/lib/_parsing/_responses.py

Lines changed: 4 additions & 0 deletions
@@ -108,6 +108,10 @@ def parse_response(
             or output.type == "image_generation_call"
             or output.type == "code_interpreter_call"
             or output.type == "local_shell_call"
+            or output.type == "shell_call"
+            or output.type == "shell_call_output"
+            or output.type == "apply_patch_call"
+            or output.type == "apply_patch_call_output"
             or output.type == "mcp_list_tools"
             or output.type == "exec"
             or output.type == "custom_tool_call"
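
A hedged sketch of what this means at the call site: `client.responses.parse(...)` now carries the new shell and apply_patch items through unchanged rather than rejecting them as unknown output types. The model, prompt, and the assumption that such items are emitted at all are illustrative:

```python
# Hedged sketch: after this change, shell/apply_patch items flow through
# parse_response untouched and show up in `parsed.output` as-is.
from openai import OpenAI

client = OpenAI()

parsed = client.responses.parse(
    model="gpt-5.1",  # illustrative
    input="Apply the prepared patch to README.md.",
)

for item in parsed.output:
    if item.type in (
        "shell_call",
        "shell_call_output",
        "apply_patch_call",
        "apply_patch_call_output",
    ):
        print("passed through:", item.type)
```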

src/openai/resources/batches.py

Lines changed: 14 additions & 8 deletions
@@ -46,7 +46,9 @@ def create(
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
+        endpoint: Literal[
+            "/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations"
+        ],
         input_file_id: str,
         metadata: Optional[Metadata] | Omit = omit,
         output_expires_after: batch_create_params.OutputExpiresAfter | Omit = omit,
@@ -65,9 +67,10 @@ def create(
               is supported.

           endpoint: The endpoint to be used for all requests in the batch. Currently
-              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
-              are supported. Note that `/v1/embeddings` batches are also restricted to a
-              maximum of 50,000 embedding inputs across all requests in the batch.
+              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
+              and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+              restricted to a maximum of 50,000 embedding inputs across all requests in the
+              batch.

           input_file_id: The ID of an uploaded file that contains requests for the new batch.

@@ -261,7 +264,9 @@ async def create(
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
+        endpoint: Literal[
+            "/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations"
+        ],
         input_file_id: str,
         metadata: Optional[Metadata] | Omit = omit,
         output_expires_after: batch_create_params.OutputExpiresAfter | Omit = omit,
@@ -280,9 +285,10 @@ async def create(
               is supported.

           endpoint: The endpoint to be used for all requests in the batch. Currently
-              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
-              are supported. Note that `/v1/embeddings` batches are also restricted to a
-              maximum of 50,000 embedding inputs across all requests in the batch.
+              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
+              and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+              restricted to a maximum of 50,000 embedding inputs across all requests in the
+              batch.

           input_file_id: The ID of an uploaded file that contains requests for the new batch.

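A hedged usage sketch of the widened `endpoint` parameter; the input file ID is a placeholder for a previously uploaded `.jsonl` file of moderation requests:

```python
# Hedged sketch: /v1/moderations is now an accepted batch endpoint.
from openai import OpenAI

client = OpenAI()

batch = client.batches.create(
    completion_window="24h",      # the only supported completion window
    endpoint="/v1/moderations",   # newly allowed value in this commit
    input_file_id="file-abc123",  # placeholder file ID
)
print(batch.id, batch.status)
```
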
src/openai/resources/beta/assistants.py

Lines changed: 37 additions & 21 deletions
@@ -98,12 +98,16 @@ def create(

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-              effort can result in faster responses and fewer tokens used on reasoning in a
-              response.
+              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+              reasoning effort can result in faster responses and fewer tokens used on
+              reasoning in a response.

-              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-              effort.
+              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+                calls are supported for all reasoning values in gpt-5.1.
+              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+                support `none`.
+              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -308,12 +312,16 @@ def update(

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-              effort can result in faster responses and fewer tokens used on reasoning in a
-              response.
+              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+              reasoning effort can result in faster responses and fewer tokens used on
+              reasoning in a response.

-              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-              effort.
+              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+                calls are supported for all reasoning values in gpt-5.1.
+              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+                support `none`.
+              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -557,12 +565,16 @@ async def create(

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-              effort can result in faster responses and fewer tokens used on reasoning in a
-              response.
+              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+              reasoning effort can result in faster responses and fewer tokens used on
+              reasoning in a response.

-              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-              effort.
+              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+                calls are supported for all reasoning values in gpt-5.1.
+              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+                support `none`.
+              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -767,12 +779,16 @@ async def update(

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-              effort can result in faster responses and fewer tokens used on reasoning in a
-              response.
-
-              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-              effort.
+              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+              reasoning effort can result in faster responses and fewer tokens used on
+              reasoning in a response.
+
+              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+                calls are supported for all reasoning values in gpt-5.1.
+              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+                support `none`.
+              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
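
A hedged sketch of the newly documented value at the call site; the assistant name and instructions are placeholders, and whether a given model accepts `none` is decided server-side:

```python
# Hedged sketch: reasoning_effort="none" is now a documented value
# (the default for gpt-5.1).
from openai import OpenAI

client = OpenAI()

assistant = client.beta.assistants.create(
    model="gpt-5.1",
    name="fast-triage-bot",          # placeholder
    instructions="Answer briefly.",  # placeholder
    reasoning_effort="none",         # newly documented value
)
print(assistant.id)
```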

src/openai/resources/beta/threads/runs/runs.py

Lines changed: 55 additions & 31 deletions
@@ -169,12 +169,16 @@ def create(

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-              effort can result in faster responses and fewer tokens used on reasoning in a
-              response.
+              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+              reasoning effort can result in faster responses and fewer tokens used on
+              reasoning in a response.

-              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-              effort.
+              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+                calls are supported for all reasoning values in gpt-5.1.
+              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+                support `none`.
+              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -326,12 +330,16 @@ def create(

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-              effort can result in faster responses and fewer tokens used on reasoning in a
-              response.
+              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+              reasoning effort can result in faster responses and fewer tokens used on
+              reasoning in a response.

-              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-              effort.
+              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+                calls are supported for all reasoning values in gpt-5.1.
+              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+                support `none`.
+              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -479,12 +487,16 @@ def create(

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-              effort can result in faster responses and fewer tokens used on reasoning in a
-              response.
+              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+              reasoning effort can result in faster responses and fewer tokens used on
+              reasoning in a response.

-              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-              effort.
+              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+                calls are supported for all reasoning values in gpt-5.1.
+              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+                support `none`.
+              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -1608,12 +1620,16 @@ async def create(

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-              effort can result in faster responses and fewer tokens used on reasoning in a
-              response.
+              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+              reasoning effort can result in faster responses and fewer tokens used on
+              reasoning in a response.

-              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-              effort.
+              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+                calls are supported for all reasoning values in gpt-5.1.
+              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+                support `none`.
+              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -1765,12 +1781,16 @@ async def create(

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-              effort can result in faster responses and fewer tokens used on reasoning in a
-              response.
+              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+              reasoning effort can result in faster responses and fewer tokens used on
+              reasoning in a response.

-              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-              effort.
+              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+                calls are supported for all reasoning values in gpt-5.1.
+              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+                support `none`.
+              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -1918,12 +1938,16 @@ async def create(

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-              effort can result in faster responses and fewer tokens used on reasoning in a
-              response.
-
-              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-              effort.
+              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+              reasoning effort can result in faster responses and fewer tokens used on
+              reasoning in a response.
+
+              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+                calls are supported for all reasoning values in gpt-5.1.
+              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+                support `none`.
+              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),

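A hedged sketch of the same parameter on run creation; the assistant ID is a placeholder:

```python
# Hedged sketch: reasoning_effort on runs.create now documents "none"
# (default for gpt-5.1) alongside low/medium/high.
from openai import OpenAI

client = OpenAI()

thread = client.beta.threads.create()
run = client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id="asst_abc123",  # placeholder assistant ID
    reasoning_effort="none",     # newly documented value
)
print(run.id, run.status)
```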