
Commit f570d91

stainless-app[bot] authored and meorphis committed Mar 18, 2025

chore(internal): codegen related update (#2222)

1 parent c71d4c9 commit f570d91

12 files changed: +55 −37 lines
 

‎.stats.yml

+1 −1

@@ -1,2 +1,2 @@
 configured_endpoints: 81
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f763c1a35c8b9b02f1e31b9b2e09e21f98bfe8413e5079c86cbb07da2dd7779b.yml

‎src/openai/resources/batches.py

+8 −8

@@ -49,7 +49,7 @@ def create(
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
+        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
         input_file_id: str,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -67,9 +67,9 @@ def create(
               is supported.
 
           endpoint: The endpoint to be used for all requests in the batch. Currently
-              `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
-              Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
-              embedding inputs across all requests in the batch.
+              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+              are supported. Note that `/v1/embeddings` batches are also restricted to a
+              maximum of 50,000 embedding inputs across all requests in the batch.
 
           input_file_id: The ID of an uploaded file that contains requests for the new batch.
 
@@ -259,7 +259,7 @@ async def create(
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
+        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
         input_file_id: str,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -277,9 +277,9 @@ async def create(
               is supported.
 
           endpoint: The endpoint to be used for all requests in the batch. Currently
-              `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
-              Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
-              embedding inputs across all requests in the batch.
+              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+              are supported. Note that `/v1/embeddings` batches are also restricted to a
+              maximum of 50,000 embedding inputs across all requests in the batch.
 
           input_file_id: The ID of an uploaded file that contains requests for the new batch.
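
In practice, the widened literal lets a batch target the Responses API directly. A minimal sketch (the file ID is a hypothetical placeholder for an uploaded JSONL batch file):

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Create a batch whose requests all hit the newly supported /v1/responses endpoint.
batch = client.batches.create(
    completion_window="24h",
    endpoint="/v1/responses",
    input_file_id="file-abc123",  # hypothetical ID of an uploaded .jsonl file
)
print(batch.id, batch.status)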

‎src/openai/types/batch_create_params.py

+5 −4

@@ -17,12 +17,13 @@ class BatchCreateParams(TypedDict, total=False):
     Currently only `24h` is supported.
     """
 
-    endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
+    endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
     """The endpoint to be used for all requests in the batch.
 
-    Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are
-    supported. Note that `/v1/embeddings` batches are also restricted to a maximum
-    of 50,000 embedding inputs across all requests in the batch.
+    Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and
+    `/v1/completions` are supported. Note that `/v1/embeddings` batches are also
+    restricted to a maximum of 50,000 embedding inputs across all requests in the
+    batch.
     """
 
     input_file_id: Required[str]
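
Each line of the uploaded input file is one request against the chosen endpoint. A sketch of a single `/v1/responses` batch line, assuming the same request envelope used for other batch endpoints (the `custom_id`, model name, and input are illustrative):

import json

line = json.dumps(
    {
        "custom_id": "request-1",  # caller-chosen ID used to match outputs to inputs
        "method": "POST",
        "url": "/v1/responses",  # must match the batch's `endpoint`
        "body": {"model": "gpt-4o", "input": "Say hello"},
    }
)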

‎src/openai/types/chat/chat_completion_chunk.py

+5 −2

@@ -142,6 +142,9 @@ class ChatCompletionChunk(BaseModel):
     """
     An optional field that will only be present when you set
     `stream_options: {"include_usage": true}` in your request. When present, it
-    contains a null value except for the last chunk which contains the token usage
-    statistics for the entire request.
+    contains a null value **except for the last chunk** which contains the token
+    usage statistics for the entire request.
+
+    **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+    final usage chunk which contains the total token usage for the request.
     """
‎src/openai/types/chat/chat_completion_content_part_param.py

+1 −1

@@ -22,7 +22,7 @@ class FileFile(TypedDict, total=False):
     file_id: str
     """The ID of an uploaded file to use as input."""
 
-    file_name: str
+    filename: str
     """The name of the file, used when passing the file to the model as a string."""
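
Callers passing file content parts need the renamed key. A sketch of the new shape (the file ID, filename, and model are illustrative):

from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",  # illustrative
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Summarize this document."},
                {
                    "type": "file",
                    "file": {"file_id": "file-abc123", "filename": "report.pdf"},  # was `file_name`
                },
            ],
        }
    ],
)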

‎src/openai/types/chat/chat_completion_stream_options_param.py

+5 −2

@@ -12,6 +12,9 @@ class ChatCompletionStreamOptionsParam(TypedDict, total=False):
     """If set, an additional chunk will be streamed before the `data: [DONE]` message.
 
     The `usage` field on this chunk shows the token usage statistics for the entire
-    request, and the `choices` field will always be an empty array. All other chunks
-    will also include a `usage` field, but with a null value.
+    request, and the `choices` field will always be an empty array.
+
+    All other chunks will also include a `usage` field, but with a null value.
+    **NOTE:** If the stream is interrupted, you may not receive the final usage
+    chunk which contains the total token usage for the request.
     """

‎src/openai/types/responses/response_function_tool_call.py

+3 −3

@@ -9,9 +9,6 @@
 
 
 class ResponseFunctionToolCall(BaseModel):
-    id: str
-    """The unique ID of the function tool call."""
-
     arguments: str
     """A JSON string of the arguments to pass to the function."""
 
@@ -24,6 +21,9 @@ class ResponseFunctionToolCall(BaseModel):
     type: Literal["function_call"]
     """The type of the function tool call. Always `function_call`."""
 
+    id: Optional[str] = None
+    """The unique ID of the function tool call."""
+
     status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
     """The status of the item.
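
Since `id` can now be `None`, downstream code should not assume it is present. A sketch constructing the model directly (field values are illustrative):

from openai.types.responses import ResponseFunctionToolCall

call = ResponseFunctionToolCall(
    arguments='{"city": "Paris"}',
    call_id="call_abc123",
    name="get_weather",
    type="function_call",
)  # `id` now defaults to None

# Prefer `call_id` (always present) when a stable key is needed.
key = call.id or call.call_id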

‎src/openai/types/responses/response_function_tool_call_param.py

+3 −3

@@ -8,9 +8,6 @@
 
 
 class ResponseFunctionToolCallParam(TypedDict, total=False):
-    id: Required[str]
-    """The unique ID of the function tool call."""
-
     arguments: Required[str]
     """A JSON string of the arguments to pass to the function."""
 
@@ -23,6 +20,9 @@ class ResponseFunctionToolCallParam(TypedDict, total=False):
     type: Required[Literal["function_call"]]
     """The type of the function tool call. Always `function_call`."""
 
+    id: str
+    """The unique ID of the function tool call."""
+
     status: Literal["in_progress", "completed", "incomplete"]
     """The status of the item.
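
The param side mirrors this: a function-call item can be passed back as input without an `id`. A sketch (values are illustrative):

from openai.types.responses import ResponseFunctionToolCallParam

item: ResponseFunctionToolCallParam = {
    "arguments": '{"city": "Paris"}',
    "call_id": "call_abc123",
    "name": "get_weather",
    "type": "function_call",
    # no "id" key required anymore
}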

‎src/openai/types/responses/response_usage.py

+12 −1

@@ -3,7 +3,15 @@
 
 from ..._models import BaseModel
 
-__all__ = ["ResponseUsage", "OutputTokensDetails"]
+__all__ = ["ResponseUsage", "InputTokensDetails", "OutputTokensDetails"]
+
+
+class InputTokensDetails(BaseModel):
+    cached_tokens: int
+    """The number of tokens that were retrieved from the cache.
+
+    [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+    """
 
 
 class OutputTokensDetails(BaseModel):
@@ -15,6 +23,9 @@ class ResponseUsage(BaseModel):
     input_tokens: int
     """The number of input tokens."""
 
+    input_tokens_details: InputTokensDetails
+    """A detailed breakdown of the input tokens."""
+
     output_tokens: int
     """The number of output tokens."""
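
A sketch of reading the new breakdown from a Responses API result (the model name is illustrative):

from openai import OpenAI

client = OpenAI()

response = client.responses.create(model="gpt-4o", input="Hello")
if response.usage is not None:
    details = response.usage.input_tokens_details
    print(f"cached: {details.cached_tokens} of {response.usage.input_tokens} input tokens")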

‎src/openai/types/shared/reasoning.py

+1 −1

@@ -20,7 +20,7 @@ class Reasoning(BaseModel):
     """
 
     generate_summary: Optional[Literal["concise", "detailed"]] = None
-    """**o-series models only**
+    """**computer_use_preview only**
 
     A summary of the reasoning performed by the model. This can be useful for
     debugging and understanding the model's reasoning process. One of `concise` or

‎src/openai/types/shared_params/reasoning.py

+3 −3

@@ -3,15 +3,15 @@
 from __future__ import annotations
 
 from typing import Optional
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import Literal, TypedDict
 
 from ..shared.reasoning_effort import ReasoningEffort
 
 __all__ = ["Reasoning"]
 
 
 class Reasoning(TypedDict, total=False):
-    effort: Required[Optional[ReasoningEffort]]
+    effort: Optional[ReasoningEffort]
     """**o-series models only**
 
     Constrains effort on reasoning for
@@ -21,7 +21,7 @@ class Reasoning(TypedDict, total=False):
     """
 
     generate_summary: Optional[Literal["concise", "detailed"]]
-    """**o-series models only**
+    """**computer_use_preview only**
 
     A summary of the reasoning performed by the model. This can be useful for
     debugging and understanding the model's reasoning process. One of `concise` or
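
With `effort` no longer Required, either key can be supplied on its own. A sketch, assuming the package re-exports this TypedDict as `openai.types.shared_params.Reasoning`:

from openai.types.shared_params import Reasoning

effort_only: Reasoning = {"effort": "high"}
summary_only: Reasoning = {"generate_summary": "concise"}  # computer_use_preview only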

‎tests/api_resources/test_batches.py

+8 −8

@@ -22,7 +22,7 @@ class TestBatches:
     def test_method_create(self, client: OpenAI) -> None:
         batch = client.batches.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         )
         assert_matches_type(Batch, batch, path=["response"])
@@ -31,7 +31,7 @@ def test_method_create(self, client: OpenAI) -> None:
     def test_method_create_with_all_params(self, client: OpenAI) -> None:
         batch = client.batches.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
             metadata={"foo": "string"},
         )
@@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
     def test_raw_response_create(self, client: OpenAI) -> None:
         response = client.batches.with_raw_response.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         )
 
@@ -54,7 +54,7 @@ def test_raw_response_create(self, client: OpenAI) -> None:
     def test_streaming_response_create(self, client: OpenAI) -> None:
         with client.batches.with_streaming_response.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         ) as response:
             assert not response.is_closed
@@ -182,7 +182,7 @@ class TestAsyncBatches:
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
         batch = await async_client.batches.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         )
         assert_matches_type(Batch, batch, path=["response"])
@@ -191,7 +191,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
         batch = await async_client.batches.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
             metadata={"foo": "string"},
         )
@@ -201,7 +201,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
     async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.batches.with_raw_response.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         )
 
@@ -214,7 +214,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
     async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
         async with async_client.batches.with_streaming_response.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         ) as response:
             assert not response.is_closed
