Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 2357a8f

Browse files
committed Feb 13, 2025
feat(api): add support for storing chat completions (#2117)
1 parent 3f8d820 commit 2357a8f

22 files changed

+1350
-85
lines changed
 

‎.stats.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
1-
configured_endpoints: 69
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-dfb00c627f58e5180af7a9b29ed2f2aa0764a3b9daa6a32a1cc45bc8e48dfe15.yml
1+
configured_endpoints: 74
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4aa6ee65ba9efc789e05e6a5ef0883b2cadf06def8efd863dbf75e9e233067e1.yml

‎api.md

+13-1
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ from openai.types.chat import (
4848
ChatCompletionContentPartInputAudio,
4949
ChatCompletionContentPartRefusal,
5050
ChatCompletionContentPartText,
51+
ChatCompletionDeleted,
5152
ChatCompletionDeveloperMessageParam,
5253
ChatCompletionFunctionCallOption,
5354
ChatCompletionFunctionMessageParam,
@@ -59,6 +60,7 @@ from openai.types.chat import (
5960
ChatCompletionPredictionContent,
6061
ChatCompletionReasoningEffort,
6162
ChatCompletionRole,
63+
ChatCompletionStoreMessage,
6264
ChatCompletionStreamOptions,
6365
ChatCompletionSystemMessageParam,
6466
ChatCompletionTokenLogprob,
@@ -71,7 +73,17 @@ from openai.types.chat import (
7173

7274
Methods:
7375

74-
- <code title="post /chat/completions">client.chat.completions.<a href="./src/openai/resources/chat/completions.py">create</a>(\*\*<a href="src/openai/types/chat/completion_create_params.py">params</a>) -> <a href="./src/openai/types/chat/chat_completion.py">ChatCompletion</a></code>
76+
- <code title="post /chat/completions">client.chat.completions.<a href="./src/openai/resources/chat/completions/completions.py">create</a>(\*\*<a href="src/openai/types/chat/completion_create_params.py">params</a>) -> <a href="./src/openai/types/chat/chat_completion.py">ChatCompletion</a></code>
77+
- <code title="get /chat/completions/{completion_id}">client.chat.completions.<a href="./src/openai/resources/chat/completions/completions.py">retrieve</a>(completion_id) -> <a href="./src/openai/types/chat/chat_completion.py">ChatCompletion</a></code>
78+
- <code title="post /chat/completions/{completion_id}">client.chat.completions.<a href="./src/openai/resources/chat/completions/completions.py">update</a>(completion_id, \*\*<a href="src/openai/types/chat/completion_update_params.py">params</a>) -> <a href="./src/openai/types/chat/chat_completion.py">ChatCompletion</a></code>
79+
- <code title="get /chat/completions">client.chat.completions.<a href="./src/openai/resources/chat/completions/completions.py">list</a>(\*\*<a href="src/openai/types/chat/completion_list_params.py">params</a>) -> <a href="./src/openai/types/chat/chat_completion.py">SyncCursorPage[ChatCompletion]</a></code>
80+
- <code title="delete /chat/completions/{completion_id}">client.chat.completions.<a href="./src/openai/resources/chat/completions/completions.py">delete</a>(completion_id) -> <a href="./src/openai/types/chat/chat_completion_deleted.py">ChatCompletionDeleted</a></code>
81+
82+
### Messages
83+
84+
Methods:
85+
86+
- <code title="get /chat/completions/{completion_id}/messages">client.chat.completions.messages.<a href="./src/openai/resources/chat/completions/messages.py">list</a>(completion_id, \*\*<a href="src/openai/types/chat/completions/message_list_params.py">params</a>) -> <a href="./src/openai/types/chat/chat_completion_store_message.py">SyncCursorPage[ChatCompletionStoreMessage]</a></code>
7587

7688
# Embeddings
7789

‎src/openai/_utils/_sync.py

+18-2
Original file line numberDiff line numberDiff line change
@@ -7,16 +7,20 @@
77
from typing import Any, TypeVar, Callable, Awaitable
88
from typing_extensions import ParamSpec
99

10+
import anyio
11+
import sniffio
12+
import anyio.to_thread
13+
1014
T_Retval = TypeVar("T_Retval")
1115
T_ParamSpec = ParamSpec("T_ParamSpec")
1216

1317

1418
if sys.version_info >= (3, 9):
15-
to_thread = asyncio.to_thread
19+
_asyncio_to_thread = asyncio.to_thread
1620
else:
1721
# backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread
1822
# for Python 3.8 support
19-
async def to_thread(
23+
async def _asyncio_to_thread(
2024
func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
2125
) -> Any:
2226
"""Asynchronously run function *func* in a separate thread.
@@ -34,6 +38,17 @@ async def to_thread(
3438
return await loop.run_in_executor(None, func_call)
3539

3640

41+
async def to_thread(
42+
func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
43+
) -> T_Retval:
44+
if sniffio.current_async_library() == "asyncio":
45+
return await _asyncio_to_thread(func, *args, **kwargs)
46+
47+
return await anyio.to_thread.run_sync(
48+
functools.partial(func, *args, **kwargs),
49+
)
50+
51+
3752
# inspired by `asyncer`, https://github.com/tiangolo/asyncer
3853
def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]:
3954
"""
@@ -50,6 +65,7 @@ def blocking_func(arg1, arg2, kwarg1=None):
5065
# blocking code
5166
return result
5267
68+
5369
result = asyncify(blocking_function)(arg1, arg2, kwarg1=value1)
5470
```
5571

‎src/openai/cli/_api/chat/completions.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -104,13 +104,13 @@ def create(args: CLIChatCompletionCreateArgs) -> None:
104104
"stream": False,
105105
}
106106
if args.temperature is not None:
107-
params['temperature'] = args.temperature
107+
params["temperature"] = args.temperature
108108
if args.stop is not None:
109-
params['stop'] = args.stop
109+
params["stop"] = args.stop
110110
if args.top_p is not None:
111-
params['top_p'] = args.top_p
111+
params["top_p"] = args.top_p
112112
if args.n is not None:
113-
params['n'] = args.n
113+
params["n"] = args.n
114114
if args.stream:
115115
params["stream"] = args.stream # type: ignore
116116
if args.max_tokens is not None:

‎src/openai/lib/_parsing/_completions.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -45,13 +45,13 @@ def validate_input_tools(
4545
for tool in tools:
4646
if tool["type"] != "function":
4747
raise ValueError(
48-
f'Currently only `function` tool types support auto-parsing; Received `{tool["type"]}`',
48+
f"Currently only `function` tool types support auto-parsing; Received `{tool['type']}`",
4949
)
5050

5151
strict = tool["function"].get("strict")
5252
if strict is not True:
5353
raise ValueError(
54-
f'`{tool["function"]["name"]}` is not strict. Only `strict` function tools can be auto-parsed'
54+
f"`{tool['function']['name']}` is not strict. Only `strict` function tools can be auto-parsed"
5555
)
5656

5757

‎src/openai/resources/chat/chat.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44

55
from ..._compat import cached_property
66
from ..._resource import SyncAPIResource, AsyncAPIResource
7-
from .completions import (
7+
from .completions.completions import (
88
Completions,
99
AsyncCompletions,
1010
CompletionsWithRawResponse,
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2+
3+
from .messages import (
4+
Messages,
5+
AsyncMessages,
6+
MessagesWithRawResponse,
7+
AsyncMessagesWithRawResponse,
8+
MessagesWithStreamingResponse,
9+
AsyncMessagesWithStreamingResponse,
10+
)
11+
from .completions import (
12+
Completions,
13+
AsyncCompletions,
14+
CompletionsWithRawResponse,
15+
AsyncCompletionsWithRawResponse,
16+
CompletionsWithStreamingResponse,
17+
AsyncCompletionsWithStreamingResponse,
18+
)
19+
20+
__all__ = [
21+
"Messages",
22+
"AsyncMessages",
23+
"MessagesWithRawResponse",
24+
"AsyncMessagesWithRawResponse",
25+
"MessagesWithStreamingResponse",
26+
"AsyncMessagesWithStreamingResponse",
27+
"Completions",
28+
"AsyncCompletions",
29+
"CompletionsWithRawResponse",
30+
"AsyncCompletionsWithRawResponse",
31+
"CompletionsWithStreamingResponse",
32+
"AsyncCompletionsWithStreamingResponse",
33+
]

‎src/openai/resources/chat/completions.py ‎src/openai/resources/chat/completions/completions.py

+465-21
Large diffs are not rendered by default.
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,212 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2+
3+
from __future__ import annotations
4+
5+
from typing_extensions import Literal
6+
7+
import httpx
8+
9+
from .... import _legacy_response
10+
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
11+
from ...._utils import maybe_transform
12+
from ...._compat import cached_property
13+
from ...._resource import SyncAPIResource, AsyncAPIResource
14+
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
15+
from ....pagination import SyncCursorPage, AsyncCursorPage
16+
from ...._base_client import AsyncPaginator, make_request_options
17+
from ....types.chat.completions import message_list_params
18+
from ....types.chat.chat_completion_store_message import ChatCompletionStoreMessage
19+
20+
__all__ = ["Messages", "AsyncMessages"]
21+
22+
23+
class Messages(SyncAPIResource):
24+
@cached_property
25+
def with_raw_response(self) -> MessagesWithRawResponse:
26+
"""
27+
This property can be used as a prefix for any HTTP method call to return
28+
the raw response object instead of the parsed content.
29+
30+
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
31+
"""
32+
return MessagesWithRawResponse(self)
33+
34+
@cached_property
35+
def with_streaming_response(self) -> MessagesWithStreamingResponse:
36+
"""
37+
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
38+
39+
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
40+
"""
41+
return MessagesWithStreamingResponse(self)
42+
43+
def list(
44+
self,
45+
completion_id: str,
46+
*,
47+
after: str | NotGiven = NOT_GIVEN,
48+
limit: int | NotGiven = NOT_GIVEN,
49+
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
50+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
51+
# The extra values given here take precedence over values defined on the client or passed to this method.
52+
extra_headers: Headers | None = None,
53+
extra_query: Query | None = None,
54+
extra_body: Body | None = None,
55+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
56+
) -> SyncCursorPage[ChatCompletionStoreMessage]:
57+
"""Get the messages in a stored chat completion.
58+
59+
Only chat completions that have
60+
been created with the `store` parameter set to `true` will be returned.
61+
62+
Args:
63+
after: Identifier for the last message from the previous pagination request.
64+
65+
limit: Number of messages to retrieve.
66+
67+
order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
68+
for descending order. Defaults to `asc`.
69+
70+
extra_headers: Send extra headers
71+
72+
extra_query: Add additional query parameters to the request
73+
74+
extra_body: Add additional JSON properties to the request
75+
76+
timeout: Override the client-level default timeout for this request, in seconds
77+
"""
78+
if not completion_id:
79+
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
80+
return self._get_api_list(
81+
f"/chat/completions/{completion_id}/messages",
82+
page=SyncCursorPage[ChatCompletionStoreMessage],
83+
options=make_request_options(
84+
extra_headers=extra_headers,
85+
extra_query=extra_query,
86+
extra_body=extra_body,
87+
timeout=timeout,
88+
query=maybe_transform(
89+
{
90+
"after": after,
91+
"limit": limit,
92+
"order": order,
93+
},
94+
message_list_params.MessageListParams,
95+
),
96+
),
97+
model=ChatCompletionStoreMessage,
98+
)
99+
100+
101+
class AsyncMessages(AsyncAPIResource):
102+
@cached_property
103+
def with_raw_response(self) -> AsyncMessagesWithRawResponse:
104+
"""
105+
This property can be used as a prefix for any HTTP method call to return
106+
the raw response object instead of the parsed content.
107+
108+
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
109+
"""
110+
return AsyncMessagesWithRawResponse(self)
111+
112+
@cached_property
113+
def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
114+
"""
115+
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
116+
117+
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
118+
"""
119+
return AsyncMessagesWithStreamingResponse(self)
120+
121+
def list(
122+
self,
123+
completion_id: str,
124+
*,
125+
after: str | NotGiven = NOT_GIVEN,
126+
limit: int | NotGiven = NOT_GIVEN,
127+
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
128+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
129+
# The extra values given here take precedence over values defined on the client or passed to this method.
130+
extra_headers: Headers | None = None,
131+
extra_query: Query | None = None,
132+
extra_body: Body | None = None,
133+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
134+
) -> AsyncPaginator[ChatCompletionStoreMessage, AsyncCursorPage[ChatCompletionStoreMessage]]:
135+
"""Get the messages in a stored chat completion.
136+
137+
Only chat completions that have
138+
been created with the `store` parameter set to `true` will be returned.
139+
140+
Args:
141+
after: Identifier for the last message from the previous pagination request.
142+
143+
limit: Number of messages to retrieve.
144+
145+
order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
146+
for descending order. Defaults to `asc`.
147+
148+
extra_headers: Send extra headers
149+
150+
extra_query: Add additional query parameters to the request
151+
152+
extra_body: Add additional JSON properties to the request
153+
154+
timeout: Override the client-level default timeout for this request, in seconds
155+
"""
156+
if not completion_id:
157+
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
158+
return self._get_api_list(
159+
f"/chat/completions/{completion_id}/messages",
160+
page=AsyncCursorPage[ChatCompletionStoreMessage],
161+
options=make_request_options(
162+
extra_headers=extra_headers,
163+
extra_query=extra_query,
164+
extra_body=extra_body,
165+
timeout=timeout,
166+
query=maybe_transform(
167+
{
168+
"after": after,
169+
"limit": limit,
170+
"order": order,
171+
},
172+
message_list_params.MessageListParams,
173+
),
174+
),
175+
model=ChatCompletionStoreMessage,
176+
)
177+
178+
179+
class MessagesWithRawResponse:
180+
def __init__(self, messages: Messages) -> None:
181+
self._messages = messages
182+
183+
self.list = _legacy_response.to_raw_response_wrapper(
184+
messages.list,
185+
)
186+
187+
188+
class AsyncMessagesWithRawResponse:
189+
def __init__(self, messages: AsyncMessages) -> None:
190+
self._messages = messages
191+
192+
self.list = _legacy_response.async_to_raw_response_wrapper(
193+
messages.list,
194+
)
195+
196+
197+
class MessagesWithStreamingResponse:
198+
def __init__(self, messages: Messages) -> None:
199+
self._messages = messages
200+
201+
self.list = to_streamed_response_wrapper(
202+
messages.list,
203+
)
204+
205+
206+
class AsyncMessagesWithStreamingResponse:
207+
def __init__(self, messages: AsyncMessages) -> None:
208+
self._messages = messages
209+
210+
self.list = async_to_streamed_response_wrapper(
211+
messages.list,
212+
)

‎src/openai/types/chat/__init__.py

+4
Original file line numberDiff line numberDiff line change
@@ -6,21 +6,25 @@
66
from .chat_completion_role import ChatCompletionRole as ChatCompletionRole
77
from .chat_completion_audio import ChatCompletionAudio as ChatCompletionAudio
88
from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
9+
from .completion_list_params import CompletionListParams as CompletionListParams
910
from .parsed_chat_completion import (
1011
ParsedChoice as ParsedChoice,
1112
ParsedChatCompletion as ParsedChatCompletion,
1213
ParsedChatCompletionMessage as ParsedChatCompletionMessage,
1314
)
15+
from .chat_completion_deleted import ChatCompletionDeleted as ChatCompletionDeleted
1416
from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage
1517
from .chat_completion_modality import ChatCompletionModality as ChatCompletionModality
1618
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
19+
from .completion_update_params import CompletionUpdateParams as CompletionUpdateParams
1720
from .parsed_function_tool_call import (
1821
ParsedFunction as ParsedFunction,
1922
ParsedFunctionToolCall as ParsedFunctionToolCall,
2023
)
2124
from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam
2225
from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam
2326
from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
27+
from .chat_completion_store_message import ChatCompletionStoreMessage as ChatCompletionStoreMessage
2428
from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
2529
from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort
2630
from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2+
3+
from typing_extensions import Literal
4+
5+
from ..._models import BaseModel
6+
7+
__all__ = ["ChatCompletionDeleted"]
8+
9+
10+
class ChatCompletionDeleted(BaseModel):
11+
id: str
12+
"""The ID of the chat completion that was deleted."""
13+
14+
deleted: bool
15+
"""Whether the chat completion was deleted."""
16+
17+
object: Literal["chat.completion.deleted"]
18+
"""The type of object being deleted."""
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2+
3+
4+
from .chat_completion_message import ChatCompletionMessage
5+
6+
__all__ = ["ChatCompletionStoreMessage"]
7+
8+
9+
class ChatCompletionStoreMessage(ChatCompletionMessage):
10+
id: str
11+
"""The identifier of the chat message."""
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2+
3+
from __future__ import annotations
4+
5+
from typing import Optional
6+
from typing_extensions import Literal, TypedDict
7+
8+
from ..shared_params.metadata import Metadata
9+
10+
__all__ = ["CompletionListParams"]
11+
12+
13+
class CompletionListParams(TypedDict, total=False):
14+
after: str
15+
"""Identifier for the last chat completion from the previous pagination request."""
16+
17+
limit: int
18+
"""Number of chat completions to retrieve."""
19+
20+
metadata: Optional[Metadata]
21+
"""A list of metadata keys to filter the chat completions by. Example:
22+
23+
`metadata[key1]=value1&metadata[key2]=value2`
24+
"""
25+
26+
model: str
27+
"""The model used to generate the chat completions."""
28+
29+
order: Literal["asc", "desc"]
30+
"""Sort order for chat completions by timestamp.
31+
32+
Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
33+
"""
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2+
3+
from __future__ import annotations
4+
5+
from typing import Optional
6+
from typing_extensions import Required, TypedDict
7+
8+
from ..shared_params.metadata import Metadata
9+
10+
__all__ = ["CompletionUpdateParams"]
11+
12+
13+
class CompletionUpdateParams(TypedDict, total=False):
14+
metadata: Required[Optional[Metadata]]
15+
"""Set of 16 key-value pairs that can be attached to an object.
16+
17+
This can be useful for storing additional information about the object in a
18+
structured format, and querying for objects via API or the dashboard.
19+
20+
Keys are strings with a maximum length of 64 characters. Values are strings with
21+
a maximum length of 512 characters.
22+
"""
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2+
3+
from __future__ import annotations
4+
5+
from .message_list_params import MessageListParams as MessageListParams
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2+
3+
from __future__ import annotations
4+
5+
from typing_extensions import Literal, TypedDict
6+
7+
__all__ = ["MessageListParams"]
8+
9+
10+
class MessageListParams(TypedDict, total=False):
11+
after: str
12+
"""Identifier for the last message from the previous pagination request."""
13+
14+
limit: int
15+
"""Number of messages to retrieve."""
16+
17+
order: Literal["asc", "desc"]
18+
"""Sort order for messages by timestamp.
19+
20+
Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
21+
"""

‎src/openai/types/moderation.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22

3-
from typing import List
3+
from typing import List, Optional
44
from typing_extensions import Literal
55

66
from pydantic import Field as FieldInfo
@@ -38,14 +38,14 @@ class Categories(BaseModel):
3838
orientation, disability status, or caste.
3939
"""
4040

41-
illicit: bool
41+
illicit: Optional[bool] = None
4242
"""
4343
Content that includes instructions or advice that facilitate the planning or
4444
execution of wrongdoing, or that gives advice or instruction on how to commit
4545
illicit acts. For example, "how to shoplift" would fit this category.
4646
"""
4747

48-
illicit_violent: bool = FieldInfo(alias="illicit/violent")
48+
illicit_violent: Optional[bool] = FieldInfo(alias="illicit/violent", default=None)
4949
"""
5050
Content that includes instructions or advice that facilitate the planning or
5151
execution of wrongdoing that also includes violence, or that gives advice or
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2+
3+
from __future__ import annotations
4+
5+
import os
6+
from typing import Any, cast
7+
8+
import pytest
9+
10+
from openai import OpenAI, AsyncOpenAI
11+
from tests.utils import assert_matches_type
12+
from openai.pagination import SyncCursorPage, AsyncCursorPage
13+
from openai.types.chat import ChatCompletionStoreMessage
14+
15+
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
16+
17+
18+
class TestMessages:
19+
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
20+
21+
@parametrize
22+
def test_method_list(self, client: OpenAI) -> None:
23+
message = client.chat.completions.messages.list(
24+
completion_id="completion_id",
25+
)
26+
assert_matches_type(SyncCursorPage[ChatCompletionStoreMessage], message, path=["response"])
27+
28+
@parametrize
29+
def test_method_list_with_all_params(self, client: OpenAI) -> None:
30+
message = client.chat.completions.messages.list(
31+
completion_id="completion_id",
32+
after="after",
33+
limit=0,
34+
order="asc",
35+
)
36+
assert_matches_type(SyncCursorPage[ChatCompletionStoreMessage], message, path=["response"])
37+
38+
@parametrize
39+
def test_raw_response_list(self, client: OpenAI) -> None:
40+
response = client.chat.completions.messages.with_raw_response.list(
41+
completion_id="completion_id",
42+
)
43+
44+
assert response.is_closed is True
45+
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
46+
message = response.parse()
47+
assert_matches_type(SyncCursorPage[ChatCompletionStoreMessage], message, path=["response"])
48+
49+
@parametrize
50+
def test_streaming_response_list(self, client: OpenAI) -> None:
51+
with client.chat.completions.messages.with_streaming_response.list(
52+
completion_id="completion_id",
53+
) as response:
54+
assert not response.is_closed
55+
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
56+
57+
message = response.parse()
58+
assert_matches_type(SyncCursorPage[ChatCompletionStoreMessage], message, path=["response"])
59+
60+
assert cast(Any, response.is_closed) is True
61+
62+
@parametrize
63+
def test_path_params_list(self, client: OpenAI) -> None:
64+
with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
65+
client.chat.completions.messages.with_raw_response.list(
66+
completion_id="",
67+
)
68+
69+
70+
class TestAsyncMessages:
71+
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
72+
73+
@parametrize
74+
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
75+
message = await async_client.chat.completions.messages.list(
76+
completion_id="completion_id",
77+
)
78+
assert_matches_type(AsyncCursorPage[ChatCompletionStoreMessage], message, path=["response"])
79+
80+
@parametrize
81+
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
82+
message = await async_client.chat.completions.messages.list(
83+
completion_id="completion_id",
84+
after="after",
85+
limit=0,
86+
order="asc",
87+
)
88+
assert_matches_type(AsyncCursorPage[ChatCompletionStoreMessage], message, path=["response"])
89+
90+
@parametrize
91+
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
92+
response = await async_client.chat.completions.messages.with_raw_response.list(
93+
completion_id="completion_id",
94+
)
95+
96+
assert response.is_closed is True
97+
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
98+
message = response.parse()
99+
assert_matches_type(AsyncCursorPage[ChatCompletionStoreMessage], message, path=["response"])
100+
101+
@parametrize
102+
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
103+
async with async_client.chat.completions.messages.with_streaming_response.list(
104+
completion_id="completion_id",
105+
) as response:
106+
assert not response.is_closed
107+
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
108+
109+
message = await response.parse()
110+
assert_matches_type(AsyncCursorPage[ChatCompletionStoreMessage], message, path=["response"])
111+
112+
assert cast(Any, response.is_closed) is True
113+
114+
@parametrize
115+
async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
116+
with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
117+
await async_client.chat.completions.messages.with_raw_response.list(
118+
completion_id="",
119+
)

‎tests/api_resources/chat/test_completions.py

+310
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,10 @@
1010

1111
from openai import OpenAI, AsyncOpenAI
1212
from tests.utils import assert_matches_type
13+
from openai.pagination import SyncCursorPage, AsyncCursorPage
1314
from openai.types.chat import (
1415
ChatCompletion,
16+
ChatCompletionDeleted,
1517
)
1618

1719
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -248,6 +250,160 @@ def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
248250

249251
assert cast(Any, response.is_closed) is True
250252

253+
@parametrize
254+
def test_method_retrieve(self, client: OpenAI) -> None:
255+
completion = client.chat.completions.retrieve(
256+
"completion_id",
257+
)
258+
assert_matches_type(ChatCompletion, completion, path=["response"])
259+
260+
@parametrize
261+
def test_raw_response_retrieve(self, client: OpenAI) -> None:
262+
response = client.chat.completions.with_raw_response.retrieve(
263+
"completion_id",
264+
)
265+
266+
assert response.is_closed is True
267+
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
268+
completion = response.parse()
269+
assert_matches_type(ChatCompletion, completion, path=["response"])
270+
271+
@parametrize
272+
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
273+
with client.chat.completions.with_streaming_response.retrieve(
274+
"completion_id",
275+
) as response:
276+
assert not response.is_closed
277+
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
278+
279+
completion = response.parse()
280+
assert_matches_type(ChatCompletion, completion, path=["response"])
281+
282+
assert cast(Any, response.is_closed) is True
283+
284+
@parametrize
285+
def test_path_params_retrieve(self, client: OpenAI) -> None:
286+
with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
287+
client.chat.completions.with_raw_response.retrieve(
288+
"",
289+
)
290+
291+
@parametrize
292+
def test_method_update(self, client: OpenAI) -> None:
293+
completion = client.chat.completions.update(
294+
completion_id="completion_id",
295+
metadata={"foo": "string"},
296+
)
297+
assert_matches_type(ChatCompletion, completion, path=["response"])
298+
299+
@parametrize
300+
def test_raw_response_update(self, client: OpenAI) -> None:
301+
response = client.chat.completions.with_raw_response.update(
302+
completion_id="completion_id",
303+
metadata={"foo": "string"},
304+
)
305+
306+
assert response.is_closed is True
307+
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
308+
completion = response.parse()
309+
assert_matches_type(ChatCompletion, completion, path=["response"])
310+
311+
@parametrize
312+
def test_streaming_response_update(self, client: OpenAI) -> None:
313+
with client.chat.completions.with_streaming_response.update(
314+
completion_id="completion_id",
315+
metadata={"foo": "string"},
316+
) as response:
317+
assert not response.is_closed
318+
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
319+
320+
completion = response.parse()
321+
assert_matches_type(ChatCompletion, completion, path=["response"])
322+
323+
assert cast(Any, response.is_closed) is True
324+
325+
@parametrize
326+
def test_path_params_update(self, client: OpenAI) -> None:
327+
with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
328+
client.chat.completions.with_raw_response.update(
329+
completion_id="",
330+
metadata={"foo": "string"},
331+
)
332+
333+
@parametrize
334+
def test_method_list(self, client: OpenAI) -> None:
335+
completion = client.chat.completions.list()
336+
assert_matches_type(SyncCursorPage[ChatCompletion], completion, path=["response"])
337+
338+
@parametrize
339+
def test_method_list_with_all_params(self, client: OpenAI) -> None:
340+
completion = client.chat.completions.list(
341+
after="after",
342+
limit=0,
343+
metadata={"foo": "string"},
344+
model="model",
345+
order="asc",
346+
)
347+
assert_matches_type(SyncCursorPage[ChatCompletion], completion, path=["response"])
348+
349+
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
    """list() via the raw-response wrapper parses into a cursor page."""
    raw = client.chat.completions.with_raw_response.list()

    assert raw.http_request.headers.get("X-Stainless-Lang") == "python"
    assert raw.is_closed is True
    page = raw.parse()
    assert_matches_type(SyncCursorPage[ChatCompletion], page, path=["response"])
357+
358+
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
    """list() via the streaming wrapper stays open in the context, closes after."""
    with client.chat.completions.with_streaming_response.list() as stream:
        assert stream.http_request.headers.get("X-Stainless-Lang") == "python"
        assert not stream.is_closed

        page = stream.parse()
        assert_matches_type(SyncCursorPage[ChatCompletion], page, path=["response"])

    # Leaving the context manager must close the underlying response.
    assert cast(Any, stream.is_closed) is True
368+
369+
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
    """delete() returns a ChatCompletionDeleted acknowledgement."""
    result = client.chat.completions.delete(
        "completion_id",
    )
    assert_matches_type(ChatCompletionDeleted, result, path=["response"])
375+
376+
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
    """delete() via the raw-response wrapper parses into ChatCompletionDeleted."""
    raw = client.chat.completions.with_raw_response.delete(
        "completion_id",
    )

    assert raw.http_request.headers.get("X-Stainless-Lang") == "python"
    assert raw.is_closed is True
    result = raw.parse()
    assert_matches_type(ChatCompletionDeleted, result, path=["response"])
386+
387+
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
    """delete() via the streaming wrapper stays open in the context, closes after."""
    with client.chat.completions.with_streaming_response.delete(
        "completion_id",
    ) as stream:
        assert stream.http_request.headers.get("X-Stainless-Lang") == "python"
        assert not stream.is_closed

        result = stream.parse()
        assert_matches_type(ChatCompletionDeleted, result, path=["response"])

    # Leaving the context manager must close the underlying response.
    assert cast(Any, stream.is_closed) is True
399+
400+
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
    """An empty completion_id path segment is rejected client-side."""
    expected = r"Expected a non-empty value for `completion_id` but received ''"
    with pytest.raises(ValueError, match=expected):
        client.chat.completions.with_raw_response.delete(
            "",
        )
406+
251407
@parametrize
252408
def test_method_create_disallows_pydantic(self, client: OpenAI) -> None:
253409
class MyModel(pydantic.BaseModel):
@@ -497,6 +653,160 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncOpe
497653

498654
assert cast(Any, response.is_closed) is True
499655

656+
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
    """retrieve() returns the stored ChatCompletion (async client)."""
    result = await async_client.chat.completions.retrieve(
        "completion_id",
    )
    assert_matches_type(ChatCompletion, result, path=["response"])
662+
663+
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
    """Async retrieve() via the raw-response wrapper parses into ChatCompletion."""
    raw = await async_client.chat.completions.with_raw_response.retrieve(
        "completion_id",
    )

    assert raw.http_request.headers.get("X-Stainless-Lang") == "python"
    assert raw.is_closed is True
    result = raw.parse()
    assert_matches_type(ChatCompletion, result, path=["response"])
673+
674+
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
    """Async retrieve() via the streaming wrapper: open inside, closed after."""
    async with async_client.chat.completions.with_streaming_response.retrieve(
        "completion_id",
    ) as stream:
        assert stream.http_request.headers.get("X-Stainless-Lang") == "python"
        assert not stream.is_closed

        result = await stream.parse()
        assert_matches_type(ChatCompletion, result, path=["response"])

    # Leaving the async context manager must close the underlying response.
    assert cast(Any, stream.is_closed) is True
686+
687+
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
    """An empty completion_id path segment is rejected client-side (async)."""
    expected = r"Expected a non-empty value for `completion_id` but received ''"
    with pytest.raises(ValueError, match=expected):
        await async_client.chat.completions.with_raw_response.retrieve(
            "",
        )
693+
694+
@parametrize
async def test_method_update(self, async_client: AsyncOpenAI) -> None:
    """update() sets metadata and returns the ChatCompletion (async client)."""
    result = await async_client.chat.completions.update(
        completion_id="completion_id",
        metadata={"foo": "string"},
    )
    assert_matches_type(ChatCompletion, result, path=["response"])
701+
702+
@parametrize
async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
    """Async update() via the raw-response wrapper parses into ChatCompletion."""
    raw = await async_client.chat.completions.with_raw_response.update(
        completion_id="completion_id",
        metadata={"foo": "string"},
    )

    assert raw.http_request.headers.get("X-Stainless-Lang") == "python"
    assert raw.is_closed is True
    result = raw.parse()
    assert_matches_type(ChatCompletion, result, path=["response"])
713+
714+
@parametrize
async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
    """Async update() via the streaming wrapper: open inside, closed after."""
    async with async_client.chat.completions.with_streaming_response.update(
        completion_id="completion_id",
        metadata={"foo": "string"},
    ) as stream:
        assert stream.http_request.headers.get("X-Stainless-Lang") == "python"
        assert not stream.is_closed

        result = await stream.parse()
        assert_matches_type(ChatCompletion, result, path=["response"])

    # Leaving the async context manager must close the underlying response.
    assert cast(Any, stream.is_closed) is True
727+
728+
@parametrize
async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
    """An empty completion_id path segment is rejected client-side (async)."""
    expected = r"Expected a non-empty value for `completion_id` but received ''"
    with pytest.raises(ValueError, match=expected):
        await async_client.chat.completions.with_raw_response.update(
            completion_id="",
            metadata={"foo": "string"},
        )
735+
736+
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
    """list() with no filters yields an async cursor page of ChatCompletion."""
    page = await async_client.chat.completions.list()
    assert_matches_type(AsyncCursorPage[ChatCompletion], page, path=["response"])
740+
741+
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
    """Async list() accepts every documented filter/pagination parameter at once."""
    page = await async_client.chat.completions.list(
        after="after",
        limit=0,
        metadata={"foo": "string"},
        model="model",
        order="asc",
    )
    assert_matches_type(AsyncCursorPage[ChatCompletion], page, path=["response"])
751+
752+
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
    """Async list() via the raw-response wrapper parses into a cursor page."""
    raw = await async_client.chat.completions.with_raw_response.list()

    assert raw.http_request.headers.get("X-Stainless-Lang") == "python"
    assert raw.is_closed is True
    page = raw.parse()
    assert_matches_type(AsyncCursorPage[ChatCompletion], page, path=["response"])
760+
761+
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
    """Async list() via the streaming wrapper: open inside, closed after."""
    async with async_client.chat.completions.with_streaming_response.list() as stream:
        assert stream.http_request.headers.get("X-Stainless-Lang") == "python"
        assert not stream.is_closed

        page = await stream.parse()
        assert_matches_type(AsyncCursorPage[ChatCompletion], page, path=["response"])

    # Leaving the async context manager must close the underlying response.
    assert cast(Any, stream.is_closed) is True
771+
772+
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
    """delete() returns a ChatCompletionDeleted acknowledgement (async client)."""
    result = await async_client.chat.completions.delete(
        "completion_id",
    )
    assert_matches_type(ChatCompletionDeleted, result, path=["response"])
778+
779+
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
    """Async delete() via the raw-response wrapper parses into ChatCompletionDeleted."""
    raw = await async_client.chat.completions.with_raw_response.delete(
        "completion_id",
    )

    assert raw.http_request.headers.get("X-Stainless-Lang") == "python"
    assert raw.is_closed is True
    result = raw.parse()
    assert_matches_type(ChatCompletionDeleted, result, path=["response"])
789+
790+
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
    """Async delete() via the streaming wrapper: open inside, closed after."""
    async with async_client.chat.completions.with_streaming_response.delete(
        "completion_id",
    ) as stream:
        assert stream.http_request.headers.get("X-Stainless-Lang") == "python"
        assert not stream.is_closed

        result = await stream.parse()
        assert_matches_type(ChatCompletionDeleted, result, path=["response"])

    # Leaving the async context manager must close the underlying response.
    assert cast(Any, stream.is_closed) is True
802+
803+
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
    """An empty completion_id path segment is rejected client-side (async)."""
    expected = r"Expected a non-empty value for `completion_id` but received ''"
    with pytest.raises(ValueError, match=expected):
        await async_client.chat.completions.with_raw_response.delete(
            "",
        )
809+
500810
@parametrize
501811
async def test_method_create_disallows_pydantic(self, async_client: AsyncOpenAI) -> None:
502812
class MyModel(pydantic.BaseModel):

‎tests/lib/test_azure.py

+7-17
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,6 @@ def token_provider() -> str:
153153

154154

155155
class TestAzureLogging:
156-
157156
@pytest.fixture(autouse=True)
158157
def logger_with_filter(self) -> logging.Logger:
159158
logger = logging.getLogger("openai")
@@ -165,9 +164,7 @@ def logger_with_filter(self) -> logging.Logger:
165164
def test_azure_api_key_redacted(self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture) -> None:
166165
respx_mock.post(
167166
"https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01"
168-
).mock(
169-
return_value=httpx.Response(200, json={"model": "gpt-4"})
170-
)
167+
).mock(return_value=httpx.Response(200, json={"model": "gpt-4"}))
171168

172169
client = AzureOpenAI(
173170
api_version="2024-06-01",
@@ -182,14 +179,11 @@ def test_azure_api_key_redacted(self, respx_mock: MockRouter, caplog: pytest.Log
182179
if is_dict(record.args) and record.args.get("headers") and is_dict(record.args["headers"]):
183180
assert record.args["headers"]["api-key"] == "<redacted>"
184181

185-
186182
@pytest.mark.respx()
187183
def test_azure_bearer_token_redacted(self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture) -> None:
188184
respx_mock.post(
189185
"https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01"
190-
).mock(
191-
return_value=httpx.Response(200, json={"model": "gpt-4"})
192-
)
186+
).mock(return_value=httpx.Response(200, json={"model": "gpt-4"}))
193187

194188
client = AzureOpenAI(
195189
api_version="2024-06-01",
@@ -204,15 +198,12 @@ def test_azure_bearer_token_redacted(self, respx_mock: MockRouter, caplog: pytes
204198
if is_dict(record.args) and record.args.get("headers") and is_dict(record.args["headers"]):
205199
assert record.args["headers"]["Authorization"] == "<redacted>"
206200

207-
208201
@pytest.mark.asyncio
209202
@pytest.mark.respx()
210203
async def test_azure_api_key_redacted_async(self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture) -> None:
211204
respx_mock.post(
212205
"https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01"
213-
).mock(
214-
return_value=httpx.Response(200, json={"model": "gpt-4"})
215-
)
206+
).mock(return_value=httpx.Response(200, json={"model": "gpt-4"}))
216207

217208
client = AsyncAzureOpenAI(
218209
api_version="2024-06-01",
@@ -227,15 +218,14 @@ async def test_azure_api_key_redacted_async(self, respx_mock: MockRouter, caplog
227218
if is_dict(record.args) and record.args.get("headers") and is_dict(record.args["headers"]):
228219
assert record.args["headers"]["api-key"] == "<redacted>"
229220

230-
231221
@pytest.mark.asyncio
232222
@pytest.mark.respx()
233-
async def test_azure_bearer_token_redacted_async(self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture) -> None:
223+
async def test_azure_bearer_token_redacted_async(
224+
self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture
225+
) -> None:
234226
respx_mock.post(
235227
"https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01"
236-
).mock(
237-
return_value=httpx.Response(200, json={"model": "gpt-4"})
238-
)
228+
).mock(return_value=httpx.Response(200, json={"model": "gpt-4"}))
239229

240230
client = AsyncAzureOpenAI(
241231
api_version="2024-06-01",

‎tests/test_client.py

+46-32
Original file line numberDiff line numberDiff line change
@@ -23,11 +23,13 @@
2323

2424
from openai import OpenAI, AsyncOpenAI, APIResponseValidationError
2525
from openai._types import Omit
26+
from openai._utils import maybe_transform
2627
from openai._models import BaseModel, FinalRequestOptions
2728
from openai._constants import RAW_RESPONSE_HEADER
2829
from openai._streaming import Stream, AsyncStream
2930
from openai._exceptions import OpenAIError, APIStatusError, APITimeoutError, APIResponseValidationError
3031
from openai._base_client import DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, make_request_options
32+
from openai.types.chat.completion_create_params import CompletionCreateParamsNonStreaming
3133

3234
from .utils import update_env
3335

@@ -724,14 +726,17 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> No
724726
"/chat/completions",
725727
body=cast(
726728
object,
727-
dict(
728-
messages=[
729-
{
730-
"role": "user",
731-
"content": "Say this is a test",
732-
}
733-
],
734-
model="gpt-4o",
729+
maybe_transform(
730+
dict(
731+
messages=[
732+
{
733+
"role": "user",
734+
"content": "Say this is a test",
735+
}
736+
],
737+
model="gpt-4o",
738+
),
739+
CompletionCreateParamsNonStreaming,
735740
),
736741
),
737742
cast_to=httpx.Response,
@@ -750,14 +755,17 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> Non
750755
"/chat/completions",
751756
body=cast(
752757
object,
753-
dict(
754-
messages=[
755-
{
756-
"role": "user",
757-
"content": "Say this is a test",
758-
}
759-
],
760-
model="gpt-4o",
758+
maybe_transform(
759+
dict(
760+
messages=[
761+
{
762+
"role": "user",
763+
"content": "Say this is a test",
764+
}
765+
],
766+
model="gpt-4o",
767+
),
768+
CompletionCreateParamsNonStreaming,
761769
),
762770
),
763771
cast_to=httpx.Response,
@@ -1591,14 +1599,17 @@ async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter)
15911599
"/chat/completions",
15921600
body=cast(
15931601
object,
1594-
dict(
1595-
messages=[
1596-
{
1597-
"role": "user",
1598-
"content": "Say this is a test",
1599-
}
1600-
],
1601-
model="gpt-4o",
1602+
maybe_transform(
1603+
dict(
1604+
messages=[
1605+
{
1606+
"role": "user",
1607+
"content": "Say this is a test",
1608+
}
1609+
],
1610+
model="gpt-4o",
1611+
),
1612+
CompletionCreateParamsNonStreaming,
16021613
),
16031614
),
16041615
cast_to=httpx.Response,
@@ -1617,14 +1628,17 @@ async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter)
16171628
"/chat/completions",
16181629
body=cast(
16191630
object,
1620-
dict(
1621-
messages=[
1622-
{
1623-
"role": "user",
1624-
"content": "Say this is a test",
1625-
}
1626-
],
1627-
model="gpt-4o",
1631+
maybe_transform(
1632+
dict(
1633+
messages=[
1634+
{
1635+
"role": "user",
1636+
"content": "Say this is a test",
1637+
}
1638+
],
1639+
model="gpt-4o",
1640+
),
1641+
CompletionCreateParamsNonStreaming,
16281642
),
16291643
),
16301644
cast_to=httpx.Response,

0 commit comments

Comments
 (0)
Please sign in to comment.