
Commit 6aa424d
Committed Nov 5, 2024

fix: add new prediction param to all methods

Parent: b32507d

File tree: 3 files changed, +51 -7 lines changed

src/openai/resources/beta/chat/completions.py (+9)
@@ -33,6 +33,7 @@
 from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
 from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
 from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
 from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
 
 __all__ = ["Completions", "AsyncCompletions"]
@@ -76,6 +77,7 @@ def parse(
     modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
     n: Optional[int] | NotGiven = NOT_GIVEN,
     parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+    prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
     presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
     seed: Optional[int] | NotGiven = NOT_GIVEN,
     service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
@@ -169,6 +171,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma
     "modalities": modalities,
     "n": n,
     "parallel_tool_calls": parallel_tool_calls,
+    "prediction": prediction,
     "presence_penalty": presence_penalty,
     "response_format": _type_to_response_format(response_format),
     "seed": seed,
@@ -217,6 +220,7 @@ def stream(
     modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
     n: Optional[int] | NotGiven = NOT_GIVEN,
     parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+    prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
     presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
     seed: Optional[int] | NotGiven = NOT_GIVEN,
     service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
@@ -281,6 +285,7 @@ def stream(
     modalities=modalities,
     n=n,
     parallel_tool_calls=parallel_tool_calls,
+    prediction=prediction,
     presence_penalty=presence_penalty,
     seed=seed,
     service_tier=service_tier,
@@ -343,6 +348,7 @@ async def parse(
     modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
     n: Optional[int] | NotGiven = NOT_GIVEN,
     parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+    prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
     presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
     seed: Optional[int] | NotGiven = NOT_GIVEN,
     service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
@@ -436,6 +442,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma
     "modalities": modalities,
     "n": n,
     "parallel_tool_calls": parallel_tool_calls,
+    "prediction": prediction,
     "presence_penalty": presence_penalty,
     "response_format": _type_to_response_format(response_format),
     "seed": seed,
@@ -484,6 +491,7 @@ def stream(
     modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
     n: Optional[int] | NotGiven = NOT_GIVEN,
     parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+    prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
     presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
     seed: Optional[int] | NotGiven = NOT_GIVEN,
     service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
@@ -549,6 +557,7 @@ def stream(
     modalities=modalities,
     n=n,
     parallel_tool_calls=parallel_tool_calls,
+    prediction=prediction,
     presence_penalty=presence_penalty,
     seed=seed,
     service_tier=service_tier,

tests/lib/chat/test_completions.py (+36 -6)
@@ -77,7 +77,12 @@ def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pyte
     system_fingerprint='fp_b40fb1c6fb',
     usage=CompletionUsage(
         completion_tokens=37,
-        completion_tokens_details=CompletionTokensDetails(audio_tokens=None, reasoning_tokens=0),
+        completion_tokens_details=CompletionTokensDetails(
+            accepted_prediction_tokens=None,
+            audio_tokens=None,
+            reasoning_tokens=0,
+            rejected_prediction_tokens=None
+        ),
         prompt_tokens=14,
         prompt_tokens_details=None,
         total_tokens=51
@@ -139,7 +144,12 @@ class Location(BaseModel):
     system_fingerprint='fp_5050236cbd',
     usage=CompletionUsage(
         completion_tokens=14,
-        completion_tokens_details=CompletionTokensDetails(audio_tokens=None, reasoning_tokens=0),
+        completion_tokens_details=CompletionTokensDetails(
+            accepted_prediction_tokens=None,
+            audio_tokens=None,
+            reasoning_tokens=0,
+            rejected_prediction_tokens=None
+        ),
         prompt_tokens=79,
         prompt_tokens_details=None,
         total_tokens=93
@@ -203,7 +213,12 @@ class Location(BaseModel):
     system_fingerprint='fp_b40fb1c6fb',
     usage=CompletionUsage(
         completion_tokens=14,
-        completion_tokens_details=CompletionTokensDetails(audio_tokens=None, reasoning_tokens=0),
+        completion_tokens_details=CompletionTokensDetails(
+            accepted_prediction_tokens=None,
+            audio_tokens=None,
+            reasoning_tokens=0,
+            rejected_prediction_tokens=None
+        ),
         prompt_tokens=88,
         prompt_tokens_details=None,
         total_tokens=102
@@ -396,7 +411,12 @@ class CalendarEvent:
     system_fingerprint='fp_7568d46099',
     usage=CompletionUsage(
         completion_tokens=17,
-        completion_tokens_details=CompletionTokensDetails(audio_tokens=None, reasoning_tokens=0),
+        completion_tokens_details=CompletionTokensDetails(
+            accepted_prediction_tokens=None,
+            audio_tokens=None,
+            reasoning_tokens=0,
+            rejected_prediction_tokens=None
+        ),
         prompt_tokens=92,
         prompt_tokens_details=None,
         total_tokens=109
@@ -847,7 +867,12 @@ class Location(BaseModel):
     system_fingerprint='fp_5050236cbd',
     usage=CompletionUsage(
         completion_tokens=14,
-        completion_tokens_details=CompletionTokensDetails(audio_tokens=None, reasoning_tokens=0),
+        completion_tokens_details=CompletionTokensDetails(
+            accepted_prediction_tokens=None,
+            audio_tokens=None,
+            reasoning_tokens=0,
+            rejected_prediction_tokens=None
+        ),
         prompt_tokens=79,
         prompt_tokens_details=None,
         total_tokens=93
@@ -917,7 +942,12 @@ class Location(BaseModel):
     system_fingerprint='fp_5050236cbd',
     usage=CompletionUsage(
         completion_tokens=14,
-        completion_tokens_details=CompletionTokensDetails(audio_tokens=None, reasoning_tokens=0),
+        completion_tokens_details=CompletionTokensDetails(
+            accepted_prediction_tokens=None,
+            audio_tokens=None,
+            reasoning_tokens=0,
+            rejected_prediction_tokens=None
+        ),
         prompt_tokens=79,
         prompt_tokens_details=None,
         total_tokens=93

tests/lib/chat/test_completions_streaming.py (+6 -1)
@@ -157,7 +157,12 @@ def on_event(stream: ChatCompletionStream[Location], event: ChatCompletionStream
     system_fingerprint='fp_5050236cbd',
     usage=CompletionUsage(
         completion_tokens=14,
-        completion_tokens_details=CompletionTokensDetails(audio_tokens=None, reasoning_tokens=0),
+        completion_tokens_details=CompletionTokensDetails(
+            accepted_prediction_tokens=None,
+            audio_tokens=None,
+            reasoning_tokens=0,
+            rejected_prediction_tokens=None
+        ),
         prompt_tokens=79,
         prompt_tokens_details=None,
         total_tokens=93
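
The streaming helper accepts the same keyword, as shown in the completions.py diff. A sketch of passing it through stream() and reading the final usage; it reuses the client and prediction objects from the first sketch, and the get_final_completion() accessor used here should be treated as an assumption rather than part of this commit.

# Sketch, not part of the commit: stream() takes the same prediction keyword.
# Reuses `client` and `prediction` from the first sketch; treat the accessor
# names as assumptions rather than a definitive recipe.
with client.beta.chat.completions.stream(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Add type hints to: def add(a, b): return a + b"}],
    prediction=prediction,
) as stream:
    for _event in stream:
        pass  # consume events; real code would handle content deltas here
    final = stream.get_final_completion()
    if final.usage is not None and final.usage.completion_tokens_details is not None:
        details = final.usage.completion_tokens_details
        print("accepted_prediction_tokens:", details.accepted_prediction_tokens)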
