
Commit e6b8a8b
partners/openai: fix deprecation errors of pydantic's .dict() function (reopen langchain-ai#16629) (langchain-ai#17404)



---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
2 people authored and Hayden Wolff committed Feb 27, 2024
1 parent 5415af8 commit e6b8a8b
Showing 4 changed files with 19 additions and 15 deletions.
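
For context, a minimal sketch of the deprecation this commit addresses (the Completion model below is illustrative, not from the diff): under Pydantic v2, BaseModel.dict() still works but emits a PydanticDeprecatedSince20 warning, and model_dump() is its replacement.

    # Minimal sketch, assuming Pydantic v2 is installed; the model is hypothetical.
    import warnings

    from pydantic import BaseModel

    class Completion(BaseModel):
        id: str
        choices: list

    resp = Completion(id="cmpl-123", choices=[])

    with warnings.catch_warnings():
        warnings.simplefilter("error")
        try:
            resp.dict()  # Pydantic v2 warns: PydanticDeprecatedSince20
        except DeprecationWarning as exc:
            print("deprecated:", exc)

    payload = resp.model_dump()  # the v2 replacement used throughout this commit
    print(payload)               # {'id': 'cmpl-123', 'choices': []}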
8 changes: 5 additions & 3 deletions libs/partners/openai/langchain_openai/chat_models/azure.py
@@ -7,7 +7,7 @@
 
 import openai
 from langchain_core.outputs import ChatResult
-from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
+from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
 from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
 
 from langchain_openai.chat_models.base import ChatOpenAI
@@ -209,9 +209,11 @@ def lc_attributes(self) -> Dict[str, Any]:
             "openai_api_version": self.openai_api_version,
         }
 
-    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
+    def _create_chat_result(
+        self, response: Union[dict, openai.BaseModel]
+    ) -> ChatResult:
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         for res in response["choices"]:
             if res.get("finish_reason", None) == "content_filter":
                 raise ValueError(
10 changes: 6 additions & 4 deletions libs/partners/openai/langchain_openai/chat_models/base.py
@@ -394,7 +394,7 @@ def _stream(
         default_chunk_class = AIMessageChunk
         for chunk in self.client.create(messages=message_dicts, **params):
             if not isinstance(chunk, dict):
-                chunk = chunk.dict()
+                chunk = chunk.model_dump()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]
@@ -449,10 +449,12 @@ def _create_message_dicts(
         message_dicts = [_convert_message_to_dict(m) for m in messages]
         return message_dicts, params
 
-    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
+    def _create_chat_result(
+        self, response: Union[dict, openai.BaseModel]
+    ) -> ChatResult:
         generations = []
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         for res in response["choices"]:
             message = _convert_dict_to_message(res["message"])
             generation_info = dict(finish_reason=res.get("finish_reason"))
@@ -486,7 +488,7 @@ async def _astream(
             messages=message_dicts, **params
         ):
             if not isinstance(chunk, dict):
-                chunk = chunk.dict()
+                chunk = chunk.model_dump()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]
8 changes: 4 additions & 4 deletions libs/partners/openai/langchain_openai/embeddings/base.py
@@ -324,7 +324,7 @@ def _get_len_safe_embeddings(
                 input=tokens[i : i + _chunk_size], **self._invocation_params
             )
             if not isinstance(response, dict):
-                response = response.dict()
+                response = response.model_dump()
             batched_embeddings.extend(r["embedding"] for r in response["data"])
 
         results: List[List[List[float]]] = [[] for _ in range(len(texts))]
@@ -343,7 +343,7 @@ def _get_len_safe_embeddings(
                     input="", **self._invocation_params
                 )
                 if not isinstance(average_embedded, dict):
-                    average_embedded = average_embedded.dict()
+                    average_embedded = average_embedded.model_dump()
                 average = average_embedded["data"][0]["embedding"]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
@@ -436,7 +436,7 @@ async def _aget_len_safe_embeddings(
             )
 
             if not isinstance(response, dict):
-                response = response.dict()
+                response = response.model_dump()
             batched_embeddings.extend(r["embedding"] for r in response["data"])
 
         results: List[List[List[float]]] = [[] for _ in range(len(texts))]
@@ -453,7 +453,7 @@ async def _aget_len_safe_embeddings(
                     input="", **self._invocation_params
                 )
                 if not isinstance(average_embedded, dict):
-                    average_embedded = average_embedded.dict()
+                    average_embedded = average_embedded.model_dump()
                 average = average_embedded["data"][0]["embedding"]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
8 changes: 4 additions & 4 deletions libs/partners/openai/langchain_openai/llms/base.py
@@ -251,7 +251,7 @@ def _stream(
         self.get_sub_prompts(params, [prompt], stop)  # this mutates params
         for stream_resp in self.client.create(prompt=prompt, **params):
             if not isinstance(stream_resp, dict):
-                stream_resp = stream_resp.dict()
+                stream_resp = stream_resp.model_dump()
             chunk = _stream_response_to_generation_chunk(stream_resp)
             yield chunk
             if run_manager:
@@ -279,7 +279,7 @@ async def _astream(
             prompt=prompt, **params
         ):
             if not isinstance(stream_resp, dict):
-                stream_resp = stream_resp.dict()
+                stream_resp = stream_resp.model_dump()
             chunk = _stream_response_to_generation_chunk(stream_resp)
             yield chunk
             if run_manager:
@@ -357,7 +357,7 @@ def _generate(
                 if not isinstance(response, dict):
                     # V1 client returns the response in an PyDantic object instead of
                     # dict. For the transition period, we deep convert it to dict.
-                    response = response.dict()
+                    response = response.model_dump()
 
                 choices.extend(response["choices"])
                 _update_token_usage(_keys, response, token_usage)
@@ -420,7 +420,7 @@ async def _agenerate(
         else:
             response = await self.async_client.create(prompt=_prompts, **params)
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         choices.extend(response["choices"])
         _update_token_usage(_keys, response, token_usage)
         return self.create_llm_result(
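
All four files apply the same guard before reading "choices" or "data": the OpenAI v1 SDK returns Pydantic models (openai.BaseModel subclasses, as the new type hints show), while legacy code paths may still pass plain dicts. A standalone sketch of that shared pattern follows; the helper name is hypothetical, not from the diff.

    # Hypothetical helper mirroring the guard used in all four changed files.
    from typing import Union

    import openai  # v1 SDK; its response types subclass openai.BaseModel

    def _normalize_response(response: Union[dict, openai.BaseModel]) -> dict:
        # Pass plain dicts through; convert Pydantic responses with model_dump().
        if not isinstance(response, dict):
            response = response.model_dump()
        return response

Any response can then be handled uniformly with plain key access, e.g. _normalize_response(resp)["choices"].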
