Fix request error when name has no value #19298

Closed
wants to merge 1 commit

Conversation

Contributor

@169 169 commented Mar 20, 2024

In #17537, a `name` parameter was added to the request payload, but when `name` has no value the original usage breaks: the request now includes `"name": None`, which the OpenAI API rejects. For example (a minimal sketch of the kind of guard that fixes this follows the traceback below):

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI()
llm.invoke("how can langsmith help with testing?")
```

```
In [4]: from langchain_openai import ChatOpenAI
   ...: llm = ChatOpenAI()
   ...: llm.invoke("how can langsmith help with testing?")
---------------------------------------------------------------------------
BadRequestError                           Traceback (most recent call last)
Cell In[4], line 3
      1 from langchain_openai import ChatOpenAI
      2 llm = ChatOpenAI()
----> 3 llm.invoke("how can langsmith help with testing?")

File ~/workspace/langchain/libs/core/langchain_core/language_models/chat_models.py:175, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
    164 def invoke(
    165     self,
    166     input: LanguageModelInput,
   (...)
    170     **kwargs: Any,
    171 ) -> BaseMessage:
    172     config = ensure_config(config)
    173     return cast(
    174         ChatGeneration,
--> 175         self.generate_prompt(
    176             [self._convert_input(input)],
    177             stop=stop,
    178             callbacks=config.get("callbacks"),
    179             tags=config.get("tags"),
    180             metadata=config.get("metadata"),
    181             run_name=config.get("run_name"),
    182             **kwargs,
    183         ).generations[0][0],
    184     ).message

File ~/workspace/langchain/libs/core/langchain_core/language_models/chat_models.py:586, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    578 def generate_prompt(
    579     self,
    580     prompts: List[PromptValue],
   (...)
    583     **kwargs: Any,
    584 ) -> LLMResult:
    585     prompt_messages = [p.to_messages() for p in prompts]
--> 586     return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File ~/workspace/langchain/libs/core/langchain_core/language_models/chat_models.py:447, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    445         if run_managers:
    446             run_managers[i].on_llm_error(e, response=LLMResult(generations=[]))
--> 447         raise e
    448 flattened_outputs = [
    449     LLMResult(generations=[res.generations], llm_output=res.llm_output)  # type: ignore[list-item]
    450     for res in results
    451 ]
    452 llm_output = self._combine_llm_outputs([res.llm_output for res in results])

File ~/workspace/langchain/libs/core/langchain_core/language_models/chat_models.py:437, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    434 for i, m in enumerate(messages):
    435     try:
    436         results.append(
--> 437             self._generate_with_cache(
    438                 m,
    439                 stop=stop,
    440                 run_manager=run_managers[i] if run_managers else None,
    441                 **kwargs,
    442             )
    443         )
    444     except BaseException as e:
    445         if run_managers:

File ~/workspace/langchain/libs/core/langchain_core/language_models/chat_models.py:629, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
    625         raise ValueError(
    626             "Asked to cache, but no cache found at `langchain.cache`."
    627         )
    628 if inspect.signature(self._generate).parameters.get("run_manager"):
--> 629     result = self._generate(
    630         messages, stop=stop, run_manager=run_manager, **kwargs
    631     )
    632 else:
    633     result = self._generate(messages, stop=stop, **kwargs)

File ~/workspace/langchain/libs/partners/openai/langchain_openai/chat_models/base.py:484, in ChatOpenAI._generate(self, messages, stop, run_manager, stream, **kwargs)
    478 message_dicts, params = self._create_message_dicts(messages, stop)
    479 params = {
    480     **params,
    481     **({"stream": stream} if stream is not None else {}),
    482     **kwargs,
    483 }
--> 484 response = self.client.create(messages=message_dicts, **params)
    485 return self._create_chat_result(response)

File ~/test/venv/lib/python3.11/site-packages/openai/_utils/_utils.py:275, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
    273             msg = f"Missing required argument: {quote(missing[0])}"
    274     raise TypeError(msg)
--> 275 return func(*args, **kwargs)

File ~/test/venv/lib/python3.11/site-packages/openai/resources/chat/completions.py:667, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
    615 @required_args(["messages", "model"], ["messages", "model", "stream"])
    616 def create(
    617     self,
   (...)
    665     timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    666 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
--> 667     return self._post(
    668         "/chat/completions",
    669         body=maybe_transform(
    670             {
    671                 "messages": messages,
    672                 "model": model,
    673                 "frequency_penalty": frequency_penalty,
    674                 "function_call": function_call,
    675                 "functions": functions,
    676                 "logit_bias": logit_bias,
    677                 "logprobs": logprobs,
    678                 "max_tokens": max_tokens,
    679                 "n": n,
    680                 "presence_penalty": presence_penalty,
    681                 "response_format": response_format,
    682                 "seed": seed,
    683                 "stop": stop,
    684                 "stream": stream,
    685                 "temperature": temperature,
    686                 "tool_choice": tool_choice,
    687                 "tools": tools,
    688                 "top_logprobs": top_logprobs,
    689                 "top_p": top_p,
    690                 "user": user,
    691             },
    692             completion_create_params.CompletionCreateParams,
    693         ),
    694         options=make_request_options(
    695             extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    696         ),
    697         cast_to=ChatCompletion,
    698         stream=stream or False,
    699         stream_cls=Stream[ChatCompletionChunk],
    700     )

File ~/test/venv/lib/python3.11/site-packages/openai/_base_client.py:1208, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
   1194 def post(
   1195     self,
   1196     path: str,
   (...)
   1203     stream_cls: type[_StreamT] | None = None,
   1204 ) -> ResponseT | _StreamT:
   1205     opts = FinalRequestOptions.construct(
   1206         method="post", url=path, json_data=body, files=to_httpx_files(files), **options
   1207     )
-> 1208     return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))

File ~/test/venv/lib/python3.11/site-packages/openai/_base_client.py:897, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
    888 def request(
    889     self,
    890     cast_to: Type[ResponseT],
   (...)
    895     stream_cls: type[_StreamT] | None = None,
    896 ) -> ResponseT | _StreamT:
--> 897     return self._request(
    898         cast_to=cast_to,
    899         options=options,
    900         stream=stream,
    901         stream_cls=stream_cls,
    902         remaining_retries=remaining_retries,
    903     )

File ~/test/venv/lib/python3.11/site-packages/openai/_base_client.py:988, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
    985         err.response.read()
    987     log.debug("Re-raising status error")
--> 988     raise self._make_status_error_from_response(err.response) from None
    990 return self._process_response(
    991     cast_to=cast_to,
    992     options=options,
   (...)
    995     stream_cls=stream_cls,
    996 )

BadRequestError: Error code: 400 - {'error': {'message': "None is not of type 'string' - 'messages.0.name'", 'type': 'invalid_request_error', 'param': None, 'code': None}}
```

@efriis efriis added the partner label Mar 20, 2024
@efriis efriis self-assigned this Mar 20, 2024
@dosubot dosubot bot added the size:XS This PR changes 0-9 lines, ignoring generated files. label Mar 20, 2024
@dosubot dosubot bot added 🔌: openai Primarily related to OpenAI integrations 🤖:bug Related to a bug, vulnerability, unexpected error with an existing feature labels Mar 20, 2024
@baskaryan
Collaborator

Thanks for the contribution! I believe this fix already landed in #19435.

@baskaryan baskaryan closed this Mar 26, 2024