community: fix openai streaming throws 'AIMessageChunk' object has no attribute 'text' (langchain-ai#18006)

After upgrading langchain-community to 0.0.22, it's no longer possible to use openai from the community package with `streaming=True`:
```
  File "/home/runner/work/ragstack-ai/ragstack-ai/ragstack-e2e-tests/.tox/langchain/lib/python3.11/site-packages/langchain_community/chat_models/openai.py", line 434, in _generate
    return generate_from_stream(stream_iter)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/runner/work/ragstack-ai/ragstack-ai/ragstack-e2e-tests/.tox/langchain/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 65, in generate_from_stream
    for chunk in stream:
  File "/home/runner/work/ragstack-ai/ragstack-ai/ragstack-e2e-tests/.tox/langchain/lib/python3.11/site-packages/langchain_community/chat_models/openai.py", line 418, in _stream
    run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
                                 ^^^^^^^^^^
AttributeError: 'AIMessageChunk' object has no attribute 'text'
```
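The root cause: the streaming loops build a `ChatGenerationChunk` (`cg_chunk`) around the raw `AIMessageChunk` (`chunk`), but after langchain-ai#17907 the `on_llm_new_token` call still read `chunk.text` — and the message chunk has no `text` attribute; only the generation chunk does. A minimal sketch of the distinction (assuming langchain-core ~0.1.x, the version line where this surfaced):

```python
from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk

# The raw message chunk carries the streamed delta in `.content` only.
chunk = AIMessageChunk(content="Hello")

# The generation chunk wraps the message and exposes its text.
cg_chunk = ChatGenerationChunk(message=chunk)

print(cg_chunk.text)  # "Hello"
print(chunk.text)     # AttributeError on the affected versions
```

(Newer langchain-core releases add a `text()` method to messages, so the exact failure mode is version-dependent.)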

Fixes a regression introduced by langchain-ai#17907.
**Twitter handle:** @nicoloboschi
nicoloboschi authored and Hayden Wolff committed Feb 27, 2024
1 parent 2ba2ca8 commit e807be0
Showing 4 changed files with 5 additions and 5 deletions.
```diff
@@ -223,7 +223,7 @@ def _stream(
                 message=chunk, generation_info=generation_info
             )
             if run_manager:
-                run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
+                run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
             yield cg_chunk
 
     async def _astream(
```
2 changes: 1 addition & 1 deletion libs/community/langchain_community/chat_models/konko.py
```diff
@@ -221,7 +221,7 @@ def _stream(
                 message=chunk, generation_info=generation_info
             )
             if run_manager:
-                run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
+                run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
             yield cg_chunk
 
     def _generate(
```
```diff
@@ -192,7 +192,7 @@ def _stream(
                 message=chunk, generation_info=generation_info
             )
             if run_manager:
-                run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
+                run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
             yield cg_chunk
 
     def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
```
4 changes: 2 additions & 2 deletions libs/community/langchain_community/chat_models/openai.py
```diff
@@ -415,7 +415,7 @@ def _stream(
                 message=chunk, generation_info=generation_info
             )
             if run_manager:
-                run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
+                run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
             yield cg_chunk
 
     def _generate(
@@ -507,7 +507,7 @@ async def _astream(
                 message=chunk, generation_info=generation_info
             )
             if run_manager:
-                await run_manager.on_llm_new_token(token=chunk.text, chunk=cg_chunk)
+                await run_manager.on_llm_new_token(token=cg_chunk.text, chunk=cg_chunk)
             yield cg_chunk
 
     async def _agenerate(
```
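A quick smoke test for the fix, sketched under the assumption that langchain-community is installed and `OPENAI_API_KEY` is set; before this commit, any streaming invocation like this one crashed with the `AttributeError` above:

```python
from langchain_community.chat_models import ChatOpenAI
from langchain_core.callbacks import StreamingStdOutCallbackHandler

llm = ChatOpenAI(
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],
)

# Streams tokens to stdout as they arrive; previously this raised
# AttributeError: 'AIMessageChunk' object has no attribute 'text'
llm.invoke("Say hello")
```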
