Skip to content

Commit

Permalink
community[patch]: invoke callback prior to yielding token (openai) (langchain-ai#19389)
Browse files Browse the repository at this point in the history

**Description:** Invoke callback prior to yielding token for BaseOpenAI
& OpenAIChat
**Issue:** [Callback for on_llm_new_token should be invoked before the
token is yielded by the model (langchain-ai#16913)](langchain-ai#16913)
**Dependencies:** None
  • Loading branch information
sepiatone authored and gkorland committed Mar 30, 2024
1 parent cb71e37 commit 6f09a88
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions libs/community/langchain_community/llms/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -391,7 +391,6 @@ async def _astream(
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
Expand All @@ -401,6 +400,7 @@ async def _astream(
if chunk.generation_info
else None,
)
yield chunk

def _generate(
self,
Expand Down Expand Up @@ -1113,9 +1113,9 @@ def _stream(
stream_resp = stream_resp.dict()
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
run_manager.on_llm_new_token(token, chunk=chunk)
yield chunk

async def _astream(
self,
Expand All @@ -1133,9 +1133,9 @@ async def _astream(
stream_resp = stream_resp.dict()
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(token, chunk=chunk)
yield chunk

def _generate(
self,
Expand Down

0 comments on commit 6f09a88

Please sign in to comment.