Commit

langchain[patch]: add stop for various non-openai agents (langchain-ai#19333)

* Description: add stop for various non-openai agents.
* Issue: N/A
* Dependencies: N/A

---------

Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
2 people authored and chrispy-snps committed Mar 30, 2024
1 parent 41acb70 commit f1027d5
Showing 4 changed files with 54 additions and 9 deletions.
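
Every agent factory touched by this commit applies the same guard before assembling the runnable: if stop_sequence is truthy, the model is bound with a stop list via .bind(stop=...); otherwise the model is used unmodified. A minimal sketch of that shared logic as a standalone helper (bind_stop is a hypothetical name introduced here for illustration, not part of the commit):

def bind_stop(chat_model, stop_sequence, default_stop):
    """Bind stop tokens onto a model, mirroring the guard used in each agent factory."""
    if stop_sequence:
        # True selects the agent's default stop token; a list is used verbatim.
        stop = default_stop if stop_sequence is True else stop_sequence
        return chat_model.bind(stop=stop)
    # False: some providers reject the `stop` kwarg, so leave the model untouched.
    return chat_model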
13 changes: 9 additions & 4 deletions libs/langchain/langchain/agents/json_chat/base.py
@@ -1,4 +1,4 @@
from typing import Sequence
from typing import List, Sequence, Union

from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
@@ -15,7 +15,7 @@ def create_json_chat_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
stop_sequence: bool = True,
stop_sequence: Union[bool, List[str]] = True,
tools_renderer: ToolsRenderer = render_text_description,
) -> Runnable:
"""Create an agent that uses JSON to format its logic, build for Chat Models.
@@ -24,7 +24,11 @@ def create_json_chat_agent(
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more.
stop_sequence: Adds a stop token of "Observation:" to avoid hallucinates.
stop_sequence: bool or list of str.
If True, adds a stop token of "Observation:" to avoid hallucination.
If False, does not add a stop token.
If a list of str, uses the provided list as the stop tokens.
Default is True. You may want to set this to False if the LLM you are using
does not support stop sequences.
tools_renderer: This controls how the tools are converted into a string and
@@ -158,7 +162,8 @@ def create_json_chat_agent(
tool_names=", ".join([t.name for t in tools]),
)
if stop_sequence:
llm_to_use = llm.bind(stop=["\nObservation"])
stop = ["\nObservation"] if stop_sequence is True else stop_sequence
llm_to_use = llm.bind(stop=stop)
else:
llm_to_use = llm

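With this change, callers of create_json_chat_agent can override or disable the bound stop token. A hedged usage sketch, assuming llm, tools, and prompt are already defined elsewhere (a chat model, a sequence of BaseTool, and a ChatPromptTemplate with the variables the JSON chat prompt expects):

from langchain.agents import AgentExecutor, create_json_chat_agent

# Replace the default "\nObservation" stop token with a custom list.
agent = create_json_chat_agent(
    llm, tools, prompt, stop_sequence=["\nObservation", "\nThought"]
)
# Or skip stop binding entirely for models that reject the `stop` kwarg:
# agent = create_json_chat_agent(llm, tools, prompt, stop_sequence=False)
executor = AgentExecutor(agent=agent, tools=tools)
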
17 changes: 15 additions & 2 deletions libs/langchain/langchain/agents/react/agent.py
@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import Optional, Sequence
from typing import List, Optional, Sequence, Union

from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
@@ -19,6 +19,8 @@ def create_react_agent(
prompt: BasePromptTemplate,
output_parser: Optional[AgentOutputParser] = None,
tools_renderer: ToolsRenderer = render_text_description,
*,
stop_sequence: Union[bool, List[str]] = True,
) -> Runnable:
"""Create an agent that uses ReAct prompting.
@@ -29,6 +31,13 @@ def create_react_agent(
output_parser: AgentOutputParser to parse the LLM output.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
stop_sequence: bool or list of str.
If True, adds a stop token of "Observation:" to avoid hallucination.
If False, does not add a stop token.
If a list of str, uses the provided list as the stop tokens.
Default is True. You may want to set this to False if the LLM you are using
does not support stop sequences.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
@@ -108,7 +117,11 @@ def create_react_agent(
tools=tools_renderer(list(tools)),
tool_names=", ".join([t.name for t in tools]),
)
llm_with_stop = llm.bind(stop=["\nObservation"])
if stop_sequence:
stop = ["\nObservation"] if stop_sequence is True else stop_sequence
llm_with_stop = llm.bind(stop=stop)
else:
llm_with_stop = llm
output_parser = output_parser or ReActSingleInputOutputParser()
agent = (
RunnablePassthrough.assign(
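Because stop_sequence is keyword-only here (note the bare * in the signature), existing positional calls are unaffected. A usage sketch, assuming llm and tools exist and prompt is a PromptTemplate containing the ReAct variables (tools, tool_names, agent_scratchpad, input):

from langchain.agents import AgentExecutor, create_react_agent

# Disable stop binding for a provider that rejects the `stop` kwarg; without
# the "\nObservation" stop token the model may write past "Observation:".
agent = create_react_agent(llm, tools, prompt, stop_sequence=False)
executor = AgentExecutor(agent=agent, tools=tools, handle_parsing_errors=True)
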
17 changes: 15 additions & 2 deletions libs/langchain/langchain/agents/structured_chat/base.py
@@ -1,5 +1,5 @@
import re
from typing import Any, List, Optional, Sequence, Tuple
from typing import Any, List, Optional, Sequence, Tuple, Union

from langchain_core._api import deprecated
from langchain_core.agents import AgentAction
@@ -155,13 +155,22 @@ def create_structured_chat_agent(
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
tools_renderer: ToolsRenderer = render_text_description_and_args,
*,
stop_sequence: Union[bool, List[str]] = True,
) -> Runnable:
"""Create an agent aimed at supporting tools with multiple inputs.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more.
stop_sequence: bool or list of str.
If True, adds a stop token of "Observation:" to avoid hallucination.
If False, does not add a stop token.
If a list of str, uses the provided list as the stop tokens.
Default is True. You may want to set this to False if the LLM you are using
does not support stop sequences.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
@@ -273,7 +282,11 @@ def create_structured_chat_agent(
tools=tools_renderer(list(tools)),
tool_names=", ".join([t.name for t in tools]),
)
llm_with_stop = llm.bind(stop=["Observation"])
if stop_sequence:
stop = ["\nObservation"] if stop_sequence is True else stop_sequence
llm_with_stop = llm.bind(stop=stop)
else:
llm_with_stop = llm

agent = (
RunnablePassthrough.assign(
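A usage sketch for the structured chat agent, again assuming llm, tools, and prompt are defined elsewhere; passing a list overrides the default "\nObservation" stop token that stop_sequence=True now binds:

from langchain.agents import AgentExecutor, create_structured_chat_agent

# Use a custom stop list instead of the default "\nObservation".
agent = create_structured_chat_agent(
    llm, tools, prompt, stop_sequence=["Observation:"]
)
executor = AgentExecutor(agent=agent, tools=tools)
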
16 changes: 15 additions & 1 deletion libs/langchain/langchain/agents/xml/base.py
@@ -112,6 +112,8 @@ def create_xml_agent(
tools: Sequence[BaseTool],
prompt: BasePromptTemplate,
tools_renderer: ToolsRenderer = render_text_description,
*,
stop_sequence: Union[bool, List[str]] = True,
) -> Runnable:
"""Create an agent that uses XML to format its logic.
@@ -123,6 +125,13 @@ def create_xml_agent(
`agent_scratchpad`: contains previous agent actions and tool outputs.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
stop_sequence: bool or list of str.
If True, adds a stop token of "</tool_input>" to avoid hallucination.
If False, does not add a stop token.
If a list of str, uses the provided list as the stop tokens.
Default is True. You may want to set this to False if the LLM you are using
does not support stop sequences.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
@@ -201,7 +210,12 @@ def create_xml_agent(
prompt = prompt.partial(
tools=tools_renderer(list(tools)),
)
llm_with_stop = llm.bind(stop=["</tool_input>"])

if stop_sequence:
stop = ["</tool_input>"] if stop_sequence is True else stop_sequence
llm_with_stop = llm.bind(stop=stop)
else:
llm_with_stop = llm

agent = (
RunnablePassthrough.assign(
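For the XML agent the default stop token is "</tool_input>". A sketch assuming llm, tools, and an XML-style prompt are defined elsewhere:

from langchain.agents import AgentExecutor, create_xml_agent

# Turn off the default "</tool_input>" stop token for a model that does not
# support stop sequences.
agent = create_xml_agent(llm, tools, prompt, stop_sequence=False)
executor = AgentExecutor(agent=agent, tools=tools)
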
