langchain[patch]: add stop for various non-openai agents
mackong committed Mar 20, 2024
1 parent 4c2e887 commit 651137a
Showing 4 changed files with 15 additions and 7 deletions.
6 changes: 4 additions & 2 deletions libs/langchain/langchain/agents/json_chat/base.py
@@ -1,4 +1,4 @@
from typing import Sequence
from typing import List, Optional, Sequence

from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
@@ -16,6 +16,7 @@ def create_json_chat_agent(
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
stop_sequence: bool = True,
stop: Optional[List[str]] = None,
tools_renderer: ToolsRenderer = render_text_description,
) -> Runnable:
"""Create an agent that uses JSON to format its logic, build for Chat Models.
@@ -27,6 +28,7 @@ def create_json_chat_agent(
stop_sequence: Adds a stop token of "Observation:" to avoid hallucination.
Default is True. You may want to set this to False if the LLM you are using
does not support stop sequences.
stop: Optional list of stop words to use when generating.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
@@ -158,7 +160,7 @@ def create_json_chat_agent(
tool_names=", ".join([t.name for t in tools]),
)
if stop_sequence:
llm_to_use = llm.bind(stop=["\nObservation"])
llm_to_use = llm.bind(stop=stop or ["\nObservation"])
else:
llm_to_use = llm

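With this change, callers of create_json_chat_agent can replace the default "\nObservation" stop token instead of only toggling it via stop_sequence. A minimal sketch of the new call, assuming llm, tools, and a compatible prompt already exist; the second stop string is an illustrative placeholder, not a value the library defines:

from langchain.agents import AgentExecutor, create_json_chat_agent

# llm, tools, and prompt are assumed to be constructed elsewhere.
agent = create_json_chat_agent(
    llm,
    tools,
    prompt,
    stop_sequence=True,                 # keep the stop-token binding enabled
    stop=["\nObservation", "<|eot|>"],  # overrides the default ["\nObservation"]
)
executor = AgentExecutor(agent=agent, tools=tools)

If stop_sequence is False, the stop list is ignored and the LLM is used unbound, exactly as before.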
6 changes: 4 additions & 2 deletions libs/langchain/langchain/agents/react/agent.py
@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import Optional, Sequence
from typing import List, Optional, Sequence

from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
@@ -19,6 +19,7 @@ def create_react_agent(
prompt: BasePromptTemplate,
output_parser: Optional[AgentOutputParser] = None,
tools_renderer: ToolsRenderer = render_text_description,
stop: Optional[List[str]] = None,
) -> Runnable:
"""Create an agent that uses ReAct prompting.
@@ -29,6 +30,7 @@ def create_react_agent(
output_parser: AgentOutputParser for parsing the LLM output.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
stop: Optional list of stop words to use when generating.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
@@ -108,7 +110,7 @@ def create_react_agent(
tools=tools_renderer(list(tools)),
tool_names=", ".join([t.name for t in tools]),
)
llm_with_stop = llm.bind(stop=["\nObservation"])
llm_with_stop = llm.bind(stop=stop or ["\nObservation"])
output_parser = output_parser or ReActSingleInputOutputParser()
agent = (
RunnablePassthrough.assign(
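create_react_agent gains the same override, and the stop or ["\nObservation"] expression preserves the old default when nothing is passed. A short sketch under the same assumptions (pre-built llm, tools, and a ReAct-style prompt); the custom stop strings are illustrative:

from langchain.agents import create_react_agent

# Omitting stop (or passing None) keeps the previous behaviour:
# the LLM is still bound with stop=["\nObservation"].
default_agent = create_react_agent(llm, tools, prompt)

# A non-empty list replaces the bound stop sequences entirely.
custom_agent = create_react_agent(
    llm, tools, prompt, stop=["\nObservation:", "\nFinal Answer:"]
)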
4 changes: 3 additions & 1 deletion libs/langchain/langchain/agents/structured_chat/base.py
@@ -155,6 +155,7 @@ def create_structured_chat_agent(
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
tools_renderer: ToolsRenderer = render_text_description_and_args,
stop: Optional[List[str]] = None,
) -> Runnable:
"""Create an agent aimed at supporting tools with multiple inputs.
@@ -164,6 +165,7 @@ def create_structured_chat_agent(
prompt: The prompt to use. See Prompt section below for more.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description_and_args`.
stop: Optional list of stop words to use when generating.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
@@ -273,7 +275,7 @@ def create_structured_chat_agent(
tools=tools_renderer(list(tools)),
tool_names=", ".join([t.name for t in tools]),
)
llm_with_stop = llm.bind(stop=["Observation"])
llm_with_stop = llm.bind(stop=stop or ["Observation"])

agent = (
RunnablePassthrough.assign(
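For the structured chat agent the default stop token is "Observation" (no leading newline), and it can now be replaced in the same way. A sketch assuming llm, tools, and prompt are in scope; "<|im_end|>" is a hypothetical model-specific end-of-turn marker, not something the library requires:

from langchain.agents import create_structured_chat_agent

# Keep the usual "Observation" marker and add a hypothetical
# model-specific end-of-turn token.
agent = create_structured_chat_agent(
    llm, tools, prompt, stop=["Observation", "<|im_end|>"]
)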
6 changes: 4 additions & 2 deletions libs/langchain/langchain/agents/xml/base.py
@@ -1,4 +1,4 @@
from typing import Any, List, Sequence, Tuple, Union
from typing import Any, List, Optional, Sequence, Tuple, Union

from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
@@ -112,6 +112,7 @@ def create_xml_agent(
tools: Sequence[BaseTool],
prompt: BasePromptTemplate,
tools_renderer: ToolsRenderer = render_text_description,
stop: Optional[List[str]] = None,
) -> Runnable:
"""Create an agent that uses XML to format its logic.
@@ -123,6 +124,7 @@ def create_xml_agent(
`agent_scratchpad`: contains previous agent actions and tool outputs.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
stop: Optional list of stop words to use when generating.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
@@ -201,7 +203,7 @@ def create_xml_agent(
prompt = prompt.partial(
tools=tools_renderer(list(tools)),
)
llm_with_stop = llm.bind(stop=["</tool_input>"])
llm_with_stop = llm.bind(stop=stop or ["</tool_input>"])

agent = (
RunnablePassthrough.assign(
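The XML agent's default stop is "</tool_input>", and the new parameter lets callers extend or replace it. A sketch under the same assumptions about llm, tools, and prompt; the extra closing tag is illustrative:

from langchain.agents import create_xml_agent

# Keep the default closing tag and add another illustrative one,
# useful if the prompt format also closes a <final_answer> block.
agent = create_xml_agent(
    llm, tools, prompt, stop=["</tool_input>", "</final_answer>"]
)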
