feat: migrate agents and toolkits to Component syntax (#2579)

* feat: migrate agents and toolkits to Component syntax

* fix mypy

* fix mypy

* [autofix.ci] apply automated fixes

* fix mypy

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
This commit is contained in:
Nicolò Boschi 2024-07-10 14:09:14 +02:00 committed by GitHub
commit 05044a3434
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
20 changed files with 365 additions and 706 deletions

View file

@ -1,78 +1,91 @@
from typing import List, Optional, Union, cast
from abc import abstractmethod
from typing import List
from langchain.agents import AgentExecutor, BaseMultiActionAgent, BaseSingleActionAgent
from langchain_core.messages import BaseMessage
from langchain.agents.agent import RunnableAgent
from langchain.agents import AgentExecutor
from langchain_core.runnables import Runnable
from langflow.base.agents.utils import data_to_messages, get_agents_list
from langflow.custom import CustomComponent
from langflow.field_typing import Text, Tool
from langflow.schema import Data
from langflow.custom import Component
from langflow.inputs import BoolInput, IntInput, HandleInput
from langflow.inputs.inputs import InputTypes
from langflow.template import Output
class LCAgentComponent(CustomComponent):
def get_agents_list(self):
return get_agents_list()
class LCAgentComponent(Component):
trace_type = "agent"
_base_inputs: List[InputTypes] = [
BoolInput(
name="handle_parsing_errors",
display_name="Handle Parse Errors",
value=True,
advanced=True,
),
BoolInput(
name="verbose",
display_name="Verbose",
value=True,
advanced=True,
),
IntInput(
name="max_iterations",
display_name="Max Iterations",
value=15,
advanced=True,
),
]
def build_config(self):
return {
"lc": {
"display_name": "LangChain",
"info": "The LangChain to interact with.",
},
"handle_parsing_errors": {
"display_name": "Handle Parsing Errors",
"info": "If True, the agent will handle parsing errors. If False, the agent will raise an error.",
"advanced": True,
},
"output_key": {
"display_name": "Output Key",
"info": "The key to use to get the output from the agent.",
"advanced": True,
},
"memory": {
"display_name": "Memory",
"info": "Memory to use for the agent.",
},
"tools": {
"display_name": "Tools",
"info": "Tools the agent can use.",
},
"input_value": {
"display_name": "Input",
"info": "Input text to pass to the agent.",
},
outputs = [
Output(display_name="Agent", name="agent", method="build_agent"),
]
def _validate_outputs(self):
required_output_methods = ["build_agent"]
output_names = [output.name for output in self.outputs]
for method_name in required_output_methods:
if method_name not in output_names:
raise ValueError(f"Output with name '{method_name}' must be defined.")
elif not hasattr(self, method_name):
raise ValueError(f"Method '{method_name}' must be defined.")
def get_agent_kwargs(self, flatten: bool = False) -> dict:
base = {
"handle_parsing_errors": self.handle_parsing_errors,
"verbose": self.verbose,
"allow_dangerous_code": True,
}
agent_kwargs = {
"handle_parsing_errors": self.handle_parsing_errors,
"max_iterations": self.max_iterations,
}
if flatten:
return {
**base,
**agent_kwargs,
}
return {**base, "agent_executor_kwargs": agent_kwargs}
async def run_agent(
self,
agent: Union[Runnable, BaseSingleActionAgent, BaseMultiActionAgent, AgentExecutor],
inputs: str,
tools: List[Tool],
message_history: Optional[List[Data]] = None,
handle_parsing_errors: bool = True,
output_key: str = "output",
) -> Text:
if isinstance(agent, AgentExecutor):
runnable = agent
else:
runnable = AgentExecutor.from_agent_and_tools(
agent=agent, # type: ignore
tools=tools,
verbose=True,
handle_parsing_errors=handle_parsing_errors,
)
input_dict: dict[str, str | list[BaseMessage]] = {"input": inputs}
if message_history:
input_dict["chat_history"] = data_to_messages(message_history)
result = await runnable.ainvoke(input_dict)
self.status = result
if output_key in result:
return cast(str, result.get(output_key))
elif "output" not in result:
if output_key != "output":
raise ValueError(f"Output key not found in result. Tried '{output_key}' and 'output'.")
else:
raise ValueError("Output key not found in result. Tried 'output'.")
return cast(str, result.get("output"))
class LCToolsAgentComponent(LCAgentComponent):
_base_inputs = LCAgentComponent._base_inputs + [
HandleInput(
name="tools",
display_name="Tools",
input_types=["Tool"],
is_list=True,
),
HandleInput(name="llm", display_name="Language Model", input_types=["LanguageModel"], required=True),
]
def build_agent(self) -> AgentExecutor:
agent = self.creat_agent_runnable()
return AgentExecutor.from_agent_and_tools(
agent=RunnableAgent(runnable=agent, input_keys_arg=["input"], return_keys_arg=["output"]),
tools=self.tools,
**self.get_agent_kwargs(flatten=True),
)
@abstractmethod
def creat_agent_runnable(self) -> Runnable:
"""Create the agent."""
pass

View file

@ -1,4 +1,4 @@
from typing import List
from typing import List, cast
from langchain_core.documents import Document
from loguru import logger
@ -23,11 +23,16 @@ class LCVectorStoreComponent(Component):
name="search_results",
method="search_documents",
),
Output(
display_name="Vector Store",
name="vector_store",
method="cast_vector_store",
),
]
def _validate_outputs(self):
# At least these three outputs must be defined
required_output_methods = ["build_base_retriever", "search_documents"]
required_output_methods = ["build_base_retriever", "search_documents", "build_vector_store"]
output_names = [output.name for output in self.outputs]
for method_name in required_output_methods:
if method_name not in output_names:
@ -67,6 +72,9 @@ class LCVectorStoreComponent(Component):
self.status = data
return data
def cast_vector_store(self) -> VectorStore:
return cast(VectorStore, self.build_vector_store())
def build_vector_store(self) -> VectorStore:
"""
Builds the Vector Store object.c

View file

@ -1,35 +1,27 @@
from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_agent
from langflow.custom import CustomComponent
from langflow.field_typing import AgentExecutor, LanguageModel
from langflow.base.agents.agent import LCAgentComponent
from langflow.field_typing import AgentExecutor
from langflow.inputs import HandleInput, FileInput, DropdownInput
class CSVAgentComponent(CustomComponent):
class CSVAgentComponent(LCAgentComponent):
display_name = "CSVAgent"
description = "Construct a CSV agent from a CSV and tools."
documentation = "https://python.langchain.com/docs/modules/agents/toolkits/csv"
name = "CSVAgent"
def build_config(self):
return {
"llm": {"display_name": "LLM", "type": LanguageModel},
"path": {"display_name": "Path", "field_type": "file", "suffixes": [".csv"], "file_types": [".csv"]},
"handle_parsing_errors": {"display_name": "Handle Parse Errors", "advanced": True},
"agent_type": {
"display_name": "Agent Type",
"options": ["zero-shot-react-description", "openai-functions", "openai-tools"],
"advanced": True,
},
}
inputs = LCAgentComponent._base_inputs + [
FileInput(name="path", display_name="File Path", file_types=["csv"], required=True),
HandleInput(name="llm", display_name="Language Model", input_types=["LanguageModel"], required=True),
DropdownInput(
name="agent_type",
display_name="Agent Type",
advanced=True,
options=["zero-shot-react-description", "openai-functions", "openai-tools"],
value="openai-tools",
),
]
def build(
self, llm: LanguageModel, path: str, handle_parsing_errors: bool = True, agent_type: str = "openai-tools"
) -> AgentExecutor:
# Instantiate and return the CSV agent class with the provided llm and path
return create_csv_agent(
llm=llm,
path=path,
agent_type=agent_type,
verbose=True,
agent_executor_kwargs=dict(handle_parsing_errors=handle_parsing_errors),
)
def build_agent(self) -> AgentExecutor:
return create_csv_agent(llm=self.llm, path=self.path, agent_type=self.agent_type, **self.get_agent_kwargs())

View file

@ -1,25 +1,31 @@
from pathlib import Path
import yaml
from langchain.agents import AgentExecutor
from langchain_community.agent_toolkits import create_json_agent
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
from langchain_community.tools.json.tool import JsonSpec
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
from langflow.base.agents.agent import LCAgentComponent
from langflow.inputs import HandleInput, FileInput
class JsonAgentComponent(CustomComponent):
class JsonAgentComponent(LCAgentComponent):
display_name = "JsonAgent"
description = "Construct a json agent from an LLM and tools."
name = "JsonAgent"
def build_config(self):
return {
"llm": {"display_name": "LLM"},
"toolkit": {"display_name": "Toolkit"},
}
inputs = LCAgentComponent._base_inputs + [
FileInput(name="path", display_name="File Path", file_types=["json", "yaml", "yml"], required=True),
HandleInput(name="llm", display_name="Language Model", input_types=["LanguageModel"], required=True),
]
def build(
self,
llm: LanguageModel,
toolkit: JsonToolkit,
) -> AgentExecutor:
return create_json_agent(llm=llm, toolkit=toolkit)
def build_agent(self) -> AgentExecutor:
if self.path.endswith("yaml") or self.path.endswith("yml"):
yaml_dict = yaml.load(open(self.path, "r"), Loader=yaml.FullLoader)
spec = JsonSpec(dict_=yaml_dict)
else:
spec = JsonSpec.from_file(Path(self.path))
toolkit = JsonToolkit(spec=spec)
return create_json_agent(llm=self.llm, toolkit=toolkit, **self.get_agent_kwargs())

View file

@ -0,0 +1,36 @@
from langchain.agents import create_openai_tools_agent
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate, HumanMessagePromptTemplate
from langflow.base.agents.agent import LCToolsAgentComponent
from langflow.inputs import MultilineInput
class OpenAIToolsAgentComponent(LCToolsAgentComponent):
display_name: str = "OpenAI Tools Agent"
description: str = "Agent that uses tools via openai-tools."
icon = "LangChain"
beta = True
name = "OpenAIToolsAgent"
inputs = LCToolsAgentComponent._base_inputs + [
MultilineInput(
name="system_prompt",
display_name="System Prompt",
info="System prompt for the agent.",
value="You are a helpful assistant",
),
MultilineInput(
name="user_prompt", display_name="Prompt", info="This prompt must contain 'input' key.", value="{input}"
),
]
def creat_agent_runnable(self):
if "input" not in self.user_prompt:
raise ValueError("Prompt must contain 'input' key.")
messages = [
("system", self.system_prompt),
HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=["input"], template=self.user_prompt)),
("placeholder", "{agent_scratchpad}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
return create_openai_tools_agent(self.llm, self.tools, prompt)

View file

@ -0,0 +1,42 @@
from pathlib import Path
import yaml
from langchain.agents import AgentExecutor
from langchain_community.agent_toolkits import create_openapi_agent
from langchain_community.tools.json.tool import JsonSpec
from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langflow.base.agents.agent import LCAgentComponent
from langflow.inputs import BoolInput, HandleInput, FileInput
from langchain_community.utilities.requests import TextRequestsWrapper
class OpenAPIAgentComponent(LCAgentComponent):
display_name = "OpenAPI Agent"
description = "Agent to interact with OpenAPI API."
name = "OpenAPIAgent"
inputs = LCAgentComponent._base_inputs + [
FileInput(name="path", display_name="File Path", file_types=["json", "yaml", "yml"], required=True),
HandleInput(name="llm", display_name="Language Model", input_types=["LanguageModel"], required=True),
BoolInput(name="allow_dangerous_requests", display_name="Allow Dangerous Requests", value=False, required=True),
]
def build_agent(self) -> AgentExecutor:
if self.path.endswith("yaml") or self.path.endswith("yml"):
yaml_dict = yaml.load(open(self.path, "r"), Loader=yaml.FullLoader)
spec = JsonSpec(dict_=yaml_dict)
else:
spec = JsonSpec.from_file(Path(self.path))
requests_wrapper = TextRequestsWrapper()
toolkit = OpenAPIToolkit.from_llm(
llm=self.llm,
json_spec=spec,
requests_wrapper=requests_wrapper,
allow_dangerous_requests=self.allow_dangerous_requests,
)
agent_args = self.get_agent_kwargs()
agent_args["max_iterations"] = agent_args["agent_executor_kwargs"]["max_iterations"]
del agent_args["agent_executor_kwargs"]["max_iterations"]
return create_openapi_agent(llm=self.llm, toolkit=toolkit, **agent_args)

View file

@ -1,32 +1,26 @@
from typing import Callable, Union
from langchain.agents import AgentExecutor
from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_community.agent_toolkits.sql.base import create_sql_agent
from langchain_community.utilities import SQLDatabase
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
from langflow.base.agents.agent import LCAgentComponent
from langflow.inputs import MessageTextInput, HandleInput
class SQLAgentComponent(CustomComponent):
class SQLAgentComponent(LCAgentComponent):
display_name = "SQLAgent"
description = "Construct an SQL agent from an LLM and tools."
name = "SQLAgent"
def build_config(self):
return {
"llm": {"display_name": "LLM"},
"database_uri": {"display_name": "Database URI"},
"verbose": {"display_name": "Verbose", "value": False, "advanced": True},
}
inputs = LCAgentComponent._base_inputs + [
MessageTextInput(name="database_uri", display_name="Database URI", required=True),
HandleInput(name="llm", display_name="Language Model", input_types=["LanguageModel"], required=True),
]
def build(
self,
llm: LanguageModel,
database_uri: str,
verbose: bool = False,
) -> Union[AgentExecutor, Callable]:
db = SQLDatabase.from_uri(database_uri)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
return create_sql_agent(llm=llm, toolkit=toolkit)
def build_agent(self) -> AgentExecutor:
db = SQLDatabase.from_uri(self.database_uri)
toolkit = SQLDatabaseToolkit(db=db, llm=self.llm)
agent_args = self.get_agent_kwargs()
agent_args["max_iterations"] = agent_args["agent_executor_kwargs"]["max_iterations"]
del agent_args["agent_executor_kwargs"]["max_iterations"]
return create_sql_agent(llm=self.llm, toolkit=toolkit, **agent_args)

View file

@ -1,111 +1,35 @@
from typing import Dict, List, cast
from langchain.agents import AgentExecutor, BaseSingleActionAgent
from langchain.agents.tool_calling_agent.base import create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate
from langflow.custom import Component
from langflow.io import BoolInput, HandleInput, MessageTextInput, Output
from langflow.schema import Data
from langflow.schema.message import Message
from langchain.agents import create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate, HumanMessagePromptTemplate
from langflow.base.agents.agent import LCToolsAgentComponent
from langflow.inputs import MultilineInput
class ToolCallingAgentComponent(Component):
class ToolCallingAgentComponent(LCToolsAgentComponent):
display_name: str = "Tool Calling Agent"
description: str = "Agent that uses tools. Only models that are compatible with function calling are supported."
description: str = "Agent that uses tools"
icon = "LangChain"
beta = True
name = "ToolCallingAgent"
inputs = [
MessageTextInput(
inputs = LCToolsAgentComponent._base_inputs + [
MultilineInput(
name="system_prompt",
display_name="System Prompt",
info="System prompt for the agent.",
value="You are a helpful assistant",
),
MessageTextInput(
name="input_value",
display_name="Inputs",
info="Input text to pass to the agent.",
),
MessageTextInput(
name="user_prompt",
display_name="Prompt",
info="This prompt must contain 'input' key.",
value="{input}",
advanced=True,
),
BoolInput(
name="handle_parsing_errors",
display_name="Handle Parsing Errors",
info="If True, the agent will handle parsing errors. If False, the agent will raise an error.",
advanced=True,
value=True,
),
HandleInput(
name="memory",
display_name="Memory",
input_types=["Data"],
info="Memory to use for the agent.",
),
HandleInput(
name="tools",
display_name="Tools",
input_types=["Tool"],
is_list=True,
),
HandleInput(
name="llm",
display_name="LLM",
input_types=["LanguageModel"],
MultilineInput(
name="user_prompt", display_name="Prompt", info="This prompt must contain 'input' key.", value="{input}"
),
]
outputs = [
Output(display_name="Text", name="text_output", method="run_agent"),
]
async def run_agent(self) -> Message:
def creat_agent_runnable(self):
if "input" not in self.user_prompt:
raise ValueError("Prompt must contain 'input' key.")
messages = [
("system", self.system_prompt),
(
"placeholder",
"{chat_history}",
),
("human", self.user_prompt),
HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=["input"], template=self.user_prompt)),
("placeholder", "{agent_scratchpad}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
agent = create_tool_calling_agent(self.llm, self.tools, prompt)
runnable = AgentExecutor.from_agent_and_tools(
agent=cast(BaseSingleActionAgent, agent),
tools=self.tools,
verbose=True,
handle_parsing_errors=self.handle_parsing_errors,
)
input_dict: dict[str, str | list[Dict[str, str]]] = {"input": self.input_value}
if hasattr(self, "memory") and self.memory:
input_dict["chat_history"] = self.convert_chat_history(self.memory)
result = await runnable.ainvoke(input_dict)
if "output" not in result:
raise ValueError("Output key not found in result. Tried 'output'.")
results = result["output"]
if isinstance(results, list):
result_string = "\n".join([r["text"] for r in results if "text" in r and r.get("type") == "text"])
else:
result_string = results
self.status = result_string
return Message(text=result_string)
def convert_chat_history(self, chat_history: List[Data]) -> List[Dict[str, str]]:
messages = []
for item in chat_history:
role = "user" if item.sender == "User" else "assistant"
messages.append({"role": role, "content": item.text})
return messages
return create_tool_calling_agent(self.llm, self.tools, prompt)

View file

@ -1,26 +1,19 @@
from typing import Callable, Union
from langchain.agents import AgentExecutor, create_vectorstore_agent
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreToolkit
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
from langflow.base.agents.agent import LCAgentComponent
from langflow.inputs import HandleInput
class VectorStoreAgentComponent(CustomComponent):
class VectorStoreAgentComponent(LCAgentComponent):
display_name = "VectorStoreAgent"
description = "Construct an agent from a Vector Store."
name = "VectorStoreAgent"
def build_config(self):
return {
"llm": {"display_name": "LLM"},
"vector_store_toolkit": {"display_name": "Vector Store Info"},
}
inputs = LCAgentComponent._base_inputs + [
HandleInput(name="llm", display_name="Language Model", input_types=["LanguageModel"], required=True),
HandleInput(name="vectorstore", display_name="Vector Store", input_types=["VectorStoreInfo"], required=True),
]
def build(
self,
llm: LanguageModel,
vector_store_toolkit: VectorStoreToolkit,
) -> Union[AgentExecutor, Callable]:
return create_vectorstore_agent(llm=llm, toolkit=vector_store_toolkit)
def build_agent(self) -> AgentExecutor:
toolkit = VectorStoreToolkit(vectorstore_info=self.vectorstore, llm=self.llm)
return create_vectorstore_agent(llm=self.llm, toolkit=toolkit, **self.get_agent_kwargs())

View file

@ -1,22 +1,27 @@
from typing import Callable
from langchain.agents import create_vectorstore_router_agent
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreRouterToolkit
from langflow.field_typing import LanguageModel
from langflow.custom import CustomComponent
from langflow.base.agents.agent import LCAgentComponent
from langchain.agents import AgentExecutor
from langflow.inputs import HandleInput
class VectorStoreRouterAgentComponent(CustomComponent):
class VectorStoreRouterAgentComponent(LCAgentComponent):
display_name = "VectorStoreRouterAgent"
description = "Construct an agent from a Vector Store Router."
name = "VectorStoreRouterAgent"
def build_config(self):
return {
"llm": {"display_name": "LLM"},
"vectorstoreroutertoolkit": {"display_name": "Vector Store Router Toolkit"},
}
inputs = LCAgentComponent._base_inputs + [
HandleInput(name="llm", display_name="Language Model", input_types=["LanguageModel"], required=True),
HandleInput(
name="vectorstores",
display_name="Vector Stores",
input_types=["VectorStoreInfo"],
is_list=True,
required=True,
),
]
def build(self, llm: LanguageModel, vectorstoreroutertoolkit: VectorStoreRouterToolkit) -> Callable:
return create_vectorstore_router_agent(llm=llm, toolkit=vectorstoreroutertoolkit)
def build_agent(self) -> AgentExecutor:
toolkit = VectorStoreRouterToolkit(vectorstores=self.vectorstores, llm=self.llm)
return create_vectorstore_router_agent(llm=self.llm, toolkit=toolkit, **self.get_agent_kwargs())

View file

@ -1,111 +1,52 @@
from typing import List, Optional
from langchain.agents import create_xml_agent
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate, HumanMessagePromptTemplate
from langflow.base.agents.agent import LCAgentComponent
from langflow.field_typing import LanguageModel, Text, Tool
from langflow.schema import Data
from langflow.base.agents.agent import LCToolsAgentComponent
from langflow.inputs import MultilineInput
class XMLAgentComponent(LCAgentComponent):
display_name = "XMLAgent"
description = "Construct an XML agent from an LLM and tools."
class XMLAgentComponent(LCToolsAgentComponent):
display_name: str = "XML Agent"
description: str = "Agent that uses tools formatting instructions as xml to the Language Model."
icon = "LangChain"
beta = True
name = "XMLAgent"
def build_config(self):
return {
"llm": {"display_name": "LLM"},
"tools": {"display_name": "Tools"},
"user_prompt": {
"display_name": "Prompt",
"multiline": True,
"info": "This prompt must contain 'tools' and 'agent_scratchpad' keys.",
"value": """You are a helpful assistant. Help the user answer any questions.
inputs = LCToolsAgentComponent._base_inputs + [
MultilineInput(
name="user_prompt",
display_name="Prompt",
value="""
You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
You have access to the following tools:
{tools}
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
When you are done, respond with a final answer between <final_answer></final_answer>. For example:
<tool>search</tool><tool_input>weather in SF</tool_input>
<final_answer>The weather in SF is 64 degrees</final_answer>
<observation>64 degrees</observation>
Begin!
When you are done, respond with a final answer between <final_answer></final_answer>. For example:
Previous Conversation:
{chat_history}
<final_answer>The weather in SF is 64 degrees</final_answer>
Question: {input}
{agent_scratchpad}""",
},
"system_message": {
"display_name": "System Message",
"info": "System message to be passed to the LLM.",
"advanced": True,
},
"tool_template": {
"display_name": "Tool Template",
"info": "Template for rendering tools in the prompt. Tools have 'name' and 'description' keys.",
"advanced": True,
},
"handle_parsing_errors": {
"display_name": "Handle Parsing Errors",
"info": "If True, the agent will handle parsing errors. If False, the agent will raise an error.",
"advanced": True,
},
"message_history": {
"display_name": "Message History",
"info": "Message history to pass to the agent.",
},
"input_value": {
"display_name": "Inputs",
"info": "Input text to pass to the agent.",
},
}
Begin!
async def build(
self,
input_value: str,
llm: LanguageModel,
tools: List[Tool],
user_prompt: str = "{input}",
system_message: str = "You are a helpful assistant",
message_history: Optional[List[Data]] = None,
tool_template: str = "{name}: {description}",
handle_parsing_errors: bool = True,
) -> Text:
if "input" not in user_prompt:
raise ValueError("Prompt must contain 'input' key.")
Question: {input}
def render_tool_description(tools):
return "\n".join(
[tool_template.format(name=tool.name, description=tool.description, args=tool.args) for tool in tools]
)
{agent_scratchpad}
""",
),
]
def creat_agent_runnable(self):
messages = [
("system", system_message),
(
"placeholder",
"{chat_history}",
),
("human", user_prompt),
("placeholder", "{agent_scratchpad}"),
HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=["input"], template=self.user_prompt))
]
prompt = ChatPromptTemplate.from_messages(messages)
agent = create_xml_agent(llm, tools, prompt, tools_renderer=render_tool_description)
result = await self.run_agent(
agent=agent,
inputs=input_value,
tools=tools,
message_history=message_history,
handle_parsing_errors=handle_parsing_errors,
)
self.status = result
return result
return create_xml_agent(self.llm, self.tools, prompt)

View file

@ -1,186 +0,0 @@
from typing import Any, List, Optional, cast
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.prompts.chat import HumanMessagePromptTemplate, SystemMessagePromptTemplate
from langflow.base.agents.agent import LCAgentComponent
from langflow.base.agents.utils import AGENTS, AgentSpec, get_agents_list
from langflow.field_typing import LanguageModel, Text, Tool
from langflow.schema import Data
from langflow.schema.dotdict import dotdict
class AgentComponent(LCAgentComponent):
display_name = "Agent"
description = "Run any LangChain agent using a simplified interface."
field_order = [
"agent_name",
"llm",
"tools",
"prompt",
"tool_template",
"handle_parsing_errors",
"memory",
"input_value",
]
name = "AgentComponent"
def build_config(self):
return {
"agent_name": {
"display_name": "Agent",
"info": "The agent to use.",
"refresh_button": True,
"real_time_refresh": True,
"options": get_agents_list(),
},
"llm": {"display_name": "LLM"},
"tools": {"display_name": "Tools"},
"user_prompt": {
"display_name": "Prompt",
"multiline": True,
"info": "This prompt must contain 'tools' and 'agent_scratchpad' keys.",
},
"system_message": {
"display_name": "System Message",
"info": "System message to be passed to the LLM.",
"advanced": True,
},
"tool_template": {
"display_name": "Tool Template",
"info": "Template for rendering tools in the prompt. Tools have 'name' and 'description' keys.",
"advanced": True,
},
"handle_parsing_errors": {
"display_name": "Handle Parsing Errors",
"info": "If True, the agent will handle parsing errors. If False, the agent will raise an error.",
"advanced": True,
},
"message_history": {
"display_name": "Message History",
"info": "Message history to pass to the agent.",
},
"input_value": {
"display_name": "Input",
"info": "Input text to pass to the agent.",
},
"langchain_hub_api_key": {
"display_name": "LangChain Hub API Key",
"info": "API key to use for LangChain Hub. If provided, prompts will be fetched from LangChain Hub.",
"advanced": True,
},
}
def get_system_and_user_message_from_prompt(self, prompt: Any):
"""
Extracts the system message and user prompt from a given prompt object.
Args:
prompt (Any): The prompt object from which to extract the system message and user prompt.
Returns:
Tuple[Optional[str], Optional[str]]: A tuple containing the system message and user prompt.
If the prompt object does not have any messages, both values will be None.
"""
if hasattr(prompt, "messages"):
system_message = None
user_prompt = None
for message in prompt.messages:
if isinstance(message, SystemMessagePromptTemplate):
s_prompt = message.prompt
if isinstance(s_prompt, list):
s_template = " ".join([cast(str, s.template) for s in s_prompt if hasattr(s, "template")])
elif hasattr(s_prompt, "template"):
s_template = s_prompt.template
system_message = s_template
elif isinstance(message, HumanMessagePromptTemplate):
h_prompt = message.prompt
if isinstance(h_prompt, list):
h_template = " ".join([cast(str, h.template) for h in h_prompt if hasattr(h, "template")])
elif hasattr(h_prompt, "template"):
h_template = h_prompt.template
user_prompt = h_template
return system_message, user_prompt
return None, None
def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):
"""
Updates the build configuration based on the provided field value and field name.
Args:
build_config (dotdict): The build configuration to be updated.
field_value (Any): The value of the field being updated.
field_name (Text | None, optional): The name of the field being updated. Defaults to None.
Returns:
dotdict: The updated build configuration.
"""
if field_name == "agent":
build_config["agent"]["options"] = get_agents_list()
if field_value in AGENTS:
# if langchain_hub_api_key is provided, fetch the prompt from LangChain Hub
if build_config["langchain_hub_api_key"]["value"] and AGENTS[field_value].hub_repo:
from langchain import hub
hub_repo: str | None = AGENTS[field_value].hub_repo
if hub_repo:
hub_api_key: str = build_config["langchain_hub_api_key"]["value"]
prompt = hub.pull(hub_repo, api_key=hub_api_key)
system_message, user_prompt = self.get_system_and_user_message_from_prompt(prompt)
if system_message:
build_config["system_message"]["value"] = system_message
if user_prompt:
build_config["user_prompt"]["value"] = user_prompt
if AGENTS[field_value].prompt:
build_config["user_prompt"]["value"] = AGENTS[field_value].prompt
else:
build_config["user_prompt"]["value"] = "{input}"
fields = AGENTS[field_value].fields
for field in ["llm", "tools", "prompt", "tools_renderer"]:
if field not in fields:
build_config[field]["show"] = False
return build_config
async def build(
self,
agent_name: str,
input_value: str,
llm: LanguageModel,
tools: List[Tool],
system_message: str = "You are a helpful assistant. Help the user answer any questions.",
user_prompt: str = "{input}",
message_history: Optional[List[Data]] = None,
tool_template: str = "{name}: {description}",
handle_parsing_errors: bool = True,
) -> Text:
agent_spec: Optional[AgentSpec] = AGENTS.get(agent_name)
if agent_spec is None:
raise ValueError(f"{agent_name} not found.")
def render_tool_description(tools):
return "\n".join(
[tool_template.format(name=tool.name, description=tool.description, args=tool.args) for tool in tools]
)
messages = [
("system", system_message),
(
"placeholder",
"{chat_history}",
),
("human", user_prompt),
("placeholder", "{agent_scratchpad}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
agent_func = agent_spec.func
agent = agent_func(llm, tools, prompt, render_tool_description, True)
result = await self.run_agent(
agent=agent,
inputs=input_value,
tools=tools,
message_history=message_history,
handle_parsing_errors=handle_parsing_errors,
)
self.status = result
return result

View file

@ -1,4 +1,3 @@
from .AgentComponent import AgentComponent
from .ExtractKeyFromData import ExtractKeyFromDataComponent
from .ListFlows import ListFlowsComponent
from .MergeData import MergeDataComponent
@ -6,7 +5,6 @@ from .SelectivePassThrough import SelectivePassThroughComponent
from .SubFlow import SubFlowComponent
__all__ = [
"AgentComponent",
"ConditionalRouterComponent",
"ExtractKeyFromDataComponent",
"FlowToolComponent",

View file

@ -1,43 +1,44 @@
from langchain_core.runnables import Runnable
from langflow.custom import CustomComponent
from langflow.field_typing import Text
from langflow.custom import Component
from langflow.inputs import HandleInput, MessageTextInput
from langflow.schema.message import Message
from langflow.template import Output
class RunnableExecComponent(CustomComponent):
class RunnableExecComponent(Component):
description = "Execute a runnable. It will try to guess the input and output keys."
display_name = "Runnable Executor"
name = "RunnableExecutor"
beta: bool = True
field_order = [
"input_key",
"output_key",
"input_value",
"runnable",
inputs = [
MessageTextInput(name="input_value", display_name="Input", required=True),
HandleInput(
name="runnable",
display_name="Agent Executor",
input_types=["Chain", "AgentExecutor", "Agent", "Runnable"],
required=True,
),
MessageTextInput(
name="input_key",
display_name="Input Key",
value="input",
advanced=True,
),
MessageTextInput(
name="output_key",
display_name="Output Key",
value="output",
advanced=True,
),
]
def build_config(self):
return {
"input_key": {
"display_name": "Input Key",
"info": "The key to use for the input.",
"advanced": True,
},
"input_value": {
"display_name": "Inputs",
"info": "The inputs to pass to the runnable.",
},
"runnable": {
"display_name": "Runnable",
"info": "The runnable to execute.",
"input_types": ["Chain", "AgentExecutor", "Agent", "Runnable"],
},
"output_key": {
"display_name": "Output Key",
"info": "The key to use for the output.",
"advanced": True,
},
}
outputs = [
Output(
display_name="Text",
name="text",
method="build_executor",
),
]
def get_output(self, result, input_key, output_key):
"""
@ -107,16 +108,10 @@ class RunnableExecComponent(CustomComponent):
status = f"Warning: The input key is not '{input_key}'. The input key is '{runnable.input_keys}'."
return input_dict, status
def build(
self,
input_value: Text,
runnable: Runnable,
input_key: str = "input",
output_key: str = "output",
) -> Text:
input_dict, status = self.get_input_dict(runnable, input_key, input_value)
result = runnable.invoke(input_dict)
result_value, _status = self.get_output(result, input_key, output_key)
def build_executor(self) -> Message:
input_dict, status = self.get_input_dict(self.runnable, self.input_key, self.input_value)
result = self.runnable.invoke(input_dict)
result_value, _status = self.get_output(result, self.input_key, self.output_key)
status += _status
status += f"\n\nOutput: {result_value}\n\nRaw Output: {result}"
self.status = status

View file

@ -1,30 +0,0 @@
from pathlib import Path
import yaml
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
from langchain_community.tools.json.tool import JsonSpec
from langflow.custom import CustomComponent
class JsonToolkitComponent(CustomComponent):
    """Langflow component that builds a LangChain ``JsonToolkit`` from a spec file."""

    display_name = "JsonToolkit"
    description = "Toolkit for interacting with a JSON spec."
    name = "JsonToolkit"

    def build_config(self):
        return {
            "path": {
                "display_name": "Path",
                "field_type": "file",
                "file_types": ["json", "yaml", "yml"],
            },
        }

    def build(self, path: str) -> JsonToolkit:
        """Parse the spec at ``path`` (YAML or JSON) and wrap it in a ``JsonToolkit``.

        Files whose name ends in ``yaml``/``yml`` are parsed with PyYAML's
        FullLoader; anything else is handed to ``JsonSpec.from_file`` as JSON.
        """
        if path.endswith(("yaml", "yml")):
            # Context manager closes the file handle promptly; the original
            # `yaml.load(open(path, "r"), ...)` leaked it until GC.
            with open(path, "r") as spec_file:
                yaml_dict = yaml.load(spec_file, Loader=yaml.FullLoader)
            spec = JsonSpec(dict_=yaml_dict)
        else:
            spec = JsonSpec.from_file(Path(path))
        return JsonToolkit(spec=spec)

View file

@ -1,35 +0,0 @@
from pathlib import Path
import yaml
from langchain_community.agent_toolkits.openapi.toolkit import BaseToolkit, OpenAPIToolkit
from langchain_community.tools.json.tool import JsonSpec
from langchain_community.utilities.requests import TextRequestsWrapper
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
class OpenAPIToolkitComponent(CustomComponent):
    """Langflow component that builds a LangChain ``OpenAPIToolkit`` from a spec file."""

    display_name = "OpenAPIToolkit"
    description = "Toolkit for interacting with an OpenAPI API."
    name = "OpenAPIToolkit"

    def build_config(self):
        # Config keys now describe build()'s actual parameters; the previous
        # entries ("json_agent", "requests_wrapper") matched nothing.
        return {
            "llm": {"display_name": "LLM"},
            "path": {
                "display_name": "Path",
                "field_type": "file",
                "file_types": ["json", "yaml", "yml"],
            },
            "allow_dangerous_requests": {"display_name": "Allow Dangerous Requests"},
        }

    def build(self, llm: LanguageModel, path: str, allow_dangerous_requests: bool = False) -> BaseToolkit:
        """Load the OpenAPI spec at ``path`` and build a toolkit driven by ``llm``.

        YAML specs are parsed with PyYAML's FullLoader; other files are treated
        as JSON via ``JsonSpec.from_file``. ``allow_dangerous_requests`` is
        forwarded to ``OpenAPIToolkit.from_llm`` unchanged.
        """
        if path.endswith(("yaml", "yml")):
            # Context manager closes the file handle promptly; the original
            # `yaml.load(open(path, "r"), ...)` leaked it until GC.
            with open(path, "r") as spec_file:
                yaml_dict = yaml.load(spec_file, Loader=yaml.FullLoader)
            spec = JsonSpec(dict_=yaml_dict)
        else:
            spec = JsonSpec.from_file(Path(path))
        requests_wrapper = TextRequestsWrapper()
        return OpenAPIToolkit.from_llm(
            llm=llm,
            json_spec=spec,
            requests_wrapper=requests_wrapper,
            allow_dangerous_requests=allow_dangerous_requests,
        )

View file

@ -1,25 +1,44 @@
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo
from langchain_core.vectorstores import VectorStore
from langflow.custom import CustomComponent
from langflow.custom import Component
from langflow.inputs import HandleInput, MultilineInput, MessageTextInput
from langflow.template import Output
class VectorStoreInfoComponent(CustomComponent):
class VectorStoreInfoComponent(Component):
display_name = "VectorStoreInfo"
description = "Information about a VectorStore"
name = "VectorStoreInfo"
def build_config(self):
return {
"vectorstore": {"display_name": "VectorStore"},
"description": {"display_name": "Description", "multiline": True},
"name": {"display_name": "Name"},
}
inputs = [
MessageTextInput(
name="vectorstore_name",
display_name="Name",
info="Name of the VectorStore",
required=True,
),
MultilineInput(
name="vectorstore_description",
display_name="Description",
info="Description of the VectorStore",
required=True,
),
HandleInput(
name="input_vectorstore",
display_name="Vector Store",
input_types=["VectorStore"],
required=True,
),
]
def build(
self,
vectorstore: VectorStore,
description: str,
name: str,
) -> VectorStoreInfo:
return VectorStoreInfo(vectorstore=vectorstore, description=description, name=name)
outputs = [
Output(display_name="Vector Store Info", name="info", method="build_info"),
]
def build_info(self) -> VectorStoreInfo:
self.status = {
"name": self.vectorstore_name,
"description": self.vectorstore_description,
}
return VectorStoreInfo(
vectorstore=self.input_vectorstore, description=self.vectorstore_description, name=self.vectorstore_name
)

View file

@ -1,23 +0,0 @@
from typing import List, Union
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo, VectorStoreRouterToolkit
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel, Tool
class VectorStoreRouterToolkitComponent(CustomComponent):
    """Langflow component exposing LangChain's ``VectorStoreRouterToolkit``."""

    display_name = "VectorStoreRouterToolkit"
    description = "Toolkit for routing between Vector Stores."
    name = "VectorStoreRouterToolkit"

    def build_config(self):
        return {
            "vectorstores": {"display_name": "Vector Stores"},
            "llm": {"display_name": "LLM"},
        }

    def build(self, vectorstores: List[VectorStoreInfo], llm: LanguageModel) -> Union[Tool, VectorStoreRouterToolkit]:
        """Create a router toolkit that lets ``llm`` choose among ``vectorstores``."""
        # Removed leftover debug print() calls that dumped the inputs to stdout.
        return VectorStoreRouterToolkit(vectorstores=vectorstores, llm=llm)

View file

@ -1,25 +0,0 @@
from typing import Union
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo, VectorStoreToolkit
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel, Tool
class VectorStoreToolkitComponent(CustomComponent):
    """Langflow component wrapping LangChain's ``VectorStoreToolkit``."""

    display_name = "VectorStoreToolkit"
    description = "Toolkit for interacting with a Vector Store."
    name = "VectorStoreToolkit"

    def build_config(self):
        config = {
            "vectorstore_info": {"display_name": "Vector Store Info"},
            "llm": {"display_name": "LLM"},
        }
        return config

    def build(self, vectorstore_info: VectorStoreInfo, llm: LanguageModel) -> Union[Tool, VectorStoreToolkit]:
        """Build a toolkit bound to the given vector-store info and LLM."""
        toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info, llm=llm)
        return toolkit

View file

@ -1,15 +1,7 @@
from .JsonToolkit import JsonToolkitComponent
from .Metaphor import MetaphorToolkit
from .OpenAPIToolkit import OpenAPIToolkitComponent
from .VectorStoreInfo import VectorStoreInfoComponent
from .VectorStoreRouterToolkit import VectorStoreRouterToolkitComponent
from .VectorStoreToolkit import VectorStoreToolkitComponent
__all__ = [
"JsonToolkitComponent",
"MetaphorToolkit",
"OpenAPIToolkitComponent",
"VectorStoreInfoComponent",
"VectorStoreRouterToolkitComponent",
"VectorStoreToolkitComponent",
]