diff --git a/docs/docs/components/tools.mdx b/docs/docs/components/tools.mdx
index 76ce93a01..c92d6eee0 100644
--- a/docs/docs/components/tools.mdx
+++ b/docs/docs/components/tools.mdx
@@ -6,4 +6,58 @@ import Admonition from '@theme/Admonition';
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
-
\ No newline at end of file
+
+
+
+### BingSearchRun
+
+Bing Search is a web search engine owned and operated by Microsoft. It provides search results for various types of content, including web pages, images, videos, and news articles. It uses a combination of algorithms and human editors to deliver search results to users.
+
+**Params**
+
+- **Api Wrapper:** A BingSearchAPIWrapper component that takes the search URL and a subscription key.
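+
+A minimal sketch of wiring this up directly in LangChain; the subscription key and search URL below are placeholders:
+
+```python
+from langchain.tools.bing_search.tool import BingSearchRun
+from langchain.utilities import BingSearchAPIWrapper
+
+# Placeholder credentials: substitute your own Azure subscription key.
+wrapper = BingSearchAPIWrapper(
+    bing_subscription_key="<your-subscription-key>",
+    bing_search_url="https://api.bing.microsoft.com/v7.0/search",
+)
+tool = BingSearchRun(api_wrapper=wrapper)
+print(tool.run("latest Python release"))  # returns page snippets as text
+```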
+
+
+### Calculator
+
+The Calculator tool gives an agent mathematical calculation capabilities by leveraging an LLMMathChain, so the agent can perform arithmetic when a question requires it.
+
+**Params**
+
+- **LLM:** Language Model to use in the calculation.
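+
+A minimal sketch of the LLMMathChain this tool wraps, assuming an OpenAI chat model and a placeholder API key:
+
+```python
+from langchain.chains import LLMMathChain
+from langchain.chat_models import ChatOpenAI
+
+# Placeholder key: any LLM supported by LangChain works here.
+llm = ChatOpenAI(openai_api_key="<your-openai-key>")
+calculator = LLMMathChain.from_llm(llm=llm)
+print(calculator.run("What is 13 raised to the 0.5 power?"))
+```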
+
+
+### GoogleSearchResults
+
+A wrapper around Google Search. Useful when the user needs to answer questions with more control over the JSON data returned from the API. It returns the full JSON response, configured by the parameters passed to the API wrapper.
+
+**Params**
+
+- **Api Wrapper:** A GoogleSearchAPIWrapper with Google API key and CSE ID
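+
+A minimal sketch with placeholder Google credentials; `num_results` is shown for illustration:
+
+```python
+from langchain.tools import GoogleSearchResults
+from langchain.utilities import GoogleSearchAPIWrapper
+
+# Placeholder credentials from the Google Programmable Search Engine console.
+wrapper = GoogleSearchAPIWrapper(
+    google_api_key="<your-google-api-key>",
+    google_cse_id="<your-cse-id>",
+)
+tool = GoogleSearchResults(api_wrapper=wrapper, num_results=4)
+print(tool.run("langflow"))  # stringified list of result metadata dicts
+```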
+
+
+### GoogleSearchRun
+
+A quick wrapper around Google Search. It executes the search query and returns just the first result snippet from the highest-priority result type.
+
+**Params**
+
+- **Api Wrapper:** A GoogleSearchAPIWrapper with Google API key and CSE ID
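+
+The quick variant uses the same wrapper; credentials below are placeholders:
+
+```python
+from langchain.tools import GoogleSearchRun
+from langchain.utilities import GoogleSearchAPIWrapper
+
+# Placeholder credentials: same wrapper as GoogleSearchResults above.
+wrapper = GoogleSearchAPIWrapper(
+    google_api_key="<your-google-api-key>",
+    google_cse_id="<your-cse-id>",
+)
+tool = GoogleSearchRun(api_wrapper=wrapper)
+print(tool.run("current weather in Berlin"))  # plain-text snippets
+```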
+
+
+### GoogleSerperRun
+
+A wrapper around Serper.dev, a low-cost Google Search API. Useful for answering questions about current events.
+
+**Params**
+
+- **Api Wrapper:** A GoogleSerperAPIWrapper component with API key and result keys
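+
+A minimal sketch, assuming an API key from serper.dev:
+
+```python
+from langchain.tools import GoogleSerperRun
+from langchain.utilities import GoogleSerperAPIWrapper
+
+# Placeholder key from serper.dev.
+wrapper = GoogleSerperAPIWrapper(serper_api_key="<your-serper-key>")
+tool = GoogleSerperRun(api_wrapper=wrapper)
+print(tool.run("Who won the 2022 World Cup?"))
+```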
+
+
+### InfoSQLDatabaseTool
+
+Tool for getting metadata about a SQL database. The input to this tool is a comma-separated list of tables, and the output is the schema and sample rows for those tables. Example input: `table1, table2, table3`.
+
+**Params**
+
+- **Db:** SQLDatabase to query.
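+
+A minimal sketch against a placeholder SQLite database; the table names are illustrative:
+
+```python
+from langchain.sql_database import SQLDatabase
+from langchain.tools.sql_database.tool import InfoSQLDatabaseTool
+
+# Placeholder URI: any SQLAlchemy-compatible database works.
+db = SQLDatabase.from_uri("sqlite:///example.db")
+tool = InfoSQLDatabaseTool(db=db)
+print(tool.run("users, orders"))  # schema and sample rows for both tables
+```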
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
index cf9357228..bccd237cb 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -7351,17 +7351,6 @@ files = [
{file = "validators-0.21.0.tar.gz", hash = "sha256:245b98ab778ed9352a7269c6a8f6c2a839bed5b2a7e3e60273ce399d247dd4b3"},
]
-[[package]]
-name = "vine"
-version = "5.0.0"
-description = "Promises, promises, promises."
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "vine-5.0.0-py2.py3-none-any.whl", hash = "sha256:4c9dceab6f76ed92105027c49c823800dd33cacce13bdedc5b914e3514b7fb30"},
- {file = "vine-5.0.0.tar.gz", hash = "sha256:7d3b1624a953da82ef63462013bbd271d3eb75751489f9807598e8f340bd637e"},
-]
-
[[package]]
name = "watchfiles"
version = "0.19.0"
diff --git a/pyproject.toml b/pyproject.toml
index b3e7170f8..6dbe201ab 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
-version = "0.4.7"
+version = "0.4.10"
description = "A Python package with a built-in web application"
authors = ["Logspace "]
maintainers = [
diff --git a/src/backend/langflow/components/agents/OpenAIConversationalAgent.py b/src/backend/langflow/components/agents/OpenAIConversationalAgent.py
new file mode 100644
index 000000000..e2b876978
--- /dev/null
+++ b/src/backend/langflow/components/agents/OpenAIConversationalAgent.py
@@ -0,0 +1,82 @@
+from langflow import CustomComponent
+from typing import Optional
+from langchain.prompts import SystemMessagePromptTemplate
+from langchain.tools import Tool
+from langchain.schema.memory import BaseMemory
+from langchain.chat_models import ChatOpenAI
+
+from langchain.agents.agent import AgentExecutor
+from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
+from langchain.memory.token_buffer import ConversationTokenBufferMemory
+from langchain.prompts.chat import MessagesPlaceholder
+from langchain.agents.agent_toolkits.conversational_retrieval.openai_functions import (
+ _get_default_system_message,
+)
+
+
+class ConversationalAgent(CustomComponent):
+ display_name: str = "OpenAI Conversational Agent"
+ description: str = "Conversational Agent that can use OpenAI's function calling API"
+
+ def build_config(self):
+ openai_function_models = [
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-16k-0613",
+ "gpt-4-0613",
+ "gpt-4-32k-0613",
+ ]
+ return {
+ "tools": {"is_list": True, "display_name": "Tools"},
+ "memory": {"display_name": "Memory"},
+ "system_message": {"display_name": "System Message"},
+ "max_token_limit": {"display_name": "Max Token Limit"},
+ "model_name": {
+ "display_name": "Model Name",
+ "options": openai_function_models,
+ "value": openai_function_models[0],
+ },
+ "code": {"show": False},
+ }
+
+ def build(
+ self,
+ model_name: str,
+ openai_api_key: str,
+ openai_api_base: str,
+ tools: Tool,
+ memory: Optional[BaseMemory] = None,
+ system_message: Optional[SystemMessagePromptTemplate] = None,
+ max_token_limit: int = 2000,
+ ) -> AgentExecutor:
+ llm = ChatOpenAI(
+ model=model_name,
+ openai_api_key=openai_api_key,
+ openai_api_base=openai_api_base,
+ )
+ if not memory:
+ memory_key = "chat_history"
+ memory = ConversationTokenBufferMemory(
+ memory_key=memory_key,
+ return_messages=True,
+ output_key="output",
+ llm=llm,
+ max_token_limit=max_token_limit,
+ )
+ else:
+ memory_key = memory.memory_key # type: ignore
+
+ _system_message = system_message or _get_default_system_message()
+ prompt = OpenAIFunctionsAgent.create_prompt(
+ system_message=_system_message, # type: ignore
+ extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
+ )
+ agent = OpenAIFunctionsAgent(
+ llm=llm, tools=tools, prompt=prompt # type: ignore
+ )
+ return AgentExecutor(
+ agent=agent,
+ tools=tools, # type: ignore
+ memory=memory,
+ verbose=True,
+ return_intermediate_steps=True,
+ )
diff --git a/src/backend/langflow/components/agents/__init__.py b/src/backend/langflow/components/agents/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/backend/langflow/graph/utils.py b/src/backend/langflow/graph/utils.py
index b78b2f961..e163d76c4 100644
--- a/src/backend/langflow/graph/utils.py
+++ b/src/backend/langflow/graph/utils.py
@@ -3,6 +3,10 @@ from typing import Any, Union
from langflow.interface.utils import extract_input_variables_from_prompt
+class UnbuiltObject:
+    """Sentinel assigned to a vertex whose object has not been built yet."""
+
+
def validate_prompt(prompt: str):
"""Validate prompt."""
if extract_input_variables_from_prompt(prompt):
diff --git a/src/backend/langflow/graph/vertex/base.py b/src/backend/langflow/graph/vertex/base.py
index 39cf79bd3..87c8fa851 100644
--- a/src/backend/langflow/graph/vertex/base.py
+++ b/src/backend/langflow/graph/vertex/base.py
@@ -1,4 +1,5 @@
import ast
+from langflow.graph.utils import UnbuiltObject
from langflow.interface.initialize import loading
from langflow.interface.listing import lazy_load_dict
from langflow.utils.constants import DIRECT_TYPES
@@ -25,7 +26,7 @@ class Vertex:
self.edges: List["Edge"] = []
self.base_type: Optional[str] = base_type
self._parse_data()
- self._built_object = None
+ self._built_object = UnbuiltObject()
self._built = False
self.artifacts: Dict[str, Any] = {}
self.task_id: Optional[str] = None
@@ -271,8 +272,14 @@ class Vertex:
"""
-        Checks if the built object is None and raises a ValueError if so.
+        Checks if the built object is unbuilt or None and raises a ValueError if so.
"""
- if self._built_object is None:
- raise ValueError(f"Node type {self.vertex_type} not found")
+ if isinstance(self._built_object, UnbuiltObject):
+ raise ValueError(f"{self.vertex_type}: {self._built_object_repr()}")
+ elif self._built_object is None:
+ message = f"{self.vertex_type} returned None."
+ if self.base_type == "custom_components":
+ message += " Make sure your build method returns a component."
+
+ raise ValueError(message)
def build(self, force: bool = False) -> Any:
if not self._built or force:
diff --git a/src/backend/langflow/graph/vertex/types.py b/src/backend/langflow/graph/vertex/types.py
index ab3e54e0b..597998515 100644
--- a/src/backend/langflow/graph/vertex/types.py
+++ b/src/backend/langflow/graph/vertex/types.py
@@ -226,7 +226,12 @@ class PromptVertex(Vertex):
# so the prompt format doesn't break
artifacts.pop("handle_keys", None)
try:
- template = self._built_object.template
+ if not hasattr(self._built_object, "template") and hasattr(
+ self._built_object, "prompt"
+ ):
+ template = self._built_object.prompt.template
+ else:
+ template = self._built_object.template
for key, value in artifacts.items():
if value:
replace_key = "{" + key + "}"
diff --git a/src/backend/langflow/interface/custom/constants.py b/src/backend/langflow/interface/custom/constants.py
index 83cf4b463..39a1e9f3b 100644
--- a/src/backend/langflow/interface/custom/constants.py
+++ b/src/backend/langflow/interface/custom/constants.py
@@ -8,10 +8,13 @@ from langchain.text_splitter import TextSplitter
from langchain.tools import Tool
from langchain.vectorstores.base import VectorStore
from langchain.schema import BaseOutputParser
-
+from langchain.schema.memory import BaseMemory
+from langchain.memory.chat_memory import BaseChatMemory
+from langchain.agents.agent import AgentExecutor
+
LANGCHAIN_BASE_TYPES = {
"Chain": Chain,
+ "AgentExecutor": AgentExecutor,
"Tool": Tool,
"BaseLLM": BaseLLM,
"PromptTemplate": PromptTemplate,
@@ -22,6 +25,8 @@ LANGCHAIN_BASE_TYPES = {
"Embeddings": Embeddings,
"BaseRetriever": BaseRetriever,
"BaseOutputParser": BaseOutputParser,
+ "BaseMemory": BaseMemory,
+ "BaseChatMemory": BaseChatMemory,
}
# Langchain base types plus Python base types
diff --git a/src/backend/langflow/interface/custom/custom_component.py b/src/backend/langflow/interface/custom/custom_component.py
index 4b3b11ed0..b1b1a2080 100644
--- a/src/backend/langflow/interface/custom/custom_component.py
+++ b/src/backend/langflow/interface/custom/custom_component.py
@@ -51,8 +51,8 @@ class CustomComponent(Component, extra=Extra.allow):
for type_hint in TYPE_HINT_LIST:
if reader._is_type_hint_used_in_args(
- "Optional", code
- ) and not reader._is_type_hint_imported("Optional", code):
+ type_hint, code
+ ) and not reader._is_type_hint_imported(type_hint, code):
error_detail = {
"error": "Type hint Error",
"traceback": f"Type hint '{type_hint}' is used but not imported in the code.",
diff --git a/src/backend/langflow/interface/initialize/utils.py b/src/backend/langflow/interface/initialize/utils.py
index 976d8906c..ceb8a53a1 100644
--- a/src/backend/langflow/interface/initialize/utils.py
+++ b/src/backend/langflow/interface/initialize/utils.py
@@ -51,7 +51,9 @@ def handle_partial_variables(prompt, format_kwargs: Dict):
}
# Remove handle_keys otherwise LangChain raises an error
partial_variables.pop("handle_keys", None)
- return prompt.partial(**partial_variables)
+ if partial_variables and hasattr(prompt, "partial"):
+ return prompt.partial(**partial_variables)
+ return prompt
def handle_variable(params: Dict, input_variable: str, format_kwargs: Dict):
diff --git a/src/backend/langflow/processing/base.py b/src/backend/langflow/processing/base.py
index f1d7b6e56..13ff6a385 100644
--- a/src/backend/langflow/processing/base.py
+++ b/src/backend/langflow/processing/base.py
@@ -5,6 +5,7 @@ from langflow.api.v1.callback import (
)
from langflow.processing.process import fix_memory_inputs, format_actions
from langflow.utils.logger import logger
+from langchain.agents.agent import AgentExecutor
async def get_result_and_steps(langchain_object, inputs: Union[dict, str], **kwargs):
@@ -20,7 +21,8 @@ async def get_result_and_steps(langchain_object, inputs: Union[dict, str], **kwa
# to display intermediate steps
langchain_object.return_intermediate_steps = True
try:
- fix_memory_inputs(langchain_object)
+ if not isinstance(langchain_object, AgentExecutor):
+ fix_memory_inputs(langchain_object)
except Exception as exc:
logger.error(f"Error fixing memory inputs: {exc}")
diff --git a/src/backend/langflow/services/settings/base.py b/src/backend/langflow/services/settings/base.py
index 071e6a296..c5a7e9bcd 100644
--- a/src/backend/langflow/services/settings/base.py
+++ b/src/backend/langflow/services/settings/base.py
@@ -138,12 +138,17 @@ class Settings(BaseSettings):
value = json.loads(str(value))
if isinstance(value, list):
for item in value:
+ if isinstance(item, Path):
+ item = str(item)
if item not in getattr(self, key):
getattr(self, key).append(item)
logger.debug(f"Extended {key}")
else:
- getattr(self, key).append(value)
- logger.debug(f"Appended {key}")
+ if isinstance(value, Path):
+ value = str(value)
+ if value not in getattr(self, key):
+ getattr(self, key).append(value)
+ logger.debug(f"Appended {key}")
else:
setattr(self, key, value)
diff --git a/src/frontend/src/App.tsx b/src/frontend/src/App.tsx
index 0088081ae..467e15ed8 100644
--- a/src/frontend/src/App.tsx
+++ b/src/frontend/src/App.tsx
@@ -9,7 +9,7 @@ import ErrorAlert from "./alerts/error";
import NoticeAlert from "./alerts/notice";
import SuccessAlert from "./alerts/success";
import CrashErrorComponent from "./components/CrashErrorComponent";
-import Header from "./components/headerComponent";
+import LoadingComponent from "./components/loadingComponent";
import { alertContext } from "./contexts/alertContext";
import { locationContext } from "./contexts/locationContext";
import { TabsContext } from "./contexts/tabsContext";
@@ -25,6 +25,7 @@ export default function App() {
setIsStackedOpen(true);
}, [location.pathname, setCurrent, setIsStackedOpen, setShowSideBar]);
const { hardReset } = useContext(TabsContext);
+
const {
errorData,
errorOpen,
@@ -35,6 +36,7 @@ export default function App() {
successData,
successOpen,
setSuccessOpen,
+ loading,
} = useContext(alertContext);
// Initialize state variable for the list of alerts
@@ -133,8 +135,15 @@ export default function App() {
}}
FallbackComponent={CrashErrorComponent}
>
-
-
+ {loading ? (
+