Changing to ShadUI, Edge Tooltips and Advanced Dialog

Cristhian Zanforlin Lousa, 2023-06-08 12:05:32 -03:00
Commit c9b68f6f7a
96 changed files with 3741 additions and 3686 deletions


@ -54,4 +54,4 @@ jobs:
token: ${{ secrets.SERVE_GITHUB_TOKEN }}
repository: jina-ai/langchain-serve
event-type: langflow-push
client-payload: '{"push_token": "${{ secrets.LCSERVE_PUSH_TOKEN }}", "branch": "dev"}'
client-payload: '{"push_token": "${{ secrets.LCSERVE_PUSH_TOKEN }}", "branch": "main"}'


@ -43,6 +43,7 @@ install_backend:
poetry install
backend:
make install_backend
poetry run uvicorn langflow.main:app --port 7860 --reload --log-level debug
build_frontend:
@ -59,7 +60,7 @@ lcserve_push:
make build_frontend
@version=$$(poetry version --short); \
lc-serve push --app langflow.lcserve:app --app-dir . \
--image-name langflow --image-tag $${version} --verbose
--image-name langflow --image-tag $${version} --verbose --public
lcserve_deploy:
@:$(if $(uses),,$(error `uses` is not set. Please run `make uses=... lcserve_deploy`))
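The new backend target boots the API with hot reload on port 7860. A minimal Python equivalent of that command (a sketch; it assumes the langflow.main:app import string from this Makefile):

import uvicorn

# Programmatic counterpart of `make backend`; the import string is
# required so uvicorn's reloader can re-import the app on changes.
uvicorn.run("langflow.main:app", port=7860, reload=True, log_level="debug")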

poetry.lock (generated, 1374 changed lines): file diff suppressed because it is too large.


@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.0.78"
version = "0.0.86"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [
@ -22,14 +22,14 @@ langflow = "langflow.__main__:main"
[tool.poetry.dependencies]
python = ">=3.9,<3.12"
fastapi = "^0.95.0"
fastapi = "^0.96.0"
uvicorn = "^0.20.0"
beautifulsoup4 = "^4.11.2"
google-search-results = "^2.4.1"
google-api-python-client = "^2.79.0"
typer = "^0.7.0"
gunicorn = "^20.1.0"
langchain = "^0.0.184"
langchain = "^0.0.194"
openai = "^0.27.7"
types-pyyaml = "^6.0.12.8"
dill = "^0.3.6"
@ -49,7 +49,7 @@ psycopg2-binary = "^2.9.6"
pyarrow = "^11.0.0"
tiktoken = "^0.3.3"
wikipedia = "^1.4.0"
langchain-serve = { version = "^0.0.38", optional = true }
langchain-serve = { version = ">0.0.39", optional = true }
qdrant-client = "^1.2.0"
websockets = "^11.0.3"
weaviate-client = "^3.19.2"
@ -57,6 +57,8 @@ jina = "3.15.2"
sentence-transformers = "^2.2.2"
ctransformers = "^0.2.2"
cohere = "^4.6.0"
faiss-cpu = "^1.7.4"
anthropic = "^0.2.9"
[tool.poetry.group.dev.dependencies]
@ -76,6 +78,15 @@ types-pillow = "^9.5.0.2"
[tool.poetry.extras]
deploy = ["langchain-serve"]
[tool.pytest.ini_options]
minversion = "6.0"
addopts = "-ra"
testpaths = ["tests", "integration"]
console_output_style = "progress"
filterwarnings = ["ignore::DeprecationWarning"]
log_cli = true
[tool.ruff]
line-length = 120


@ -1,4 +1,4 @@
from langflow.cache import cache_manager
from langflow.interface.loading import load_flow_from_json
from langflow.processing.process import load_flow_from_json
__all__ = ["load_flow_from_json", "cache_manager"]


@ -0,0 +1,3 @@
from langflow.api.router import router
__all__ = ["router"]


@ -0,0 +1,8 @@
# Router for base api
from fastapi import APIRouter
from langflow.api.v1 import chat_router, endpoints_router, validate_router
router = APIRouter(prefix="/api/v1", tags=["api"])
router.include_router(chat_router)
router.include_router(endpoints_router)
router.include_router(validate_router)
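All v1 routes now hang off a single APIRouter with the /api/v1 prefix, so an application only needs to include this one router. A minimal sketch of mounting it (mirroring what create_app does later in this commit):

from fastapi import FastAPI
from langflow.api import router

app = FastAPI()
app.include_router(router)  # exposes the /api/v1/... endpoints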


@ -0,0 +1,5 @@
from langflow.api.v1.endpoints import router as endpoints_router
from langflow.api.v1.validate import router as validate_router
from langflow.api.v1.chat import router as chat_router
__all__ = ["chat_router", "endpoints_router", "validate_router"]


@ -1,6 +1,6 @@
from pydantic import BaseModel, validator
from langflow.graph.utils import extract_input_variables_from_prompt
from langflow.interface.utils import extract_input_variables_from_prompt
class CacheResponse(BaseModel):


@ -3,7 +3,7 @@ from typing import Any
from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langflow.api.schemas import ChatResponse
from langflow.api.v1.schemas import ChatResponse
# https://github.com/hwchase17/chat-langchain/blob/master/callback.py


@ -6,7 +6,7 @@ from fastapi import (
status,
)
from langflow.api.chat_manager import ChatManager
from langflow.chat.manager import ChatManager
from langflow.utils.logger import logger
router = APIRouter()


@ -3,13 +3,13 @@ from importlib.metadata import version
from fastapi import APIRouter, HTTPException
from langflow.api.schemas import (
from langflow.api.v1.schemas import (
ExportedFlow,
GraphData,
PredictRequest,
PredictResponse,
)
from langflow.interface.run import process_graph_cached
from langflow.interface.types import build_langchain_types_dict
# build router
@ -25,6 +25,8 @@ def get_all():
@router.post("/predict", response_model=PredictResponse)
async def get_load(predict_request: PredictRequest):
try:
from langflow.processing.process import process_graph_cached
exported_flow: ExportedFlow = predict_request.exported_flow
graph_data: GraphData = exported_flow.data
data = graph_data.dict()
@ -40,8 +42,3 @@ async def get_load(predict_request: PredictRequest):
@router.get("/version")
def get_version():
return {"version": version("langflow")}
@router.get("/health")
def get_health():
return {"status": "OK"}


@ -2,15 +2,15 @@ import json
from fastapi import APIRouter, HTTPException
from langflow.api.base import (
from langflow.api.v1.base import (
Code,
CodeValidationResponse,
Prompt,
PromptValidationResponse,
validate_prompt,
)
from langflow.graph.nodes import VectorStoreNode
from langflow.interface.run import build_graph
from langflow.graph.vertex.types import VectorStoreVertex
from langflow.graph import Graph
from langflow.utils.logger import logger
from langflow.utils.validate import validate_code
@ -44,12 +44,12 @@ def post_validate_prompt(prompt: Prompt):
def post_validate_node(node_id: str, data: dict):
try:
# build graph
graph = build_graph(data)
graph = Graph.from_payload(data)
# validate node
node = graph.get_node(node_id)
if node is None:
raise ValueError(f"Node {node_id} not found")
if not isinstance(node, VectorStoreNode):
if not isinstance(node, VectorStoreVertex):
node.build()
return json.dumps({"valid": True, "params": str(node._built_object_repr())})
except Exception as e:


@ -1,21 +1,18 @@
import asyncio
import json
from collections import defaultdict
from typing import Dict, List
from fastapi import WebSocket, status
from langflow.api.schemas import ChatMessage, ChatResponse, FileResponse
from langflow.api.v1.schemas import ChatMessage, ChatResponse, FileResponse
from langflow.cache import cache_manager
from langflow.cache.manager import Subject
from langflow.interface.run import (
get_result_and_steps,
load_or_build_langchain_object,
)
from langflow.interface.utils import pil_to_base64, try_setting_streaming_options
from langflow.chat.utils import process_graph
from langflow.interface.utils import pil_to_base64
from langflow.utils.logger import logger
import asyncio
import json
from typing import Dict, List
class ChatHistory(Subject):
def __init__(self):
super().__init__()
@ -191,33 +188,3 @@ class ChatManager:
except Exception as e:
logger.exception(e)
self.disconnect(client_id)
async def process_graph(
graph_data: Dict,
is_first_message: bool,
chat_message: ChatMessage,
websocket: WebSocket,
):
langchain_object = load_or_build_langchain_object(graph_data, is_first_message)
langchain_object = try_setting_streaming_options(langchain_object, websocket)
logger.debug("Loaded langchain object")
if langchain_object is None:
# Raise user facing error
raise ValueError(
"There was an error loading the langchain_object. Please, check all the nodes and try again."
)
# Generate result and thought
try:
logger.debug("Generating result and thought")
result, intermediate_steps = await get_result_and_steps(
langchain_object, chat_message.message or "", websocket=websocket
)
logger.debug("Generated result and intermediate_steps")
return result, intermediate_steps
except Exception as e:
# Log stack trace
logger.exception(e)
raise e


@ -0,0 +1,41 @@
from fastapi import WebSocket
from langflow.api.v1.schemas import ChatMessage
from langflow.processing.process import (
load_or_build_langchain_object,
)
from langflow.processing.base import get_result_and_steps
from langflow.interface.utils import try_setting_streaming_options
from langflow.utils.logger import logger
from typing import Dict
async def process_graph(
graph_data: Dict,
is_first_message: bool,
chat_message: ChatMessage,
websocket: WebSocket,
):
langchain_object = load_or_build_langchain_object(graph_data, is_first_message)
langchain_object = try_setting_streaming_options(langchain_object, websocket)
logger.debug("Loaded langchain object")
if langchain_object is None:
# Raise user facing error
raise ValueError(
"There was an error loading the langchain_object. Please, check all the nodes and try again."
)
# Generate result and thought
try:
logger.debug("Generating result and thought")
result, intermediate_steps = await get_result_and_steps(
langchain_object, chat_message.message or "", websocket=websocket
)
logger.debug("Generated result and intermediate_steps")
return result, intermediate_steps
except Exception as e:
# Log stack trace
logger.exception(e)
raise e
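process_graph bundles the build-and-run steps that used to live inside ChatManager. A hedged call-site sketch (graph_data, chat_history, chat_message, and websocket are placeholders for the values the websocket handler already holds):

# Inside an async websocket handler, e.g. ChatManager's message loop:
result, intermediate_steps = await process_graph(
    graph_data=graph_data,                    # flow exported by the frontend
    is_first_message=len(chat_history) == 0,  # triggers a cache rebuild
    chat_message=chat_message,                # ChatMessage with .message set
    websocket=websocket,
)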


@ -51,11 +51,14 @@ embeddings:
llms:
- OpenAI
# - AzureOpenAI
# - AzureChatOpenAI
- ChatOpenAI
- HuggingFaceHub
- LlamaCpp
- CTransformers
- Cohere
- Anthropic
- ChatAnthropic
- HuggingFaceHub
memories:
- ConversationBufferMemory
- ConversationSummaryMemory
@ -74,12 +77,14 @@ toolkits:
- JsonToolkit
- VectorStoreInfo
- VectorStoreRouterToolkit
- VectorStoreToolkit
tools:
- Search
- PAL-MATH
- Calculator
- Serper Search
- Tool
- PythonFunctionTool
- PythonFunction
- JsonSpec
- News API
@ -119,6 +124,7 @@ vectorstores:
- Chroma
- Qdrant
- Weaviate
- FAISS
wrappers:
- RequestsWrapper
# - ChatPromptTemplate


@ -4,6 +4,7 @@ from langflow.template import frontend_node
CUSTOM_NODES = {
"prompts": {"ZeroShotPrompt": frontend_node.prompts.ZeroShotPromptNode()},
"tools": {
"PythonFunctionTool": frontend_node.tools.PythonFunctionToolNode(),
"PythonFunction": frontend_node.tools.PythonFunctionNode(),
"Tool": frontend_node.tools.ToolNode(),
},


@ -1,4 +1,35 @@
from langflow.graph.base import Edge, Node
from langflow.graph.graph import Graph
from langflow.graph.edge.base import Edge
from langflow.graph.graph.base import Graph
from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.types import (
AgentVertex,
ChainVertex,
DocumentLoaderVertex,
EmbeddingVertex,
LLMVertex,
MemoryVertex,
PromptVertex,
TextSplitterVertex,
ToolVertex,
ToolkitVertex,
VectorStoreVertex,
WrapperVertex,
)
__all__ = ["Graph", "Node", "Edge"]
__all__ = [
"Graph",
"Vertex",
"Edge",
"AgentVertex",
"ChainVertex",
"DocumentLoaderVertex",
"EmbeddingVertex",
"LLMVertex",
"MemoryVertex",
"PromptVertex",
"TextSplitterVertex",
"ToolVertex",
"ToolkitVertex",
"VectorStoreVertex",
"WrapperVertex",
]


@ -0,0 +1,52 @@
from langflow.utils.logger import logger
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langflow.graph.vertex.base import Vertex
class Edge:
def __init__(self, source: "Vertex", target: "Vertex"):
self.source: "Vertex" = source
self.target: "Vertex" = target
self.validate_edge()
def validate_edge(self) -> None:
# Validate that the outputs of the source node are valid inputs
# for the target node
self.source_types = self.source.output
self.target_reqs = self.target.required_inputs + self.target.optional_inputs
# Both lists contain strings and sometimes a string contains the value we are
# looking for, e.g. source_types=["Chain"] and target_reqs=["LLMChain"],
# so we need to check if any of the strings in source_types is in target_reqs
self.valid = any(
output in target_req
for output in self.source_types
for target_req in self.target_reqs
)
# Get what type of input the target node is expecting
self.matched_type = next(
(
output
for output in self.source_types
for target_req in self.target_reqs
if output in target_req
),
None,
)
no_matched_type = self.matched_type is None
if no_matched_type:
logger.debug(self.source_types)
logger.debug(self.target_reqs)
if no_matched_type:
raise ValueError(
f"Edge between {self.source.vertex_type} and {self.target.vertex_type} "
f"has no matched type"
)
def __repr__(self) -> str:
return (
f"Edge(source={self.source.id}, target={self.target.id}, valid={self.valid}"
f", matched_type={self.matched_type})"
)
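Note that validation uses substring containment rather than exact type equality, so a source output of "Chain" satisfies a target requirement of "LLMChain". A standalone sketch of the same matching rule:

def first_matched_type(source_types, target_reqs):
    # Same rule as Edge.validate_edge: a source output matches when its
    # name appears inside any of the target's required input names.
    return next(
        (out for out in source_types for req in target_reqs if out in req),
        None,
    )

assert first_matched_type(["Chain"], ["LLMChain"]) == "Chain"
assert first_matched_type(["Tool"], ["BaseLLM"]) is None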


@ -1,38 +1,20 @@
from typing import Dict, List, Type, Union
from langflow.graph.base import Edge, Node
from langflow.graph.nodes import (
AgentNode,
ChainNode,
DocumentLoaderNode,
EmbeddingNode,
FileToolNode,
LLMNode,
MemoryNode,
PromptNode,
TextSplitterNode,
ToolkitNode,
ToolNode,
VectorStoreNode,
WrapperNode,
from langflow.graph.edge.base import Edge
from langflow.graph.graph.constants import VERTEX_TYPE_MAP
from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.types import (
FileToolVertex,
LLMVertex,
ToolkitVertex,
)
from langflow.interface.agents.base import agent_creator
from langflow.interface.chains.base import chain_creator
from langflow.interface.document_loaders.base import documentloader_creator
from langflow.interface.embeddings.base import embedding_creator
from langflow.interface.llms.base import llm_creator
from langflow.interface.memories.base import memory_creator
from langflow.interface.prompts.base import prompt_creator
from langflow.interface.text_splitters.base import textsplitter_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.tools.constants import FILE_TOOLS
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
from langflow.utils import payload
class Graph:
"""A class representing a graph of nodes and edges."""
def __init__(
self,
nodes: List[Dict[str, Union[str, Dict[str, Union[str, List[str]]]]]],
@ -42,8 +24,30 @@ class Graph:
self._edges = edges
self._build_graph()
@classmethod
def from_payload(cls, payload: Dict) -> "Graph":
"""
Creates a graph from a payload.
Args:
payload (Dict): The payload to create the graph from.
Returns:
Graph: The created graph.
"""
if "data" in payload:
payload = payload["data"]
try:
nodes = payload["nodes"]
edges = payload["edges"]
return cls(nodes, edges)
except KeyError as exc:
raise ValueError("Invalid payload") from exc
def _build_graph(self) -> None:
self.nodes = self._build_nodes()
"""Builds the graph from the nodes and edges."""
self.nodes = self._build_vertices()
self.edges = self._build_edges()
for edge in self.edges:
edge.source.add_edge(edge)
@ -51,17 +55,25 @@ class Graph:
# This is a hack to make sure that the LLM node is sent to
# the toolkit node
self._build_node_params()
# remove invalid nodes
self._remove_invalid_nodes()
def _build_node_params(self) -> None:
"""Identifies and handles the LLM node within the graph."""
llm_node = None
for node in self.nodes:
node._build_params()
if isinstance(node, LLMNode):
if isinstance(node, LLMVertex):
llm_node = node
for node in self.nodes:
if isinstance(node, ToolkitNode):
node.params["llm"] = llm_node
# remove invalid nodes
if llm_node:
for node in self.nodes:
if isinstance(node, ToolkitVertex):
node.params["llm"] = llm_node
def _remove_invalid_nodes(self) -> None:
"""Removes invalid nodes from the graph."""
self.nodes = [
node
for node in self.nodes
@ -69,28 +81,33 @@ class Graph:
or (len(self.nodes) == 1 and len(self.edges) == 0)
]
def _validate_node(self, node: Node) -> bool:
def _validate_node(self, node: Vertex) -> bool:
"""Validates a node."""
# All nodes that do not have edges are invalid
return len(node.edges) > 0
def get_node(self, node_id: str) -> Union[None, Node]:
def get_node(self, node_id: str) -> Union[None, Vertex]:
"""Returns a node by id."""
return next((node for node in self.nodes if node.id == node_id), None)
def get_nodes_with_target(self, node: Node) -> List[Node]:
connected_nodes: List[Node] = [
def get_nodes_with_target(self, node: Vertex) -> List[Vertex]:
"""Returns the nodes connected to a node."""
connected_nodes: List[Vertex] = [
edge.source for edge in self.edges if edge.target == node
]
return connected_nodes
def build(self) -> List[Node]:
def build(self) -> List[Vertex]:
"""Builds the graph."""
# Get root node
root_node = payload.get_root_node(self)
if root_node is None:
raise ValueError("No root node found")
return root_node.build()
def get_node_neighbors(self, node: Node) -> Dict[Node, int]:
neighbors: Dict[Node, int] = {}
def get_node_neighbors(self, node: Vertex) -> Dict[Vertex, int]:
"""Returns the neighbors of a node."""
neighbors: Dict[Vertex, int] = {}
for edge in self.edges:
if edge.source == node:
neighbor = edge.target
@ -105,6 +122,7 @@ class Graph:
return neighbors
def _build_edges(self) -> List[Edge]:
"""Builds the edges of the graph."""
# Edge takes two nodes as arguments, so we need to build the nodes first
# and then build the edges
# if we can't find a node, we raise an error
@ -120,43 +138,31 @@ class Graph:
edges.append(Edge(source, target))
return edges
def _get_node_class(self, node_type: str, node_lc_type: str) -> Type[Node]:
node_type_map: Dict[str, Type[Node]] = {
**{t: PromptNode for t in prompt_creator.to_list()},
**{t: AgentNode for t in agent_creator.to_list()},
**{t: ChainNode for t in chain_creator.to_list()},
**{t: ToolNode for t in tool_creator.to_list()},
**{t: ToolkitNode for t in toolkits_creator.to_list()},
**{t: WrapperNode for t in wrapper_creator.to_list()},
**{t: LLMNode for t in llm_creator.to_list()},
**{t: MemoryNode for t in memory_creator.to_list()},
**{t: EmbeddingNode for t in embedding_creator.to_list()},
**{t: VectorStoreNode for t in vectorstore_creator.to_list()},
**{t: DocumentLoaderNode for t in documentloader_creator.to_list()},
**{t: TextSplitterNode for t in textsplitter_creator.to_list()},
}
def _get_vertex_class(self, node_type: str, node_lc_type: str) -> Type[Vertex]:
"""Returns the node class based on the node type."""
if node_type in FILE_TOOLS:
return FileToolNode
if node_type in node_type_map:
return node_type_map[node_type]
if node_lc_type in node_type_map:
return node_type_map[node_lc_type]
return Node
return FileToolVertex
if node_type in VERTEX_TYPE_MAP:
return VERTEX_TYPE_MAP[node_type]
return (
VERTEX_TYPE_MAP[node_lc_type] if node_lc_type in VERTEX_TYPE_MAP else Vertex
)
def _build_nodes(self) -> List[Node]:
nodes: List[Node] = []
def _build_vertices(self) -> List[Vertex]:
"""Builds the vertices of the graph."""
nodes: List[Vertex] = []
for node in self._nodes:
node_data = node["data"]
node_type: str = node_data["type"] # type: ignore
node_lc_type: str = node_data["node"]["template"]["_type"] # type: ignore
NodeClass = self._get_node_class(node_type, node_lc_type)
nodes.append(NodeClass(node))
VertexClass = self._get_vertex_class(node_type, node_lc_type)
nodes.append(VertexClass(node))
return nodes
def get_children_by_node_type(self, node: Node, node_type: str) -> List[Node]:
def get_children_by_node_type(self, node: Vertex, node_type: str) -> List[Vertex]:
"""Returns the children of a node based on the node type."""
children = []
node_types = [node.data["type"]]
if "node" in node.data:


@ -0,0 +1,49 @@
from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.types import (
AgentVertex,
ChainVertex,
DocumentLoaderVertex,
EmbeddingVertex,
LLMVertex,
MemoryVertex,
PromptVertex,
TextSplitterVertex,
ToolVertex,
ToolkitVertex,
VectorStoreVertex,
WrapperVertex,
)
from langflow.interface.agents.base import agent_creator
from langflow.interface.chains.base import chain_creator
from langflow.interface.document_loaders.base import documentloader_creator
from langflow.interface.embeddings.base import embedding_creator
from langflow.interface.llms.base import llm_creator
from langflow.interface.memories.base import memory_creator
from langflow.interface.prompts.base import prompt_creator
from langflow.interface.text_splitters.base import textsplitter_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
from typing import Dict, Type
DIRECT_TYPES = ["str", "bool", "code", "int", "float", "Any", "prompt"]
VERTEX_TYPE_MAP: Dict[str, Type[Vertex]] = {
**{t: PromptVertex for t in prompt_creator.to_list()},
**{t: AgentVertex for t in agent_creator.to_list()},
**{t: ChainVertex for t in chain_creator.to_list()},
**{t: ToolVertex for t in tool_creator.to_list()},
**{t: ToolkitVertex for t in toolkits_creator.to_list()},
**{t: WrapperVertex for t in wrapper_creator.to_list()},
**{t: LLMVertex for t in llm_creator.to_list()},
**{t: MemoryVertex for t in memory_creator.to_list()},
**{t: EmbeddingVertex for t in embedding_creator.to_list()},
**{t: VectorStoreVertex for t in vectorstore_creator.to_list()},
**{t: DocumentLoaderVertex for t in documentloader_creator.to_list()},
**{t: TextSplitterVertex for t in textsplitter_creator.to_list()},
}
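With the map hoisted into a module-level constant, class resolution becomes plain dictionary lookups with Vertex as the fallback. A sketch of the lookup order used by Graph._get_vertex_class:

from langflow.graph.graph.constants import VERTEX_TYPE_MAP
from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.types import FileToolVertex
from langflow.interface.tools.constants import FILE_TOOLS

def resolve_vertex_class(node_type: str, node_lc_type: str):
    if node_type in FILE_TOOLS:        # file-based tools win first
        return FileToolVertex
    if node_type in VERTEX_TYPE_MAP:   # then the exact frontend type
        return VERTEX_TYPE_MAP[node_type]
    # finally the underlying LangChain _type, else the generic Vertex
    return VERTEX_TYPE_MAP.get(node_lc_type, Vertex)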


@ -1,4 +1,6 @@
import re
from typing import Any, Union
from langflow.interface.utils import extract_input_variables_from_prompt
def validate_prompt(prompt: str):
@ -14,6 +16,12 @@ def fix_prompt(prompt: str):
return prompt + " {input}"
def extract_input_variables_from_prompt(prompt: str) -> list[str]:
"""Extract input variables from prompt."""
return re.findall(r"{(.*?)}", prompt)
def flatten_list(list_of_lists: list[Union[list, Any]]) -> list:
"""Flatten list of lists."""
new_list = []
for item in list_of_lists:
if isinstance(item, list):
new_list.extend(item)
else:
new_list.append(item)
return new_list
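flatten_list flattens exactly one level, which is all PromptVertex.build needs when a toolkit returns its tools as a nested list. For example:

from langflow.graph.utils import flatten_list

assert flatten_list([1, [2, 3], 4]) == [1, 2, 3, 4]
# Deeper nesting is preserved as-is:
assert flatten_list([[1, [2]], 3]) == [1, [2], 3]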


@ -1,28 +1,27 @@
# Description: Graph class for building a graph of nodes and edges
# Insights:
# - Defer prompts building to the last moment or when they have all the tools
# - Build each inner agent first, then build the outer agent
import contextlib
import inspect
import types
import warnings
from copy import deepcopy
from typing import Any, Dict, List, Optional
from langflow.cache import base as cache_utils
from langflow.graph.constants import DIRECT_TYPES
from langflow.graph.vertex.constants import DIRECT_TYPES
from langflow.interface import loading
from langflow.interface.listing import ALL_TYPES_DICT
from langflow.utils.logger import logger
from langflow.utils.util import sync_to_async
class Node:
import contextlib
import inspect
import types
import warnings
from typing import Any, Dict, List, Optional
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langflow.graph.edge.base import Edge
class Vertex:
def __init__(self, data: Dict, base_type: Optional[str] = None) -> None:
self.id: str = data["id"]
self._data = data
self.edges: List[Edge] = []
self.edges: List["Edge"] = []
self.base_type: Optional[str] = base_type
self._parse_data()
self._built_object = None
@ -49,12 +48,12 @@ class Node:
]
template_dict = self.data["node"]["template"]
self.node_type = (
self.vertex_type = (
self.data["type"] if "Tool" not in self.output else template_dict["_type"]
)
if self.base_type is None:
for base_type, value in ALL_TYPES_DICT.items():
if self.node_type in value:
if self.vertex_type in value:
self.base_type = base_type
break
@ -114,7 +113,7 @@ class Node:
if value["required"] and not edges:
# If a required parameter is not found, raise an error
raise ValueError(
f"Required input {key} for module {self.node_type} not found"
f"Required input {key} for module {self.vertex_type} not found"
)
elif value["list"]:
# If this is a list parameter, append all sources to a list
@ -129,7 +128,7 @@ class Node:
# so we need to check if value has value
new_value = value.get("value")
if new_value is None:
warnings.warn(f"Value for {key} in {self.node_type} is None. ")
warnings.warn(f"Value for {key} in {self.vertex_type} is None. ")
if value.get("type") == "int":
with contextlib.suppress(TypeError, ValueError):
new_value = int(new_value) # type: ignore
@ -149,12 +148,12 @@ class Node:
# and continue
# Another aspect is that the node_type is the class that we need to import
# and instantiate with these built params
logger.debug(f"Building {self.node_type}")
logger.debug(f"Building {self.vertex_type}")
# Build each node in the params dict
for key, value in self.params.copy().items():
# Check if Node or list of Nodes and not self
# to avoid recursion
if isinstance(value, Node):
if isinstance(value, Vertex):
if value == self:
del self.params[key]
continue
@ -175,10 +174,16 @@ class Node:
# turn result which is a function into a coroutine
# so that it can be awaited
self.params["coroutine"] = sync_to_async(result)
if isinstance(result, list):
    # If the existing param is already a list, extend it with the
    # result; otherwise replace the param with the new list.
    if isinstance(self.params[key], list):
        self.params[key].extend(result)
    else:
        self.params[key] = result
elif isinstance(value, list) and all(
isinstance(node, Node) for node in value
isinstance(node, Vertex) for node in value
):
self.params[key] = []
for node in value:
@ -194,17 +199,17 @@ class Node:
try:
self._built_object = loading.instantiate_class(
node_type=self.node_type,
node_type=self.vertex_type,
base_type=self.base_type,
params=self.params,
)
except Exception as exc:
raise ValueError(
f"Error building node {self.node_type}: {str(exc)}"
f"Error building node {self.vertex_type}: {str(exc)}"
) from exc
if self._built_object is None:
raise ValueError(f"Node type {self.node_type} not found")
raise ValueError(f"Node type {self.vertex_type} not found")
self._built = True
@ -212,19 +217,7 @@ class Node:
if not self._built or force:
self._build()
#! Deepcopy is breaking for vectorstores
if self.base_type in [
"vectorstores",
"VectorStoreRouterAgent",
"VectorStoreAgent",
"VectorStoreInfo",
] or self.node_type in [
"VectorStoreInfo",
"VectorStoreRouterToolkit",
"SQLDatabase",
]:
return self._built_object
return deepcopy(self._built_object)
return self._built_object
def add_edge(self, edge: "Edge") -> None:
self.edges.append(edge)
@ -233,57 +226,10 @@ class Node:
return f"Node(id={self.id}, data={self.data})"
def __eq__(self, __o: object) -> bool:
return self.id == __o.id if isinstance(__o, Node) else False
return self.id == __o.id if isinstance(__o, Vertex) else False
def __hash__(self) -> int:
return id(self)
def _built_object_repr(self):
return repr(self._built_object)
class Edge:
def __init__(self, source: "Node", target: "Node"):
self.source: "Node" = source
self.target: "Node" = target
self.validate_edge()
def validate_edge(self) -> None:
# Validate that the outputs of the source node are valid inputs
# for the target node
self.source_types = self.source.output
self.target_reqs = self.target.required_inputs + self.target.optional_inputs
# Both lists contain strings and sometimes a string contains the value we are
# looking for, e.g. source_types=["Chain"] and target_reqs=["LLMChain"],
# so we need to check if any of the strings in source_types is in target_reqs
self.valid = any(
output in target_req
for output in self.source_types
for target_req in self.target_reqs
)
# Get what type of input the target node is expecting
self.matched_type = next(
(
output
for output in self.source_types
for target_req in self.target_reqs
if output in target_req
),
None,
)
no_matched_type = self.matched_type is None
if no_matched_type:
logger.debug(self.source_types)
logger.debug(self.target_reqs)
if no_matched_type:
raise ValueError(
f"Edge between {self.source.node_type} and {self.target.node_type} "
f"has no matched type"
)
def __repr__(self) -> str:
return (
f"Edge(source={self.source.id}, target={self.target.id}, valid={self.valid}"
f", matched_type={self.matched_type})"
)


@ -1,22 +1,23 @@
from typing import Any, Dict, List, Optional, Union
from langflow.graph.base import Node
from langflow.graph.utils import extract_input_variables_from_prompt
from langflow.graph.vertex.base import Vertex
from langflow.graph.utils import flatten_list
from langflow.interface.utils import extract_input_variables_from_prompt
class AgentNode(Node):
class AgentVertex(Vertex):
def __init__(self, data: Dict):
super().__init__(data, base_type="agents")
self.tools: List[ToolNode] = []
self.chains: List[ChainNode] = []
self.tools: List[Union[ToolkitVertex, ToolVertex]] = []
self.chains: List[ChainVertex] = []
def _set_tools_and_chains(self) -> None:
for edge in self.edges:
source_node = edge.source
if isinstance(source_node, ToolNode):
if isinstance(source_node, (ToolVertex, ToolkitVertex)):
self.tools.append(source_node)
elif isinstance(source_node, ChainNode):
elif isinstance(source_node, ChainVertex):
self.chains.append(source_node)
def build(self, force: bool = False) -> Any:
@ -32,25 +33,130 @@ class AgentNode(Node):
self._build()
#! Cannot deepcopy VectorStore, VectorStoreRouter, or SQL agents
if self.node_type in ["VectorStoreAgent", "VectorStoreRouterAgent", "SQLAgent"]:
return self._built_object
return self._built_object
class ToolNode(Node):
class ToolVertex(Vertex):
def __init__(self, data: Dict):
super().__init__(data, base_type="tools")
class PromptNode(Node):
class LLMVertex(Vertex):
built_node_type = None
class_built_object = None
def __init__(self, data: Dict):
super().__init__(data, base_type="llms")
def build(self, force: bool = False) -> Any:
# LLM is different because some models might take up too much memory
# or time to load. So we only load them when we need them.
if self.vertex_type == self.built_node_type:
return self.class_built_object
if not self._built or force:
self._build()
self.built_node_type = self.vertex_type
self.class_built_object = self._built_object
# Avoid deepcopying the LLM
# that are loaded from a file
return self._built_object
class ToolkitVertex(Vertex):
def __init__(self, data: Dict):
super().__init__(data, base_type="toolkits")
class FileToolVertex(ToolVertex):
def __init__(self, data: Dict):
super().__init__(data)
class WrapperVertex(Vertex):
def __init__(self, data: Dict):
super().__init__(data, base_type="wrappers")
def build(self, force: bool = False) -> Any:
if not self._built or force:
if "headers" in self.params:
self.params["headers"] = eval(self.params["headers"])
self._build()
return self._built_object
class DocumentLoaderVertex(Vertex):
def __init__(self, data: Dict):
super().__init__(data, base_type="documentloaders")
def _built_object_repr(self):
# This built_object is a list of documents. Maybe we should
# show how many documents are in the list?
if self._built_object:
return f"""{self.vertex_type}({len(self._built_object)} documents)
Documents: {self._built_object[:3]}..."""
return f"{self.vertex_type}()"
class EmbeddingVertex(Vertex):
def __init__(self, data: Dict):
super().__init__(data, base_type="embeddings")
class VectorStoreVertex(Vertex):
def __init__(self, data: Dict):
super().__init__(data, base_type="vectorstores")
def _built_object_repr(self):
return "Vector stores can take time to build. It will build on the first query."
class MemoryVertex(Vertex):
def __init__(self, data: Dict):
super().__init__(data, base_type="memory")
class TextSplitterVertex(Vertex):
def __init__(self, data: Dict):
super().__init__(data, base_type="textsplitters")
def _built_object_repr(self):
# This built_object is a list of documents. Maybe we should
# show how many documents are in the list?
if self._built_object:
return f"""{self.vertex_type}({len(self._built_object)} documents)
\nDocuments: {self._built_object[:3]}..."""
return f"{self.vertex_type}()"
class ChainVertex(Vertex):
def __init__(self, data: Dict):
super().__init__(data, base_type="chains")
def build(
self,
force: bool = False,
tools: Optional[List[Union[ToolkitVertex, ToolVertex]]] = None,
) -> Any:
if not self._built or force:
# Check if the chain requires a PromptVertex
for key, value in self.params.items():
if isinstance(value, PromptVertex):
# Build the PromptVertex, passing the tools if available
self.params[key] = value.build(tools=tools, force=force)
self._build()
return self._built_object
class PromptVertex(Vertex):
def __init__(self, data: Dict):
super().__init__(data, base_type="prompts")
def build(
self,
force: bool = False,
tools: Optional[Union[List[Node], List[ToolNode]]] = None,
tools: Optional[List[Union[ToolkitVertex, ToolVertex]]] = None,
) -> Any:
if not self._built or force:
if (
@ -59,12 +165,16 @@ class PromptNode(Node):
):
self.params["input_variables"] = []
# Check if it is a ZeroShotPrompt and needs a tool
if "ShotPrompt" in self.node_type:
if "ShotPrompt" in self.vertex_type:
tools = (
[tool_node.build() for tool_node in tools]
if tools is not None
else []
)
# flatten the list of tools if it is a list of lists
# first check if it is a list
if tools and isinstance(tools, list) and isinstance(tools[0], list):
tools = flatten_list(tools)
self.params["tools"] = tools
prompt_params = [
key
@ -81,113 +191,3 @@ class PromptNode(Node):
self._build()
return self._built_object
class ChainNode(Node):
def __init__(self, data: Dict):
super().__init__(data, base_type="chains")
def build(
self,
force: bool = False,
tools: Optional[Union[List[Node], List[ToolNode]]] = None,
) -> Any:
if not self._built or force:
# Check if the chain requires a PromptNode
for key, value in self.params.items():
if isinstance(value, PromptNode):
# Build the PromptNode, passing the tools if available
self.params[key] = value.build(tools=tools, force=force)
self._build()
#! Cannot deepcopy SQLDatabaseChain
if self.node_type in ["SQLDatabaseChain"]:
return self._built_object
return self._built_object
class LLMNode(Node):
built_node_type = None
class_built_object = None
def __init__(self, data: Dict):
super().__init__(data, base_type="llms")
def build(self, force: bool = False) -> Any:
# LLM is different because some models might take up too much memory
# or time to load. So we only load them when we need them.
if self.node_type == self.built_node_type:
return self.class_built_object
if not self._built or force:
self._build()
self.built_node_type = self.node_type
self.class_built_object = self._built_object
# Avoid deepcopying the LLM
# that are loaded from a file
return self._built_object
class ToolkitNode(Node):
def __init__(self, data: Dict):
super().__init__(data, base_type="toolkits")
class FileToolNode(ToolNode):
def __init__(self, data: Dict):
super().__init__(data)
class WrapperNode(Node):
def __init__(self, data: Dict):
super().__init__(data, base_type="wrappers")
def build(self, force: bool = False) -> Any:
if not self._built or force:
if "headers" in self.params:
self.params["headers"] = eval(self.params["headers"])
self._build()
return self._built_object
class DocumentLoaderNode(Node):
def __init__(self, data: Dict):
super().__init__(data, base_type="documentloaders")
def _built_object_repr(self):
# This built_object is a list of documents. Maybe we should
# show how many documents are in the list?
if self._built_object:
return f"""{self.node_type}({len(self._built_object)} documents)
Documents: {self._built_object[:3]}..."""
return f"{self.node_type}()"
class EmbeddingNode(Node):
def __init__(self, data: Dict):
super().__init__(data, base_type="embeddings")
class VectorStoreNode(Node):
def __init__(self, data: Dict):
super().__init__(data, base_type="vectorstores")
def _built_object_repr(self):
return "Vector stores can take time to build. It will build on the first query."
class MemoryNode(Node):
def __init__(self, data: Dict):
super().__init__(data, base_type="memory")
class TextSplitterNode(Node):
def __init__(self, data: Dict):
super().__init__(data, base_type="textsplitters")
def _built_object_repr(self):
# This built_object is a list of documents. Maybe we should
# show how many documents are in the list?
if self._built_object:
return f"""{self.node_type}({len(self._built_object)} documents)\nDocuments: {self._built_object[:3]}..."""
return f"{self.node_type}()"


@ -1,6 +1,6 @@
from typing import Dict, List, Optional
from langchain.agents import loading
from langchain.agents import types
from langflow.custom.customs import get_custom_nodes
from langflow.interface.agents.custom import CUSTOM_AGENTS
@ -16,7 +16,7 @@ class AgentCreator(LangChainTypeCreator):
@property
def type_to_loader_dict(self) -> Dict:
if self.type_dict is None:
self.type_dict = loading.AGENT_TO_CLASS
self.type_dict = types.AGENT_TO_CLASS
# Add JsonAgent to the list of agents
for name, agent in CUSTOM_AGENTS.items():
# TODO: validate AgentType


@ -69,7 +69,7 @@ class JsonAgent(CustomAgentExecutor):
@classmethod
def from_toolkit_and_llm(cls, toolkit: JsonToolkit, llm: BaseLanguageModel):
tools = toolkit.get_tools()
tools = toolkit if isinstance(toolkit, list) else toolkit.get_tools()
tool_names = {tool.name for tool in tools}
prompt = ZeroShotAgent.create_prompt(
tools,


@ -5,7 +5,7 @@ from langchain.memory.buffer import ConversationBufferMemory
from langchain.schema import BaseMemory
from pydantic import Field, root_validator
from langflow.graph.utils import extract_input_variables_from_prompt
from langflow.interface.utils import extract_input_variables_from_prompt
DEFAULT_SUFFIX = """"
Current conversation:


@ -11,12 +11,15 @@ from langchain import (
text_splitter,
)
from langchain.agents import agent_toolkits
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import AzureChatOpenAI, ChatOpenAI
from langchain.chat_models import ChatAnthropic
from langflow.interface.importing.utils import import_class
## LLMs
llm_type_to_cls_dict = llms.type_to_cls_dict
llm_type_to_cls_dict["anthropic-chat"] = ChatAnthropic # type: ignore
llm_type_to_cls_dict["azure-chat"] = AzureChatOpenAI # type: ignore
llm_type_to_cls_dict["openai-chat"] = ChatOpenAI # type: ignore
## Chains


@ -9,6 +9,7 @@ from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chat_models.base import BaseChatModel
from langchain.tools import BaseTool
from langflow.utils import validate
def import_module(module_path: str) -> Any:
@ -147,3 +148,10 @@ def import_utility(utility: str) -> Any:
if utility == "SQLDatabase":
return import_class(f"langchain.sql_database.{utility}")
return import_class(f"langchain.utilities.{utility}")
def get_function(code):
"""Get the function"""
function_name = validate.extract_function_name(code)
return validate.create_function(code, function_name)
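get_function compiles a user-supplied source string into a callable by extracting the function name and building the function from the code. A small sketch (assuming the code string defines exactly one function, as validate.extract_function_name expects):

from langflow.interface.importing.utils import get_function

code = '''
def add(a: int, b: int) -> int:
    return a + b
'''

add = get_function(code)
assert add(2, 3) == 5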


@ -20,8 +20,7 @@ from langchain.llms.loading import load_llm_from_config
from pydantic import ValidationError
from langflow.interface.agents.custom import CUSTOM_AGENTS
from langflow.interface.importing.utils import import_by_type
from langflow.interface.run import fix_memory_inputs
from langflow.interface.importing.utils import get_function, import_by_type
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.types import get_type_list
from langflow.interface.utils import load_file_into_dict
@ -31,7 +30,7 @@ from langflow.utils import util, validate
def instantiate_class(node_type: str, base_type: str, params: Dict) -> Any:
"""Instantiate class from module type and key, and params"""
params = convert_params_to_sets(params)
params = convert_kwargs(params)
if node_type in CUSTOM_AGENTS:
custom_agent = CUSTOM_AGENTS.get(node_type)
if custom_agent:
@ -50,6 +49,16 @@ def convert_params_to_sets(params):
return params
def convert_kwargs(params):
# if *kwargs are passed as a string, convert to dict
# first find any key that has kwargs in it
kwargs_keys = [key for key in params.keys() if "kwargs" in key]
for key in kwargs_keys:
if isinstance(params[key], str):
params[key] = json.loads(params[key])
return params
def instantiate_based_on_type(class_object, base_type, node_type, params):
if base_type == "agents":
return instantiate_agent(class_object, params)
@ -89,6 +98,10 @@ def instantiate_tool(node_type, class_object, params):
if node_type == "JsonSpec":
params["dict_"] = load_file_into_dict(params.pop("path"))
return class_object(**params)
elif node_type == "PythonFunctionTool":
params["func"] = get_function(params.get("code"))
return class_object(**params)
# For backward compatibility
elif node_type == "PythonFunction":
function_string = params["code"]
if isinstance(function_string, str):
@ -101,8 +114,11 @@ def instantiate_tool(node_type, class_object, params):
def instantiate_toolkit(node_type, class_object, params):
loaded_toolkit = class_object(**params)
if toolkits_creator.has_create_function(node_type):
return load_toolkits_executor(node_type, loaded_toolkit, params)
# Commenting this out for now to use toolkits as normal tools
# if toolkits_creator.has_create_function(node_type):
# return load_toolkits_executor(node_type, loaded_toolkit, params)
if isinstance(loaded_toolkit, BaseToolkit):
return loaded_toolkit.get_tools()
return loaded_toolkit
@ -151,38 +167,6 @@ def instantiate_utility(node_type, class_object, params):
return class_object(**params)
def load_flow_from_json(path: str, build=True):
"""Load flow from json file"""
# This is done to avoid circular imports
from langflow.graph import Graph
with open(path, "r", encoding="utf-8") as f:
flow_graph = json.load(f)
data_graph = flow_graph["data"]
nodes = data_graph["nodes"]
# Substitute ZeroShotPrompt with PromptTemplate
# nodes = replace_zero_shot_prompt_with_prompt_template(nodes)
# Add input variables
# nodes = payload.extract_input_variables(nodes)
# Nodes, edges and root node
edges = data_graph["edges"]
graph = Graph(nodes, edges)
if build:
langchain_object = graph.build()
if hasattr(langchain_object, "verbose"):
langchain_object.verbose = True
if hasattr(langchain_object, "return_intermediate_steps"):
# https://github.com/hwchase17/langchain/issues/2068
# Deactivating until we have a frontend solution
# to display intermediate steps
langchain_object.return_intermediate_steps = False
fix_memory_inputs(langchain_object)
return langchain_object
return graph
def replace_zero_shot_prompt_with_prompt_template(nodes):
"""Replace ZeroShotPrompt with PromptTemplate"""
for node in nodes:
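convert_kwargs lets the frontend ship keyword-argument dicts as JSON strings: any param whose key contains "kwargs" is parsed with json.loads before instantiation. A quick sketch of the effect (model_kwargs is a hypothetical param name):

from langflow.interface.loading import convert_kwargs

params = {"model_kwargs": '{"temperature": 0.2}', "verbose": True}
params = convert_kwargs(params)
assert params["model_kwargs"] == {"temperature": 0.2}  # parsed into a dict
assert params["verbose"] is True                       # non-kwargs keys untouched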


@ -3,7 +3,7 @@ from typing import Dict, List, Optional, Type
from langchain.prompts import PromptTemplate
from pydantic import root_validator
from langflow.graph.utils import extract_input_variables_from_prompt
from langflow.interface.utils import extract_input_variables_from_prompt
# Steps to create a BaseCustomPrompt:
# 1. Create a prompt template that ends with:


@ -1,12 +1,5 @@
import contextlib
import io
from typing import Any, Dict, List, Tuple
from langchain.schema import AgentAction
from langflow.api.callback import AsyncStreamingLLMCallbackHandler, StreamingLLMCallbackHandler # type: ignore
from langflow.cache.base import compute_dict_hash, load_cache, memoize_dict
from langflow.graph.graph import Graph
from langflow.graph import Graph
from langflow.utils.logger import logger
@ -24,15 +17,6 @@ def load_langchain_object(data_graph, is_first_message=False):
return computed_hash, langchain_object
def load_or_build_langchain_object(data_graph, is_first_message=False):
"""
Load langchain object from cache if it exists, otherwise build it.
"""
if is_first_message:
build_langchain_object_with_caching.clear_cache()
return build_langchain_object_with_caching(data_graph)
@memoize_dict(maxsize=10)
def build_langchain_object_with_caching(data_graph):
"""
@ -40,16 +24,10 @@ def build_langchain_object_with_caching(data_graph):
"""
logger.debug("Building langchain object")
graph = build_graph(data_graph)
graph = Graph.from_payload(data_graph)
return graph.build()
def build_graph(data_graph):
nodes = data_graph["nodes"]
edges = data_graph["edges"]
return Graph(nodes, edges)
def build_langchain_object(data_graph):
"""
Build langchain object from data_graph.
@ -66,29 +44,6 @@ def build_langchain_object(data_graph):
return graph.build()
def process_graph_cached(data_graph: Dict[str, Any], message: str):
"""
Process graph by extracting input variables and replacing ZeroShotPrompt
with PromptTemplate, then run the graph and return the result and thought.
"""
# Load langchain object
is_first_message = len(data_graph.get("chatHistory", [])) == 0
langchain_object = load_or_build_langchain_object(data_graph, is_first_message)
logger.debug("Loaded langchain object")
if langchain_object is None:
# Raise user facing error
raise ValueError(
"There was an error loading the langchain_object. Please, check all the nodes and try again."
)
# Generate result and thought
logger.debug("Generating result and thought")
result, thought = get_result_and_thought(langchain_object, message)
logger.debug("Generated result and thought")
return {"result": str(result), "thought": thought.strip()}
def get_memory_key(langchain_object):
"""
Given a LangChain object, this function retrieves the current memory key from the object's memory attribute.
@ -124,147 +79,3 @@ def update_memory_keys(langchain_object, possible_new_mem_key):
langchain_object.memory.input_key = input_key
langchain_object.memory.output_key = output_key
langchain_object.memory.memory_key = possible_new_mem_key
def fix_memory_inputs(langchain_object):
"""
Given a LangChain object, this function checks if it has a memory attribute and if that memory key exists in the
object's input variables. If so, it does nothing. Otherwise, it gets a possible new memory key using the
get_memory_key function and updates the memory keys using the update_memory_keys function.
"""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
try:
if langchain_object.memory.memory_key in langchain_object.input_variables:
return
except AttributeError:
input_variables = (
langchain_object.prompt.input_variables
if hasattr(langchain_object, "prompt")
else langchain_object.input_keys
)
if langchain_object.memory.memory_key in input_variables:
return
possible_new_mem_key = get_memory_key(langchain_object)
if possible_new_mem_key is not None:
update_memory_keys(langchain_object, possible_new_mem_key)
async def get_result_and_steps(langchain_object, message: str, **kwargs):
"""Get result and thought from extracted json"""
try:
if hasattr(langchain_object, "verbose"):
langchain_object.verbose = True
chat_input = None
memory_key = ""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
memory_key = langchain_object.memory.memory_key
if hasattr(langchain_object, "input_keys"):
for key in langchain_object.input_keys:
if key not in [memory_key, "chat_history"]:
chat_input = {key: message}
else:
chat_input = message # type: ignore
if hasattr(langchain_object, "return_intermediate_steps"):
# https://github.com/hwchase17/langchain/issues/2068
# Deactivating until we have a frontend solution
# to display intermediate steps
langchain_object.return_intermediate_steps = True
fix_memory_inputs(langchain_object)
try:
async_callbacks = [AsyncStreamingLLMCallbackHandler(**kwargs)]
output = await langchain_object.acall(chat_input, callbacks=async_callbacks)
except Exception as exc:
# make the error message more informative
logger.debug(f"Error: {str(exc)}")
sync_callbacks = [StreamingLLMCallbackHandler(**kwargs)]
output = langchain_object(chat_input, callbacks=sync_callbacks)
intermediate_steps = (
output.get("intermediate_steps", []) if isinstance(output, dict) else []
)
result = (
output.get(langchain_object.output_keys[0])
if isinstance(output, dict)
else output
)
thought = format_actions(intermediate_steps) if intermediate_steps else ""
except Exception as exc:
raise ValueError(f"Error: {str(exc)}") from exc
return result, thought
def get_result_and_thought(langchain_object, message: str):
"""Get result and thought from extracted json"""
try:
if hasattr(langchain_object, "verbose"):
langchain_object.verbose = True
chat_input = None
memory_key = ""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
memory_key = langchain_object.memory.memory_key
if hasattr(langchain_object, "input_keys"):
for key in langchain_object.input_keys:
if key not in [memory_key, "chat_history"]:
chat_input = {key: message}
else:
chat_input = message # type: ignore
if hasattr(langchain_object, "return_intermediate_steps"):
# https://github.com/hwchase17/langchain/issues/2068
# Deactivating until we have a frontend solution
# to display intermediate steps
langchain_object.return_intermediate_steps = False
fix_memory_inputs(langchain_object)
with io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer):
try:
# if hasattr(langchain_object, "acall"):
# output = await langchain_object.acall(chat_input)
# else:
output = langchain_object(chat_input)
except ValueError as exc:
# make the error message more informative
logger.debug(f"Error: {str(exc)}")
output = langchain_object.run(chat_input)
intermediate_steps = (
output.get("intermediate_steps", []) if isinstance(output, dict) else []
)
result = (
output.get(langchain_object.output_keys[0])
if isinstance(output, dict)
else output
)
if intermediate_steps:
thought = format_actions(intermediate_steps)
else:
thought = output_buffer.getvalue()
except Exception as exc:
raise ValueError(f"Error: {str(exc)}") from exc
return result, thought
def format_actions(actions: List[Tuple[AgentAction, str]]) -> str:
"""Format a list of (AgentAction, answer) tuples into a string."""
output = []
for action, answer in actions:
log = action.log
tool = action.tool
tool_input = action.tool_input
output.append(f"Log: {log}")
if "Action" not in log and "Action Input" not in log:
output.append(f"Tool: {tool}")
output.append(f"Tool Input: {tool_input}")
output.append(f"Answer: {answer}")
output.append("") # Add a blank line
return "\n".join(output)


@ -39,7 +39,7 @@ class TextSplitterCreator(LangChainTypeCreator):
"type": "int",
"required": True,
"show": True,
"value": 4000,
"value": 1000,
"name": "chunk_size",
"display_name": "Chunk Size",
}


@ -42,24 +42,27 @@ class ToolkitCreator(LangChainTypeCreator):
def get_signature(self, name: str) -> Optional[Dict]:
try:
return build_template_from_class(name, self.type_to_loader_dict)
template = build_template_from_class(name, self.type_to_loader_dict)
# add Tool to base_classes
if "toolkit" in name.lower() and template:
template["base_classes"].append("Tool")
return template
except ValueError as exc:
raise ValueError("Prompt not found") from exc
raise ValueError("Toolkit not found") from exc
except AttributeError as exc:
logger.error(f"Prompt {name} not loaded: {exc}")
logger.error(f"Toolkit {name} not loaded: {exc}")
return None
def to_list(self) -> List[str]:
return list(self.type_to_loader_dict.keys())
def get_create_function(self, name: str) -> Callable:
if loader_name := self.create_functions.get(name, None):
# import loader
if loader_name := self.create_functions.get(name):
return import_module(
f"from langchain.agents.agent_toolkits import {loader_name[0]}"
)
else:
raise ValueError("Loader not found")
raise ValueError("Toolkit not found")
def has_create_function(self, name: str) -> bool:
# check if the function list is not empty


@ -71,7 +71,8 @@ class ToolCreator(LangChainTypeCreator):
for tool, tool_fcn in ALL_TOOLS_NAMES.items():
tool_params = get_tool_params(tool_fcn)
tool_name = tool_params.get("name", tool)
tool_name = tool_params.get("name") or tool
if tool_name in settings.tools or settings.dev:
if tool_name == "JsonSpec":


@ -9,10 +9,14 @@ from langchain.agents.load_tools import (
from langchain.tools.json.tool import JsonSpec
from langflow.interface.importing.utils import import_class
from langflow.interface.tools.custom import PythonFunction
from langflow.interface.tools.custom import PythonFunctionTool, PythonFunction
FILE_TOOLS = {"JsonSpec": JsonSpec}
CUSTOM_TOOLS = {"Tool": Tool, "PythonFunction": PythonFunction}
CUSTOM_TOOLS = {
"Tool": Tool,
"PythonFunctionTool": PythonFunctionTool,
"PythonFunction": PythonFunction,
}
OTHER_TOOLS = {tool: import_class(f"langchain.tools.{tool}") for tool in tools.__all__}


@ -1,8 +1,10 @@
from typing import Callable, Optional
from langflow.interface.importing.utils import get_function
from pydantic import BaseModel, validator
from langflow.utils import validate
from langchain.agents.tools import Tool
class Function(BaseModel):
@ -31,6 +33,21 @@ class Function(BaseModel):
return validate.create_function(self.code, function_name)
class PythonFunctionTool(Function, Tool):
"""Python function"""
name: str = "Custom Tool"
description: str
code: str
def __init__(self, name: str, description: str, code: str):
self.name = name
self.description = description
self.code = code
self.func = get_function(self.code)
super().__init__(name=name, description=description, func=self.func)
class PythonFunction(Function):
"""Python function"""


@ -2,6 +2,7 @@ import base64
import json
import os
from io import BytesIO
import re
import yaml
from langchain.base_language import BaseLanguageModel
@ -44,7 +45,16 @@ def try_setting_streaming_options(langchain_object, websocket):
langchain_object.llm_chain, "llm"
):
llm = langchain_object.llm_chain.llm
if isinstance(llm, BaseLanguageModel) and hasattr(llm, "streaming"):
llm.streaming = True
if isinstance(llm, BaseLanguageModel):
if hasattr(llm, "streaming") and isinstance(llm.streaming, bool):
llm.streaming = True
elif hasattr(llm, "stream") and isinstance(llm.stream, bool):
llm.stream = True
return langchain_object
def extract_input_variables_from_prompt(prompt: str) -> list[str]:
"""Extract input variables from prompt."""
return re.findall(r"{(.*?)}", prompt)
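The helper is a plain regex over {placeholder} spans, so its move from graph.utils to interface.utils does not change behavior:

from langflow.interface.utils import extract_input_variables_from_prompt

variables = extract_input_variables_from_prompt("Hi {name}, answer {question}")
assert variables == ["name", "question"]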


@ -1,9 +1,7 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from langflow.api.chat import router as chat_router
from langflow.api.endpoints import router as endpoints_router
from langflow.api.validate import router as validate_router
from langflow.api import router
def create_app():
@ -14,6 +12,10 @@ def create_app():
"*",
]
@app.get("/health")
def get_health():
return {"status": "OK"}
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
@ -22,9 +24,7 @@ def create_app():
allow_headers=["*"],
)
app.include_router(endpoints_router)
app.include_router(validate_router)
app.include_router(chat_router)
app.include_router(router)
return app


@ -0,0 +1,55 @@
from langflow.api.v1.callback import (
AsyncStreamingLLMCallbackHandler,
StreamingLLMCallbackHandler,
)
from langflow.processing.process import fix_memory_inputs, format_actions
from langflow.utils.logger import logger
async def get_result_and_steps(langchain_object, message: str, **kwargs):
"""Get result and thought from extracted json"""
try:
if hasattr(langchain_object, "verbose"):
langchain_object.verbose = True
chat_input = None
memory_key = ""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
memory_key = langchain_object.memory.memory_key
if hasattr(langchain_object, "input_keys"):
for key in langchain_object.input_keys:
if key not in [memory_key, "chat_history"]:
chat_input = {key: message}
else:
chat_input = message # type: ignore
if hasattr(langchain_object, "return_intermediate_steps"):
# https://github.com/hwchase17/langchain/issues/2068
# Deactivating until we have a frontend solution
# to display intermediate steps
langchain_object.return_intermediate_steps = True
fix_memory_inputs(langchain_object)
try:
async_callbacks = [AsyncStreamingLLMCallbackHandler(**kwargs)]
output = await langchain_object.acall(chat_input, callbacks=async_callbacks)
except Exception as exc:
# make the error message more informative
logger.debug(f"Error: {str(exc)}")
sync_callbacks = [StreamingLLMCallbackHandler(**kwargs)]
output = langchain_object(chat_input, callbacks=sync_callbacks)
intermediate_steps = (
output.get("intermediate_steps", []) if isinstance(output, dict) else []
)
result = (
output.get(langchain_object.output_keys[0])
if isinstance(output, dict)
else output
)
thought = format_actions(intermediate_steps) if intermediate_steps else ""
except Exception as exc:
raise ValueError(f"Error: {str(exc)}") from exc
return result, thought


@ -0,0 +1,172 @@
import contextlib
import io
from langchain.schema import AgentAction
import json
from langflow.interface.run import (
build_langchain_object_with_caching,
get_memory_key,
update_memory_keys,
)
from langflow.utils.logger import logger
from langflow.graph import Graph
from typing import Any, Dict, List, Tuple
def fix_memory_inputs(langchain_object):
"""
Given a LangChain object, this function checks if it has a memory attribute and if that memory key exists in the
object's input variables. If so, it does nothing. Otherwise, it gets a possible new memory key using the
get_memory_key function and updates the memory keys using the update_memory_keys function.
"""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
try:
if langchain_object.memory.memory_key in langchain_object.input_variables:
return
except AttributeError:
input_variables = (
langchain_object.prompt.input_variables
if hasattr(langchain_object, "prompt")
else langchain_object.input_keys
)
if langchain_object.memory.memory_key in input_variables:
return
possible_new_mem_key = get_memory_key(langchain_object)
if possible_new_mem_key is not None:
update_memory_keys(langchain_object, possible_new_mem_key)
def format_actions(actions: List[Tuple[AgentAction, str]]) -> str:
"""Format a list of (AgentAction, answer) tuples into a string."""
output = []
for action, answer in actions:
log = action.log
tool = action.tool
tool_input = action.tool_input
output.append(f"Log: {log}")
if "Action" not in log and "Action Input" not in log:
output.append(f"Tool: {tool}")
output.append(f"Tool Input: {tool_input}")
output.append(f"Answer: {answer}")
output.append("") # Add a blank line
return "\n".join(output)
def get_result_and_thought(langchain_object, message: str):
"""Get result and thought from extracted json"""
try:
if hasattr(langchain_object, "verbose"):
langchain_object.verbose = True
chat_input = None
memory_key = ""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
memory_key = langchain_object.memory.memory_key
if hasattr(langchain_object, "input_keys"):
for key in langchain_object.input_keys:
if key not in [memory_key, "chat_history"]:
chat_input = {key: message}
else:
chat_input = message # type: ignore
if hasattr(langchain_object, "return_intermediate_steps"):
# https://github.com/hwchase17/langchain/issues/2068
# Deactivating until we have a frontend solution
# to display intermediate steps
langchain_object.return_intermediate_steps = False
fix_memory_inputs(langchain_object)
with io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer):
try:
# if hasattr(langchain_object, "acall"):
# output = await langchain_object.acall(chat_input)
# else:
output = langchain_object(chat_input)
except ValueError as exc:
# make the error message more informative
logger.debug(f"Error: {str(exc)}")
output = langchain_object.run(chat_input)
intermediate_steps = (
output.get("intermediate_steps", []) if isinstance(output, dict) else []
)
result = (
output.get(langchain_object.output_keys[0])
if isinstance(output, dict)
else output
)
if intermediate_steps:
thought = format_actions(intermediate_steps)
else:
thought = output_buffer.getvalue()
except Exception as exc:
raise ValueError(f"Error: {str(exc)}") from exc
return result, thought
def load_or_build_langchain_object(data_graph, is_first_message=False):
"""
Load langchain object from cache if it exists, otherwise build it.
"""
if is_first_message:
build_langchain_object_with_caching.clear_cache()
return build_langchain_object_with_caching(data_graph)
def process_graph_cached(data_graph: Dict[str, Any], message: str):
"""
Process graph by extracting input variables and replacing ZeroShotPrompt
with PromptTemplate, then run the graph and return the result and thought.
"""
# Load langchain object
is_first_message = len(data_graph.get("chatHistory", [])) == 0
langchain_object = load_or_build_langchain_object(data_graph, is_first_message)
logger.debug("Loaded langchain object")
if langchain_object is None:
# Raise user facing error
raise ValueError(
"There was an error loading the langchain_object. Please, check all the nodes and try again."
)
# Generate result and thought
logger.debug("Generating result and thought")
result, thought = get_result_and_thought(langchain_object, message)
logger.debug("Generated result and thought")
return {"result": str(result), "thought": thought.strip()}
def load_flow_from_json(path: str, build=True):
"""Load flow from json file"""
# This is done to avoid circular imports
with open(path, "r", encoding="utf-8") as f:
flow_graph = json.load(f)
data_graph = flow_graph["data"]
nodes = data_graph["nodes"]
# Substitute ZeroShotPrompt with PromptTemplate
# nodes = replace_zero_shot_prompt_with_prompt_template(nodes)
# Add input variables
# nodes = payload.extract_input_variables(nodes)
# Nodes, edges and root node
edges = data_graph["edges"]
graph = Graph(nodes, edges)
if build:
langchain_object = graph.build()
if hasattr(langchain_object, "verbose"):
langchain_object.verbose = True
if hasattr(langchain_object, "return_intermediate_steps"):
# https://github.com/hwchase17/langchain/issues/2068
# Deactivating until we have a frontend solution
# to display intermediate steps
langchain_object.return_intermediate_steps = False
fix_memory_inputs(langchain_object)
return langchain_object
return graph
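Consumed from Python, the loader returns the built LangChain object directly, so a flow that ends in a chain or agent can be called like a function (my_flow.json is a hypothetical export):

from langflow import load_flow_from_json

flow = load_flow_from_json("my_flow.json")
print(flow("Hey, have you heard of LangFlow?"))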

View file

@ -146,7 +146,7 @@ class CSVAgentNode(FrontendNode):
),
],
)
description: str = """Construct a json agent from a CSV and tools."""
description: str = """Construct a CSV agent from a CSV and tools."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
@ -194,7 +194,7 @@ class InitializeAgentNode(FrontendNode):
),
],
)
description: str = """Construct a json agent from an LLM and tools."""
description: str = """Construct a zero shot agent from an LLM and tools."""
base_classes: list[str] = ["AgentExecutor", "function"]
def to_dict(self):

View file

@ -117,14 +117,30 @@ class FrontendNode(BaseModel):
) -> None:
"""Handles specific field values for certain fields."""
if key == "headers":
field.value = """{'Authorization':
'Bearer <token>'}"""
if name == "OpenAI" and key == "model_name":
field.options = constants.OPENAI_MODELS
field.is_list = True
elif name == "ChatOpenAI" and key == "model_name":
field.options = constants.CHAT_OPENAI_MODELS
field.value = """{'Authorization': 'Bearer <token>'}"""
FrontendNode._handle_model_specific_field_values(field, key, name)
FrontendNode._handle_api_key_specific_field_values(field, key, name)
@staticmethod
def _handle_model_specific_field_values(
field: TemplateField, key: str, name: Optional[str] = None
) -> None:
"""Handles specific field values related to models."""
model_dict = {
"OpenAI": constants.OPENAI_MODELS,
"ChatOpenAI": constants.CHAT_OPENAI_MODELS,
"Anthropic": constants.ANTHROPIC_MODELS,
"ChatAnthropic": constants.ANTHROPIC_MODELS,
}
if name in model_dict and key == "model_name":
field.options = model_dict[name]
field.is_list = True
@staticmethod
def _handle_api_key_specific_field_values(
field: TemplateField, key: str, name: Optional[str] = None
) -> None:
"""Handles specific field values related to API keys."""
if "api_key" in key and "OpenAI" in str(name):
field.display_name = "OpenAI API Key"
field.required = False

View file

@ -12,6 +12,18 @@ class LLMFrontendNode(FrontendNode):
field.name.title().replace("Openai", "OpenAI").replace("_", " ")
).replace("Api", "API")
@staticmethod
def format_azure_field(field: TemplateField):
if field.name == "model_name":
field.show = False # Azure uses deployment_name instead of model_name.
if field.name == "openai_api_type":
field.show = False
field.password = False
field.value = "azure"
if field.name == "openai_api_version":
field.password = False
field.value = "2023-03-15-preview"
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
display_names_dict = {
@ -34,7 +46,8 @@ class LLMFrontendNode(FrontendNode):
field.required = True
field.show = True
field.is_list = True
field.options = ["text-generation", "text2text-generation"]
field.options = ["text-generation", "text2text-generation", "summarization"]
field.value = field.options[0]
field.advanced = True
if display_name := display_names_dict.get(field.name):
@ -43,8 +56,16 @@ class LLMFrontendNode(FrontendNode):
field.field_type = "code"
field.advanced = True
field.show = True
elif field.name in ["model_name", "temperature", "model_file", "model_type"]:
elif field.name in [
"model_name",
"temperature",
"model_file",
"model_type",
"deployment_name",
]:
field.advanced = False
field.show = True
LLMFrontendNode.format_openai_field(field)
if name and "azure" in name.lower():
LLMFrontendNode.format_azure_field(field)

View file

@ -59,6 +59,52 @@ class ToolNode(FrontendNode):
return super().to_dict()
class PythonFunctionToolNode(FrontendNode):
name: str = "PythonFunctionTool"
template: Template = Template(
type_name="PythonFunctionTool",
fields=[
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=False,
value="",
name="name",
advanced=False,
),
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=False,
value="",
name="description",
advanced=False,
),
TemplateField(
field_type="code",
required=True,
placeholder="",
is_list=False,
show=True,
value=DEFAULT_PYTHON_FUNCTION,
name="code",
advanced=False,
),
],
)
description: str = "Python function to be executed."
base_classes: list[str] = ["Tool"]
def to_dict(self):
return super().to_dict()
class PythonFunctionNode(FrontendNode):
name: str = "PythonFunction"
template: Template = Template(

View file

@ -7,6 +7,20 @@ OPENAI_MODELS = [
]
CHAT_OPENAI_MODELS = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]
ANTHROPIC_MODELS = [
"claude-v1", # largest model, ideal for a wide range of more complex tasks.
"claude-v1-100k", # An enhanced version of claude-v1 with a 100,000 token (roughly 75,000 word) context window.
"claude-instant-v1", # A smaller model with far lower latency, sampling at roughly 40 words/sec!
"claude-instant-v1-100k", # Like claude-instant-v1 with a 100,000 token context window but retains its performance.
# Specific sub-versions of the above models:
"claude-v1.3", # Vs claude-v1.2: better instruction-following, code, and non-English dialogue and writing.
"claude-v1.3-100k", # An enhanced version of claude-v1.3 with a 100,000 token (roughly 75,000 word) context window.
"claude-v1.2", # Vs claude-v1.1: small adv in general helpfulness, instruction following, coding, and other tasks.
"claude-v1.0", # An earlier version of claude-v1.
"claude-instant-v1.1", # Latest version of claude-instant-v1. Better than claude-instant-v1.0 at most tasks.
"claude-instant-v1.1-100k", # Version of claude-instant-v1.1 with a 100K token context window.
"claude-instant-v1.0", # An earlier version of claude-instant-v1.
]
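These identifiers populate the model_name dropdowns; on the LangChain side they map onto the Anthropic wrappers roughly as follows (a sketch; assumes langchain with the anthropic package installed and ANTHROPIC_API_KEY set in the environment):

from langchain.chat_models import ChatAnthropic

# Any entry from ANTHROPIC_MODELS above is a valid value here.
chat = ChatAnthropic(model="claude-instant-v1", temperature=0)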
DEFAULT_PYTHON_FUNCTION = """
def python_function(text: str) -> str:

View file

@ -302,7 +302,9 @@ def format_dict(d, name: Optional[str] = None):
elif name == "ChatOpenAI" and key == "model_name":
value["options"] = constants.CHAT_OPENAI_MODELS
value["list"] = True
elif (name == "Anthropic" or name == "ChatAnthropic") and key == "model_name":
value["options"] = constants.ANTHROPIC_MODELS
value["list"] = True
return d

File diff suppressed because it is too large Load diff

View file

@ -13,6 +13,7 @@
"@radix-ui/react-label": "^2.0.2",
"@radix-ui/react-slot": "^1.0.2",
"@radix-ui/react-switch": "^1.0.3",
"@radix-ui/react-tooltip": "^1.0.6",
"@tabler/icons-react": "^2.18.0",
"@tailwindcss/forms": "^0.5.3",
"@tailwindcss/line-clamp": "^0.4.4",

View file

@ -172,7 +172,7 @@ export default function App() {
className="absolute left-7 bottom-2 flex h-6 cursor-pointer flex-col items-center justify-start overflow-hidden rounded-lg bg-gray-800 px-2 text-center font-sans text-xs tracking-wide text-gray-300 transition-all duration-500 ease-in-out hover:h-12 dark:bg-gray-100 dark:text-gray-800"
>
{version && <div className="mt-1"> LangFlow v{version}</div>}
<div className="mt-2">Created by Logspace</div>
<div className={version ? "mt-2" : "mt-1"}>Created by Logspace</div>
</a>
</div>
);

View file

@ -1,6 +1,11 @@
import { Handle, Position, useUpdateNodeInternals } from "reactflow";
import Tooltip from "../../../../components/TooltipComponent";
import { classNames, isValidConnection } from "../../../../utils";
import {
classNames,
groupByFamily,
isValidConnection,
toFirstUpperCase,
} from "../../../../utils";
import { useContext, useEffect, useRef, useState } from "react";
import InputComponent from "../../../../components/inputComponent";
import ToggleComponent from "../../../../components/toggleComponent";
@ -15,6 +20,10 @@ import InputFileComponent from "../../../../components/inputFileComponent";
import { TabsContext } from "../../../../contexts/tabsContext";
import IntComponent from "../../../../components/intComponent";
import PromptAreaComponent from "../../../../components/promptComponent";
import { nodeNames, nodeIcons } from "../../../../utils";
import React from "react";
import { nodeColors } from "../../../../utils";
import ShadTooltip from "../../../../components/ShadTooltipComponent";
export default function ParameterComponent({
left,
@ -28,6 +37,7 @@ export default function ParameterComponent({
required = false,
}: ParameterComponentType) {
const ref = useRef(null);
const refHtml = useRef(null);
const updateNodeInternals = useUpdateNodeInternals();
const [position, setPosition] = useState(0);
useEffect(() => {
@ -48,6 +58,48 @@ export default function ParameterComponent({
let disabled =
reactFlowInstance?.getEdges().some((e) => e.targetHandle === id) ?? false;
const { save } = useContext(TabsContext);
const [myData, setMyData] = useState(useContext(typesContext).data);
useEffect(() => {
const groupedObj = groupByFamily(myData, tooltipTitle);
refHtml.current = groupedObj.map((item, i) => (
<span
key={item}
className={classNames(
i > 0 ? "items-center flex mt-3" : "items-center flex"
)}
>
<div
className="h-5 w-5"
style={{
color: nodeColors[item.family],
}}
>
{React.createElement(nodeIcons[item.family])}
</div>
<span className="ps-2 text-gray-950">
{nodeNames[item.family] ?? ""}{" "}
<span className={classNames(left ? "hidden" : "")}>
{" "}
-&nbsp;
{item.type.split(", ").length > 2
? item.type.split(", ").map((el, i) => (
<>
<span key={el}>
{i == item.type.split(", ").length - 1
? el
: (el += `, `)}
</span>
{i % 2 == 0 && i > 0 && <br></br>}
</>
))
: item.type}
</span>
</span>
</span>
));
}, [tooltipTitle]);
return (
<div
@ -69,7 +121,11 @@ export default function ParameterComponent({
type === "int") ? (
<></>
) : (
<Tooltip title={tooltipTitle + (required ? " (required)" : "")}>
<ShadTooltip
delayDuration={0}
content={refHtml.current}
side={left ? "left" : "right"}
>
<Handle
type={left ? "target" : "source"}
position={left ? Position.Left : Position.Right}
@ -86,7 +142,7 @@ export default function ParameterComponent({
top: position,
}}
></Handle>
</Tooltip>
</ShadTooltip>
)}
{left === true &&

View file

@ -28,11 +28,11 @@ import NodeModal from "../../modals/NodeModal";
import { useCallback } from "react";
import { TabsContext } from "../../contexts/tabsContext";
import { debounce } from "../../utils";
import TooltipReact from "../../components/ReactTooltipComponent";
import Tooltip from "../../components/TooltipComponent";
import { NodeToolbar } from "reactflow";
import NodeToolbarComponent from "../../pages/FlowPage/components/nodeToolbarComponent";
import ShadTooltip from "../../components/ShadTooltipComponent";
export default function GenericNode({
data,
selected,
@ -102,39 +102,45 @@ export default function GenericNode({
}
return (
<>
<NodeToolbar>
<NodeToolbarComponent
data={data}
openPopUp={openPopUp}
deleteNode={deleteNode}
></NodeToolbarComponent>
</NodeToolbar>
<div
className={classNames(
selected ? "border border-blue-500" : "border dark:border-gray-700",
"prompt-node relative flex w-96 flex-col justify-center rounded-lg bg-white dark:bg-gray-900"
)}
>
<div className="flex w-full items-center justify-between gap-8 rounded-t-lg border-b bg-gray-50 p-4 dark:border-b-gray-700 dark:bg-gray-800 dark:text-white ">
<div className="flex w-full items-center gap-2 truncate text-lg">
<Icon
className="h-10 w-10 rounded p-1"
style={{
color: nodeColors[types[data.type]] ?? nodeColors.unknown,
}}
/>
<div className="ml-2 truncate">
<TooltipReact
delayShow={1000}
selector={`node-selector-${data.type}`}
htmlContent={data.type}
position="top"
>
<div className="ml-2 truncate">{data.type}</div>
</TooltipReact>
</div>
<>
<NodeToolbar>
<NodeToolbarComponent
data={data}
openPopUp={openPopUp}
deleteNode={deleteNode}
></NodeToolbarComponent>
</NodeToolbar>
<div
className={classNames(
selected ? "border border-blue-500" : "border dark:border-gray-700",
"prompt-node relative flex w-96 flex-col justify-center rounded-lg bg-white dark:bg-gray-900"
)}
>
<div className="flex w-full items-center justify-between gap-8 rounded-t-lg border-b bg-gray-50 p-4 dark:border-b-gray-700 dark:bg-gray-800 dark:text-white ">
<div className="flex w-full items-center gap-2 truncate text-lg">
<Icon
className="h-10 w-10 rounded p-1"
style={{
color: nodeColors[types[data.type]] ?? nodeColors.unknown,
}}
/>
<div className="ml-2 truncate">
<ShadTooltip delayDuration={1500} content={data.type}>
<div className="ml-2 truncate">{data.type}</div>
</ShadTooltip>
</div>
</div>
<div className="flex gap-3">
<button
className="relative"
onClick={(event) => {
event.preventDefault();
openPopUp(<NodeModal data={data} />);
}}
>
</button>
</div>
<div className="flex gap-3">
<div>
@ -216,29 +222,25 @@ export default function GenericNode({
{data.node.template[t].show &&
!data.node.template[t].advanced ? (
<ParameterComponent
data={data}
color={
nodeColors[types[data.node.template[t].type]] ??
nodeColors.unknown
}
title={
data.node.template[t].display_name
? data.node.template[t].display_name
: data.node.template[t].name
? toTitleCase(data.node.template[t].name)
: toTitleCase(t)
}
name={t}
tooltipTitle={
"Type: " +
data.node.template[t].type +
(data.node.template[t].list ? " list" : "")
}
required={data.node.template[t].required}
id={data.node.template[t].type + "|" + t + "|" + data.id}
left={true}
type={data.node.template[t].type}
/>
data={data}
color={
nodeColors[types[data.node.template[t].type]] ??
nodeColors.unknown
}
title={
data.node.template[t].display_name
? data.node.template[t].display_name
: data.node.template[t].name
? toTitleCase(data.node.template[t].name)
: toTitleCase(t)
}
name={t}
tooltipTitle={data.node.template[t].type}
required={data.node.template[t].required}
id={data.node.template[t].type + "|" + t + "|" + data.id}
left={true}
type={data.node.template[t].type}
/>
) : (
<></>
)}
@ -255,15 +257,15 @@ export default function GenericNode({
{/* <div className="px-5 py-2 mt-2 dark:text-white text-center">
Output
</div> */}
<ParameterComponent
data={data}
color={nodeColors[types[data.type]] ?? nodeColors.unknown}
title={data.type}
tooltipTitle={`Type: ${data.node.base_classes.join(" | ")}`}
id={[data.type, data.id, ...data.node.base_classes].join("|")}
type={data.node.base_classes.join("|")}
left={false}
/>
<ParameterComponent
data={data}
color={nodeColors[types[data.type]] ?? nodeColors.unknown}
title={data.type}
tooltipTitle={`${data.node.base_classes.join("\n")}`}
id={[data.type, data.id, ...data.node.base_classes].join("|")}
type={data.node.base_classes.join("|")}
left={false}
/>
</>
</div>
</div>

View file

@ -1,6 +1,6 @@
import { Disclosure } from "@headlessui/react";
import { ChevronLeftIcon } from "@heroicons/react/24/outline";
import { useContext } from "react";
import { useContext, useState } from "react";
import { Link } from "react-router-dom";
import { classNames } from "../../utils";
import { locationContext } from "../../contexts/locationContext";
@ -13,6 +13,7 @@ export default function ExtraSidebar() {
extraNavigation,
extraComponent,
} = useContext(locationContext);
return (
<>
<aside
@ -21,10 +22,8 @@ export default function ExtraSidebar() {
} flex-shrink-0 flex overflow-hidden flex-col border-r dark:border-r-gray-700 transition-all duration-500`}
>
<div className="w-52 dark:bg-gray-800 border dark:border-gray-700 overflow-y-auto scrollbar-hide h-full flex flex-col items-start">
<div className="flex pt-1 px-4 justify-between align-middle w-full">
<span className="text-gray-900 dark:text-white py-[2px] font-medium ">
{extraNavigation.title}
</span>
<div className="flex px-4 justify-between align-middle w-full">
<span className="text-gray-900 dark:text-white py-[2px] font-medium "></span>
</div>
<div className="flex flex-grow flex-col w-full">
{extraNavigation.options ? (

View file

@ -37,13 +37,15 @@ const TooltipReact: FC<TooltipProps> = ({
id={selector}
content={content}
className={classNames(
"!bg-white !text-xs !font-normal !text-gray-700 !shadow-md !opacity-100 z-20",
"!bg-white !text-xs !font-normal !text-gray-700 !shadow-md !opacity-100 z-[9999]",
className
)}
place={position}
clickable={clickable}
isOpen={disabled ? false : undefined}
delayShow={delayShow}
positionStrategy="absolute"
float={true}
>
{htmlContent && htmlContent}
</ReactTooltip>

View file

@ -0,0 +1,25 @@
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "../ui/tooltip";
const ShadTooltip = (props) => {
return (
<TooltipProvider>
<Tooltip delayDuration={props.delayDuration}>
<TooltipTrigger asChild>{props.children}</TooltipTrigger>
<TooltipContent
side={props.side}
avoidCollisions={false}
sticky="always"
>
{props.content}
</TooltipContent>
</Tooltip>
</TooltipProvider>
);
};
export default ShadTooltip;

View file

@ -0,0 +1,29 @@
"use client";
import * as React from "react";
import * as TooltipPrimitive from "@radix-ui/react-tooltip";
import { cn } from "../../utils";
const TooltipProvider = TooltipPrimitive.Provider;
const Tooltip = TooltipPrimitive.Root;
const TooltipTrigger = TooltipPrimitive.Trigger;
const TooltipContent = React.forwardRef<
React.ElementRef<typeof TooltipPrimitive.Content>,
React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
>(({ className, sideOffset = 4, ...props }, ref) => (
<TooltipPrimitive.Content
ref={ref}
sideOffset={sideOffset}
className={cn(
"z-50 overflow-hidden rounded-md border bg-popover px-3 py-1.5 text-sm text-popover-foreground shadow-md animate-in fade-in-50 data-[side=bottom]:slide-in-from-top-1 data-[side=left]:slide-in-from-right-1 data-[side=right]:slide-in-from-left-1 data-[side=top]:slide-in-from-bottom-1",
className
)}
{...props}
/>
));
TooltipContent.displayName = TooltipPrimitive.Content.displayName;
export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider };

View file

@ -13,13 +13,16 @@ const initialValue = {
export const darkContext = createContext<darkContextType>(initialValue);
export function DarkProvider({ children }) {
const [dark, setDark] = useState(false);
const [dark, setDark] = useState(
JSON.parse(window.localStorage.getItem("isDark")) ?? false
);
useEffect(() => {
if (dark) {
document.getElementById("body").classList.add("dark");
} else {
document.getElementById("body").classList.remove("dark");
}
window.localStorage.setItem("isDark", dark.toString());
}, [dark]);
return (
<darkContext.Provider

View file

@ -50,7 +50,7 @@ export const TabsContext = createContext<TabsContextType>(
);
export function TabsProvider({ children }: { children: ReactNode }) {
const { setNoticeData } = useContext(alertContext);
const { setErrorData, setNoticeData } = useContext(alertContext);
const [tabIndex, setTabIndex] = useState(0);
const [flows, setFlows] = useState<Array<FlowType>>([]);
const [id, setId] = useState(uuidv4());
@ -99,25 +99,25 @@ export function TabsProvider({ children }: { children: ReactNode }) {
edge.style = { stroke: "#555555" };
});
flow.data.nodes.forEach((node) => {
if (Object.keys(templates[node.data.type]["template"]).length > 0) {
node.data.node.base_classes =
templates[node.data.type]["base_classes"];
const template = templates[node.data.type];
if (!template) {
setErrorData({ title: `Unknown node type: ${node.data.type}` });
return;
}
if (Object.keys(template["template"]).length > 0) {
node.data.node.base_classes = template["base_classes"];
flow.data.edges.forEach((edge) => {
if (edge.source === node.id) {
edge.sourceHandle = edge.sourceHandle
.split("|")
.slice(0, 2)
.concat(templates[node.data.type]["base_classes"])
.concat(template["base_classes"])
.join("|");
}
});
node.data.node.description =
templates[node.data.type]["description"];
node.data.node.description = template["description"];
node.data.node.template = updateTemplate(
templates[node.data.type][
"template"
] as unknown as APITemplateType,
template["template"] as unknown as APITemplateType,
node.data.node.template as APITemplateType
);
}
@ -325,21 +325,25 @@ export function TabsProvider({ children }: { children: ReactNode }) {
edge.animated = edge.targetHandle.split("|")[0] === "Text";
});
data.nodes.forEach((node) => {
if (Object.keys(templates[node.data.type]["template"]).length > 0) {
node.data.node.base_classes =
templates[node.data.type]["base_classes"];
const template = templates[node.data.type];
if (!template) {
setErrorData({ title: `Unknown node type: ${node.data.type}` });
return;
}
if (Object.keys(template["template"]).length > 0) {
node.data.node.base_classes = template["base_classes"];
flow.data.edges.forEach((edge) => {
if (edge.source === node.id) {
edge.sourceHandle = edge.sourceHandle
.split("|")
.slice(0, 2)
.concat(templates[node.data.type]["base_classes"])
.concat(template["base_classes"])
.join("|");
}
});
node.data.node.description = templates[node.data.type]["description"];
node.data.node.description = template["description"];
node.data.node.template = updateTemplate(
templates[node.data.type]["template"] as unknown as APITemplateType,
template["template"] as unknown as APITemplateType,
node.data.node.template as APITemplateType
);
}

View file

@ -14,13 +14,13 @@ export async function sendAll(data: sendAllProps) {
export async function checkCode(
code: string
): Promise<AxiosResponse<errorsTypeAPI>> {
return await axios.post("/validate/code", { code });
return await axios.post("api/v1/validate/code", { code });
}
export async function checkPrompt(
template: string
): Promise<AxiosResponse<PromptTypeAPI>> {
return await axios.post("/validate/prompt", { template });
return await axios.post("api/v1/validate/prompt", { template });
}
export async function getExamples(): Promise<FlowType[]> {

View file

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="utf-8"?>
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" viewBox="-170.333 113.047 600 67.4" width="600" height="67.4">
<defs>
<style type="text/css">
.st0{fill:#1F1F1E;}
</style>
</defs>
<path class="st0" d="M -23.533 126.747 L -1.633 126.747 L -1.633 179.347 L 12.367 179.347 L 12.367 126.747 L 34.267 126.747 L 34.267 114.147 L -23.533 114.147 L -23.533 126.747 Z M -48.133 159.747 L -77.433 114.147 L -93.233 114.147 L -93.233 179.247 L -79.733 179.247 L -79.733 133.647 L -50.433 179.347 L -34.633 179.347 L -34.633 114.247 L -48.133 114.247 L -48.133 159.747 Z M 90.067 140.147 L 59.367 140.147 L 59.367 114.147 L 45.367 114.147 L 45.367 179.247 L 59.367 179.247 L 59.367 152.647 L 90.067 152.647 L 90.067 179.247 L 104.067 179.247 L 104.067 114.147 L 90.067 114.147 L 90.067 140.147 Z M -144.333 114.147 L -170.333 179.247 L -155.833 179.247 L -150.533 165.547 L -123.333 165.547 L -118.033 179.247 L -103.533 179.247 L -129.533 114.147 L -144.333 114.147 Z M -145.833 153.547 L -136.933 130.647 L -128.033 153.547 L -145.833 153.547 Z M 219.667 113.047 C 200.867 113.047 187.567 127.047 187.567 146.847 C 187.567 166.447 200.867 180.447 219.667 180.447 C 238.367 180.447 251.567 166.447 251.567 146.847 C 251.567 127.047 238.367 113.047 219.667 113.047 Z M 219.667 167.447 C 208.667 167.447 201.967 159.647 201.967 146.847 C 201.967 133.947 208.667 126.047 219.667 126.047 C 230.567 126.047 237.167 133.847 237.167 146.847 C 237.167 159.547 230.567 167.447 219.667 167.447 Z M 414.767 157.447 C 412.367 163.747 407.467 167.447 400.867 167.447 C 389.867 167.447 383.167 159.647 383.167 146.847 C 383.167 133.947 389.867 126.047 400.867 126.047 C 407.467 126.047 412.267 129.647 414.767 136.047 L 429.567 136.047 C 425.967 122.047 415.067 113.047 400.867 113.047 C 382.067 113.047 368.767 127.047 368.767 146.847 C 368.767 166.447 382.067 180.447 400.867 180.447 C 415.067 180.447 425.967 171.347 429.667 157.447 L 414.767 157.447 Z M 325.867 114.147 L 351.867 179.247 L 366.067 179.247 L 340.067 114.147 L 325.867 114.147 Z M 296.367 114.147 L 264.567 114.147 L 264.567 179.247 L 278.567 179.247 L 278.567 155.647 L 296.467 155.647 C 311.267 155.647 320.267 147.847 320.267 134.847 C 320.267 121.947 311.167 114.147 296.367 114.147 Z M 295.767 143.147 L 278.567 143.147 L 278.567 126.747 L 295.767 126.747 C 302.667 126.747 306.267 129.547 306.267 134.947 C 306.267 140.347 302.667 143.147 295.767 143.147 Z M 176.867 134.047 C 176.867 121.747 167.867 114.247 153.067 114.247 L 121.267 114.247 L 121.267 179.347 L 135.267 179.347 L 135.267 153.847 L 150.767 153.847 L 164.767 179.347 L 180.167 179.347 L 164.667 151.947 C 172.367 148.847 176.867 142.647 176.867 134.047 Z M 135.167 126.747 L 152.367 126.747 C 159.267 126.747 162.867 129.247 162.867 134.047 C 162.867 138.747 159.267 141.347 152.367 141.347 L 135.167 141.347 L 135.167 126.747 Z"></path>
</svg>


View file

@ -0,0 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" viewBox="-126.9 247.9 207.161 212.728" width="207.161" height="212.728">
<defs>
<style type="text/css">
.st0{fill:#1F1F1E;}
</style>
</defs>
<path class="st0" d="M 19.9 260.5 L 41.8 260.5 L 41.8 313.1 L 55.8 313.1 L 55.8 260.5 L 77.7 260.5 L 77.7 247.9 L 19.9 247.9 L 19.9 260.5 Z M -4.7 293.5 L -34 247.9 L -49.8 247.9 L -49.8 313 L -36.3 313 L -36.3 267.4 L -7 313.1 L 8.8 313.1 L 8.8 248 L -4.7 248 L -4.7 293.5 Z M -100.9 247.9 L -126.9 313 L -112.4 313 L -107.1 299.3 L -79.9 299.3 L -74.6 313 L -60.1 313 L -86.1 247.9 L -100.9 247.9 Z M -102.4 287.3 L -93.5 264.4 L -84.6 287.3 L -102.4 287.3 Z"></path>
<path class="st0" d="M 38.246 437.628 C 35.846 443.928 30.946 447.628 24.346 447.628 C 13.346 447.628 6.646 439.828 6.646 427.028 C 6.646 414.128 13.346 406.228 24.346 406.228 C 30.946 406.228 35.746 409.828 38.246 416.228 L 53.046 416.228 C 49.446 402.228 38.546 393.228 24.346 393.228 C 5.546 393.228 -7.754 407.228 -7.754 427.028 C -7.754 446.628 5.546 460.628 24.346 460.628 C 38.546 460.628 49.446 451.528 53.146 437.628 L 38.246 437.628 Z M -50.654 394.328 L -24.654 459.428 L -10.454 459.428 L -36.454 394.328 L -50.654 394.328 Z M -80.154 394.328 L -111.954 394.328 L -111.954 459.428 L -97.954 459.428 L -97.954 435.828 L -80.054 435.828 C -65.254 435.828 -56.254 428.028 -56.254 415.028 C -56.254 402.128 -65.354 394.328 -80.154 394.328 Z M -80.754 423.328 L -97.954 423.328 L -97.954 406.928 L -80.754 406.928 C -73.854 406.928 -70.254 409.728 -70.254 415.128 C -70.254 420.528 -73.854 423.328 -80.754 423.328 Z"></path>
<path class="st0" d="M -81.239 347.704 L -111.939 347.704 L -111.939 321.704 L -125.939 321.704 L -125.939 386.804 L -111.939 386.804 L -111.939 360.204 L -81.239 360.204 L -81.239 386.804 L -67.239 386.804 L -67.239 321.704 L -81.239 321.704 L -81.239 347.704 Z M 48.361 320.604 C 29.561 320.604 16.261 334.604 16.261 354.404 C 16.261 374.004 29.561 388.004 48.361 388.004 C 67.061 388.004 80.261 374.004 80.261 354.404 C 80.261 334.604 67.061 320.604 48.361 320.604 Z M 48.361 375.004 C 37.361 375.004 30.661 367.204 30.661 354.404 C 30.661 341.504 37.361 333.604 48.361 333.604 C 59.261 333.604 65.861 341.404 65.861 354.404 C 65.861 367.104 59.261 375.004 48.361 375.004 Z M 5.561 341.604 C 5.561 329.304 -3.439 321.804 -18.239 321.804 L -50.039 321.804 L -50.039 386.904 L -36.039 386.904 L -36.039 361.404 L -20.539 361.404 L -6.539 386.904 L 8.861 386.904 L -6.639 359.504 C 1.061 356.404 5.561 350.204 5.561 341.604 Z M -36.139 334.304 L -18.939 334.304 C -12.039 334.304 -8.439 336.804 -8.439 341.604 C -8.439 346.304 -12.039 348.904 -18.939 348.904 L -36.139 348.904 L -36.139 334.304 Z"></path>
</svg>


View file

@ -0,0 +1,9 @@
import React, { forwardRef } from "react";
import { ReactComponent as AnthropicSVG } from "./anthropic_box.svg";
export const AnthropicIcon = forwardRef<
SVGSVGElement,
React.PropsWithChildren<{}>
>((props, ref) => {
return <AnthropicSVG ref={ref} {...props} />;
});

View file

@ -3,99 +3,162 @@
@tailwind utilities;
@layer base {
:root {
--background: 0 0% 100%;
--foreground: 222.2 47.4% 11.2%;
--muted: 210 40% 96.1%;
--muted-foreground: 215.4 16.3% 46.9%;
--popover: 0 0% 100%;
--popover-foreground: 222.2 47.4% 11.2%;
--card: 0 0% 100%;
--card-foreground: 222.2 47.4% 11.2%;
--border: 214.3 31.8% 91.4%;
--input: 214.3 31.8% 91.4%;
--primary: 222.2 47.4% 11.2%;
--primary-foreground: 210 40% 98%;
--secondary: 210 40% 96.1%;
--secondary-foreground: 222.2 47.4% 11.2%;
--accent: 210 40% 96.1%;
--accent-foreground: 222.2 47.4% 11.2%;
--destructive: 0 100% 50%;
--destructive-foreground: 210 40% 98%;
--ring: 215 20.2% 65.1%;
--radius: 0.5rem;
}
.dark {
--background: 224 71% 4%;
--foreground: 213 31% 91%;
--muted: 223 47% 11%;
--muted-foreground: 215.4 16.3% 56.9%;
--popover: 224 71% 4%;
--popover-foreground: 215 20.2% 65.1%;
--card: 224 71% 4%;
--card-foreground: 213 31% 91%;
--border: 216 34% 17%;
--input: 216 34% 17%;
--primary: 210 40% 98%;
--primary-foreground: 222.2 47.4% 1.2%;
--secondary: 222.2 47.4% 11.2%;
--secondary-foreground: 210 40% 98%;
--accent: 216 34% 17%;
--accent-foreground: 210 40% 98%;
--destructive: 0 63% 31%;
--destructive-foreground: 210 40% 98%;
--ring: 216 34% 17%;
--radius: 0.5rem;
}
}
@layer base {
* {
@apply border-border;
}
body {
@apply bg-background text-foreground;
font-feature-settings: "rlig" 1, "calt" 1;
}
:root {
--background: 0 0% 100%;
--foreground: 222.2 47.4% 11.2%;
--muted: 210 40% 96.1%;
--muted-foreground: 215.4 16.3% 46.9%;
--popover: 0 0% 100%;
--popover-foreground: 222.2 47.4% 11.2%;
--card: 0 0% 100%;
--card-foreground: 222.2 47.4% 11.2%;
--border: 214.3 31.8% 91.4%;
--input: 214.3 31.8% 91.4%;
--primary: 222.2 47.4% 11.2%;
--primary-foreground: 210 40% 98%;
--secondary: 210 40% 96.1%;
--secondary-foreground: 222.2 47.4% 11.2%;
--accent: 210 40% 96.1%;
--accent-foreground: 222.2 47.4% 11.2%;
--destructive: 0 100% 50%;
--destructive-foreground: 210 40% 98%;
--ring: 215 20.2% 65.1%;
--radius: 0.5rem;
}
.dark {
--background: 224 71% 4%;
--foreground: 213 31% 91%;
--muted: 223 47% 11%;
--muted-foreground: 215.4 16.3% 56.9%;
--popover: 224 71% 4%;
--popover-foreground: 215 20.2% 65.1%;
--card: 224 71% 4%;
--card-foreground: 213 31% 91%;
--border: 216 34% 17%;
--input: 216 34% 17%;
--primary: 210 40% 98%;
--primary-foreground: 222.2 47.4% 1.2%;
--secondary: 222.2 47.4% 11.2%;
--secondary-foreground: 210 40% 98%;
--accent: 216 34% 17%;
--accent-foreground: 210 40% 98%;
--destructive: 0 63% 31%;
--destructive-foreground: 210 40% 98%;
--ring: 216 34% 17%;
--radius: 0.5rem;
}
}
:root {
--background: 0 0% 100%;
--foreground: 222.2 47.4% 11.2%;
--muted: 210 40% 96.1%;
--muted-foreground: 215.4 16.3% 46.9%;
--popover: 0 0% 100%;
--popover-foreground: 222.2 47.4% 11.2%;
--card: 0 0% 100%;
--card-foreground: 222.2 47.4% 11.2%;
--border: 214.3 31.8% 91.4%;
--input: 214.3 31.8% 91.4%;
--primary: 222.2 47.4% 11.2%;
--primary-foreground: 210 40% 98%;
--secondary: 210 40% 96.1%;
--secondary-foreground: 222.2 47.4% 11.2%;
--accent: 210 40% 96.1%;
--accent-foreground: 222.2 47.4% 11.2%;
--destructive: 0 100% 50%;
--destructive-foreground: 210 40% 98%;
--ring: 215 20.2% 65.1%;
--radius: 0.5rem;
}
.dark {
--background: 224 71% 4%;
--foreground: 213 31% 91%;
--muted: 223 47% 11%;
--muted-foreground: 215.4 16.3% 56.9%;
--popover: 224 71% 4%;
--popover-foreground: 215 20.2% 65.1%;
--card: 224 71% 4%;
--card-foreground: 213 31% 91%;
--border: 216 34% 17%;
--input: 216 34% 17%;
--primary: 210 40% 98%;
--primary-foreground: 222.2 47.4% 1.2%;
--secondary: 222.2 47.4% 11.2%;
--secondary-foreground: 210 40% 98%;
--accent: 216 34% 17%;
--accent-foreground: 210 40% 98%;
--destructive: 0 63% 31%;
--destructive-foreground: 210 40% 98%;
--ring: 216 34% 17%;
--radius: 0.5rem;
}
@layer base {
* {
@apply border-border;
}
body {
@apply bg-background text-foreground;
font-feature-settings: "rlig" 1, "calt" 1;
}
}
@layer base {
* {
@apply border-border;
}
body {
@apply bg-background text-foreground;
font-feature-settings: "rlig" 1, "calt" 1;
}
}
body {
margin: 0;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen",
"Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue",
sans-serif;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
margin: 0;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen",
"Ubuntu",
"Cantarell",
"Fira Sans",
"Droid Sans",
"Helvetica Neue",
sans-serif;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
code {
font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New",
monospace;
font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New",
monospace;
}
/* The style below sets the cursor property of the element with the class .react-flow__pane to the default cursor.
The cursor: default; property value restores the browser's default cursor style for the targeted element. By applying this style, the element will no longer have a custom cursor appearance such as "grab" or any other custom cursor defined elsewhere in the application. Instead, it will revert to the default cursor style determined by the browser, typically an arrow-shaped cursor. */
.react-flow__pane {
cursor: default;
}
}

View file

@ -124,7 +124,7 @@ flow("Hey, have you heard of LangFlow?")`;
</button>
))}
</div>
<div className="overflow-hidden px-4 py-2 sm:p-4 sm:pb-0 sm:pt-2 w-full h-full rounded-lg shadow bg-white dark:bg-gray-800">
<div className="overflow-hidden px-4 sm:p-4 sm:pb-0 sm:pt-0 w-full h-full rounded-lg shadow bg-white dark:bg-gray-800">
<div className="items-center mb-2">
<div className="float-right">
<button

View file

@ -16,9 +16,11 @@ import Convert from "ansi-to-html";
export default function ChatMessage({
chat,
lockChat,
lastMessage,
}: {
chat: ChatMessageType;
lockChat: boolean;
lastMessage: boolean;
}) {
const convert = new Convert({ newline: true });
const [message, setMessage] = useState("");
@ -48,7 +50,7 @@ export default function ChatMessage({
"absolute transition-opacity duration-500 scale-150 " +
(lockChat ? "opacity-100" : "opacity-0")
}
src={AiIcon}
src={lastMessage ? AiIcon : AiIconStill}
/>
<img
className={

View file

@ -33,8 +33,15 @@ export default function ChatModal({
const ws = useRef<WebSocket | null>(null);
const [lockChat, setLockChat] = useState(false);
const isOpen = useRef(open);
const messagesRef = useRef(null);
const id = useRef(flow.id);
useEffect(() => {
if (messagesRef.current) {
messagesRef.current.scrollTop = messagesRef.current.scrollHeight;
}
}, [chatHistory]);
useEffect(() => {
isOpen.current = open;
}, [open]);
@ -175,10 +182,10 @@ export default function ChatModal({
try {
const urlWs =
process.env.NODE_ENV === "development"
? `ws://localhost:7860/chat/${id.current}`
? `ws://localhost:7860/api/v1/chat/${id.current}`
: `${window.location.protocol === "https:" ? "wss" : "ws"}://${
window.location.host
}/chat/${id.current}`;
}api/v1/chat/${id.current}`;
const newWs = new WebSocket(urlWs);
newWs.onopen = () => {
console.log("WebSocket connection established!");
@ -208,19 +215,11 @@ export default function ChatModal({
}
};
ws.current = newWs;
} catch {
} catch (error) {
if (flow.id === "") {
connectWS();
} else {
setErrorData({
title: "There was an error on web connection, please: ",
list: [
"Refresh the page",
"Use a new flow tab",
"Check if the backend is up",
],
});
}
console.log(error);
}
}
@ -229,7 +228,7 @@ export default function ChatModal({
return () => {
console.log("unmount");
console.log(ws);
if (ws) {
if (ws.current) {
ws.current.close();
}
};
@ -237,8 +236,9 @@ export default function ChatModal({
useEffect(() => {
if (
ws.current.readyState === ws.current.CLOSED ||
ws.current.readyState === ws.current.CLOSING
ws.current &&
(ws.current.readyState === ws.current.CLOSED ||
ws.current.readyState === ws.current.CLOSING)
) {
connectWS();
setLockChat(false);
@ -283,7 +283,9 @@ export default function ChatModal({
errors.concat(
template[t].required &&
template[t].show &&
(!template[t].value || template[t].value === "") &&
(template[t].value === undefined ||
template[t].value === null ||
template[t].value === "") &&
!reactFlowInstance
.getEdges()
.some(
@ -401,10 +403,18 @@ export default function ChatModal({
<HiX className="w-5 h-5" />
</button>
</div>
<div className="w-full h-full bg-white dark:bg-gray-800 border-t dark:border-t-gray-600 flex-col flex items-center overflow-scroll scrollbar-hide">
<div
ref={messagesRef}
className="w-full h-full bg-white dark:bg-gray-800 border-t dark:border-t-gray-600 flex-col flex items-center overflow-scroll scrollbar-hide"
>
{chatHistory.length > 0 ? (
chatHistory.map((c, i) => (
<ChatMessage lockChat={lockChat} chat={c} key={i} />
<ChatMessage
lockChat={lockChat}
chat={c}
lastMessage={chatHistory.length - 1 == i ? true : false}
key={i}
/>
))
) : (
<div className="flex flex-col h-full text-center justify-center w-full items-center align-middle">

View file

@ -5,6 +5,7 @@ import { DisclosureComponentType } from "../../../../types/components";
export default function DisclosureComponent({
button: { title, Icon, buttons = [] },
children,
openDisc,
}: DisclosureComponentType) {
return (
<Disclosure as="div" key={title}>
@ -27,14 +28,14 @@ export default function DisclosureComponent({
<div>
<ChevronRightIcon
className={`${
open ? "rotate-90 transform" : ""
open || openDisc ? "rotate-90 transform" : ""
} h-4 w-4 text-gray-800 dark:text-white`}
/>
</div>
</div>
</Disclosure.Button>
</div>
<Disclosure.Panel as="div" className="-mt-px">
<Disclosure.Panel as="div" className="-mt-px" static={openDisc}>
{children}
</Disclosure.Panel>
</>

View file

@ -1,13 +1,21 @@
import { Bars2Icon } from "@heroicons/react/24/outline";
import DisclosureComponent from "../DisclosureComponent";
import { nodeColors, nodeIcons, nodeNames } from "../../../../utils";
import { useContext, useEffect, useState } from "react";
import {
classNames,
nodeColors,
nodeIcons,
nodeNames,
} from "../../../../utils";
import { useContext, useEffect, useState, useRef } from "react";
import { typesContext } from "../../../../contexts/typesContext";
import { APIClassType, APIObjectType } from "../../../../types/api";
import TooltipReact from "../../../../components/ReactTooltipComponent";
import { MagnifyingGlassIcon } from "@heroicons/react/24/outline";
import ShadTooltip from "../../../../components/ShadTooltipComponent";
export default function ExtraSidebar() {
const { data } = useContext(typesContext);
const [dataFilter, setFilterData] = useState(data);
const [search, setSearch] = useState("");
function onDragStart(
event: React.DragEvent<any>,
@ -24,66 +32,102 @@ export default function ExtraSidebar() {
event.dataTransfer.setData("json", JSON.stringify(data));
}
function handleSearchInput(e: string) {
setFilterData((_) => {
let ret = {};
Object.keys(data).forEach((d: keyof APIObjectType, i) => {
ret[d] = {};
let keys = Object.keys(data[d]).filter((nd) =>
nd.toLowerCase().includes(e.toLowerCase())
);
keys.forEach((element) => {
ret[d][element] = data[d][element];
});
});
return ret;
});
}
return (
<div className="mt-1 w-full">
{Object.keys(data)
.sort()
.map((d: keyof APIObjectType, i) => (
<DisclosureComponent
key={i}
button={{
title: nodeNames[d] ?? nodeNames.unknown,
Icon: nodeIcons[d] ?? nodeIcons.unknown,
}}
>
<div className="p-2 flex flex-col gap-2">
{Object.keys(data[d])
.sort()
.map((t: string, k) => (
<TooltipReact
selector={t}
htmlContent={t}
position="right"
delayShow={1500}
key={k}
>
<div key={k} data-tooltip-id={t}>
<div
draggable
className={" cursor-grab border-l-8 rounded-l-md"}
style={{
borderLeftColor: nodeColors[d] ?? nodeColors.unknown,
}}
onDragStart={(event) =>
onDragStart(event, {
type: t,
node: data[d][t],
})
}
onDragEnd={() => {
document.body.removeChild(
document.getElementsByClassName(
"cursor-grabbing"
)[0]
);
}}
<>
<div className="relative mt-2 flex items-center mb-2 mx-2">
<input
type="text"
name="search"
id="search"
placeholder="Search nodes"
className="dark:text-white focus:outline-none block w-full rounded-md py-1.5 ps-3 pr-9 text-gray-900 shadow-sm ring-1 ring-inset ring-gray-300 placeholder:text-gray-400 sm:text-sm sm:leading-6 dark:ring-0 dark:bg-[#2d3747] dark:focus:outline-none"
onChange={(e) => {
handleSearchInput(e.target.value);
setSearch(e.target.value);
}}
/>
<div className="absolute inset-y-0 right-0 flex py-1.5 pr-3 items-center">
<MagnifyingGlassIcon className="h-5 w-5 dark:text-white"></MagnifyingGlassIcon>
</div>
</div>
<div className="mt-1 w-full">
{Object.keys(dataFilter)
.sort()
.map((d: keyof APIObjectType, i) =>
Object.keys(dataFilter[d]).length > 0 ? (
<DisclosureComponent
openDisc={search.length == 0 ? false : true}
key={i}
button={{
title: nodeNames[d] ?? nodeNames.unknown,
Icon: nodeIcons[d] ?? nodeIcons.unknown,
}}
>
<div className="p-2 flex flex-col gap-2">
{Object.keys(dataFilter[d])
.sort()
.map((t: string, k) => (
<ShadTooltip
content={t}
delayDuration={1500}
side="right"
>
<div className="flex w-full justify-between text-sm px-3 py-1 bg-white dark:bg-gray-800 items-center border-dashed border-gray-400 dark:border-gray-600 border-l-0 rounded-md rounded-l-none border">
<span className="text-black dark:text-white w-36 pr-1 truncate text-xs">
{t}
</span>
<Bars2Icon className="w-4 h-6 text-gray-400 dark:text-gray-600" />
<div key={k} data-tooltip-id={t}>
<div
draggable
className={" cursor-grab border-l-8 rounded-l-md"}
style={{
borderLeftColor:
nodeColors[d] ?? nodeColors.unknown,
}}
onDragStart={(event) =>
onDragStart(event, {
type: t,
node: data[d][t],
})
}
onDragEnd={() => {
document.body.removeChild(
document.getElementsByClassName(
"cursor-grabbing"
)[0]
);
}}
>
<div className="flex w-full justify-between text-sm px-3 py-1 bg-white dark:bg-gray-800 items-center border-dashed border-gray-400 dark:border-gray-600 border-l-0 rounded-md rounded-l-none border">
<span className="text-black dark:text-white w-36 pr-1 truncate text-xs">
{t}
</span>
<Bars2Icon className="w-4 h-6 text-gray-400 dark:text-gray-600" />
</div>
</div>
</div>
</div>
</div>
</TooltipReact>
))}
{Object.keys(data[d]).length === 0 && (
<div className="text-gray-400 text-center">Coming soon</div>
)}
</div>
</DisclosureComponent>
))}
</div>
</ShadTooltip>
))}
</div>
</DisclosureComponent>
) : (
<div key={i}></div>
)
)}
</div>
</>
);
}

View file

@ -77,7 +77,7 @@ const NodeToolbarComponent = (props) => {
props.openPopUp(<NodeModal data={props.data} />);
}}
>
<div className=" absolute right-1 top-0 text-red-600">
<div className=" absolute right-1 top-[-2px] text-red-600">
{Object.keys(props.data.node.template).some(
(t) =>
props.data.node.template[t].advanced &&

View file

@ -116,7 +116,7 @@ export default function TabsManagerComponent() {
</button>
</div>
</div>
<div className="w-full h-full">
<div className="w-full h-full dark:bg-gray-800">
<ReactFlowProvider>
{flows[tabIndex] ? (
<FlowPage flow={flows[tabIndex]}></FlowPage>

View file

@ -349,7 +349,9 @@ export default function FlowPage({ flow }: { flow: FlowType }) {
onSelectionChange={onSelectionChange}
nodesDraggable={!disableCopyPaste}
panOnDrag={!disableCopyPaste}
zoomOnDoubleClick={!disableCopyPaste}
selectNodesOnDrag={false}
className="theme-attribution"
>
<Background className="dark:bg-gray-900" />
<Controls className="[&>button]:text-black [&>button]:dark:bg-gray-800 hover:[&>button]:dark:bg-gray-700 [&>button]:dark:text-gray-400 [&>button]:dark:fill-gray-400 [&>button]:dark:border-gray-600"></Controls>

View file

@ -3,6 +3,7 @@ import {
ReactElement,
ReactFragment,
ReactNode,
SVGProps,
} from "react";
import { NodeDataType } from "../flow/index";
export type InputComponentType = {
@ -60,6 +61,7 @@ export type FileComponentType = {
export type DisclosureComponentType = {
children: ReactNode;
openDisc: boolean;
button: {
title: string;
Icon: ForwardRefExoticComponent<React.SVGProps<SVGSVGElement>>;

View file

@ -21,6 +21,7 @@ import { FlowType, NodeType } from "./types/flow";
import { APITemplateType, TemplateVariableType } from "./types/api";
import _ from "lodash";
import { ChromaIcon } from "./icons/ChromaIcon";
import { AnthropicIcon } from "./icons/Anthropic";
import { AirbyteIcon } from "./icons/Airbyte";
import { AzIcon } from "./icons/AzLogo";
import { BingIcon } from "./icons/Bing";
@ -159,6 +160,8 @@ export const nodeIcons: {
AirbyteJSONLoader: AirbyteIcon,
// SerpAPIWrapper: SerperIcon,
// AZLyricsLoader: AzIcon,
Anthropic: AnthropicIcon,
ChatAnthropic: AnthropicIcon,
BingSearchAPIWrapper: BingIcon,
BingSearchRun: BingIcon,
Cohere: CohereIcon,
@ -642,3 +645,58 @@ export function updateIds(newFlow, getNodeId) {
e.targetHandle;
});
}
export function groupByFamily(data, baseClasses) {
let arrOfParent: string[] = [];
let arrOfType: { family: string; type: string }[] = [];
Object.keys(data).map((d) => {
Object.keys(data[d]).map((n) => {
if (
data[d][n].base_classes.some((r) => baseClasses.split("\n").includes(r))
) {
arrOfParent.push(d);
}
});
});
let uniq = arrOfParent.filter(
(item, index) => arrOfParent.indexOf(item) === index
);
Object.keys(data).map((d) => {
Object.keys(data[d]).map((n) => {
baseClasses.split("\n").forEach((tol) => {
data[d][n].base_classes.forEach((data) => {
if (tol == data) {
arrOfType.push({
family: d,
type: data,
});
}
});
});
});
});
let groupedBy = arrOfType.filter((object, index, self) => {
const foundIndex = self.findIndex(
(o) => o.family === object.family && o.type === object.type
);
return foundIndex === index;
});
let groupedObj = groupedBy.reduce((result, item) => {
const existingGroup = result.find((group) => group.family === item.family);
if (existingGroup) {
existingGroup.type += `, ${item.type}`;
} else {
result.push({ family: item.family, type: item.type });
}
return result;
}, []);
return groupedObj;
}

View file

@ -19,7 +19,8 @@
"isolatedModules": true,
"noEmit": true,
"jsx": "react-jsx",
"noImplicitAny": false
"noImplicitAny": false,
"baseUrl": "."
},
"include": [
"src"

View file

@ -11,7 +11,7 @@ const apiRoutes = [
];
// Use environment variable to determine the target.
const target = process.env.VITE_PROXY_TARGET || "http://127.0.0.1:7860";
const target = process.env.VITE_PROXY_TARGET || "http://127.0.0.1:7860/api/v1";
const proxyTargets = apiRoutes.reduce((proxyObj, route) => {
proxyObj[route] = {

View file

@ -2,6 +2,7 @@ import json
from pathlib import Path
from typing import AsyncGenerator
from langflow.graph.graph.base import Graph
import pytest
from fastapi.testclient import TestClient
from httpx import AsyncClient
@ -46,7 +47,6 @@ def client():
def get_graph(_type="basic"):
"""Get a graph from a json file"""
from langflow.graph.graph import Graph
if _type == "basic":
path = pytest.BASIC_EXAMPLE_PATH

View file

@ -197,7 +197,7 @@
"y": 136.29836646158452
},
"data": {
"type": "PythonFunction",
"type": "PythonFunctionTool",
"node": {
"template": {
"code": {
@ -210,6 +210,26 @@
"type": "str",
"list": false
},
"description": {
"required": true,
"placeholder": "",
"show": true,
"multiline": true,
"value": "My description",
"name": "description",
"type": "str",
"list": false
},
"name": {
"required": true,
"placeholder": "",
"show": true,
"multiline": true,
"value": "My Tool",
"name": "name",
"type": "str",
"list": false
},
"_type": "python_function"
},
"description": "Python function to be executed.",

View file

@ -5,7 +5,7 @@ from langflow.settings import settings
# check that all agents are in settings.agents
# are in json_response["agents"]
def test_agents_settings(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
agents = json_response["agents"]
@ -13,7 +13,7 @@ def test_agents_settings(client: TestClient):
def test_zero_shot_agent(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
agents = json_response["agents"]
@ -52,7 +52,7 @@ def test_zero_shot_agent(client: TestClient):
def test_json_agent(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
agents = json_response["agents"]
@ -87,7 +87,7 @@ def test_json_agent(client: TestClient):
def test_csv_agent(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
agents = json_response["agents"]
@ -126,7 +126,7 @@ def test_csv_agent(client: TestClient):
def test_initialize_agent(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
agents = json_response["agents"]

View file

@ -1,10 +1,10 @@
import json
from langflow.graph import Graph
from langflow.processing.process import load_or_build_langchain_object
import pytest
from langflow.interface.run import (
build_graph,
build_langchain_object_with_caching,
load_or_build_langchain_object,
)
@ -62,7 +62,7 @@ def test_build_langchain_object_with_caching(basic_data_graph):
# Test build_graph
def test_build_graph(basic_data_graph):
graph = build_graph(basic_data_graph)
graph = Graph.from_payload(basic_data_graph)
assert graph is not None
assert len(graph.nodes) == len(basic_data_graph["nodes"])
assert len(graph.edges) == len(basic_data_graph["edges"])

View file

@ -3,7 +3,7 @@ from langflow.settings import settings
def test_chains_settings(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -12,7 +12,7 @@ def test_chains_settings(client: TestClient):
# Test the ConversationChain object
def test_conversation_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -94,7 +94,7 @@ def test_conversation_chain(client: TestClient):
def test_llm_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -152,7 +152,7 @@ def test_llm_chain(client: TestClient):
def test_llm_checker_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -228,7 +228,7 @@ def test_llm_checker_chain(client: TestClient):
def test_llm_math_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -306,7 +306,7 @@ def test_llm_math_chain(client: TestClient):
def test_series_character_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -368,7 +368,7 @@ def test_series_character_chain(client: TestClient):
def test_mid_journey_prompt_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -407,7 +407,7 @@ def test_mid_journey_prompt_chain(client: TestClient):
def test_time_travel_guide_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]

View file

@ -1,13 +1,32 @@
# Test this:
from langflow.interface.importing.utils import get_function
import pytest
from langflow.interface.tools.custom import PythonFunction
from langflow.interface.tools.custom import PythonFunctionTool, PythonFunction
from langflow.utils import constants
def test_python_function_tool():
"""Test Python function"""
code = constants.DEFAULT_PYTHON_FUNCTION
func = get_function(code)
func = PythonFunctionTool(name="Test", description="Testing", code=code, func=func)
assert func("text") == "text"
# the tool decorator should raise an error if
# the function is not str -> str
# This raises ValidationError
with pytest.raises(SyntaxError):
code = pytest.CODE_WITH_SYNTAX_ERROR
func = get_function(code)
func = PythonFunctionTool(
name="Test", description="Testing", code=code, func=func
)
def test_python_function():
"""Test Python function"""
func = PythonFunction(code=constants.DEFAULT_PYTHON_FUNCTION)
assert func.get_function()("text") == "text"
assert get_function(func.code)("text") == "text"
# the tool decorator should raise an error if
# the function is not str -> str

View file

@ -4,7 +4,7 @@ from langflow.interface.tools.constants import CUSTOM_TOOLS
def test_get_all(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
# We need to test the custom nodes
@ -21,7 +21,7 @@ import math
def square(x):
return x ** 2
"""
response1 = client.post("/validate/code", json={"code": code1})
response1 = client.post("api/v1/validate/code", json={"code": code1})
assert response1.status_code == 200
assert response1.json() == {"imports": {"errors": []}, "function": {"errors": []}}
@ -32,7 +32,7 @@ import non_existent_module
def square(x):
return x ** 2
"""
response2 = client.post("/validate/code", json={"code": code2})
response2 = client.post("api/v1/validate/code", json={"code": code2})
assert response2.status_code == 200
assert response2.json() == {
"imports": {"errors": ["No module named 'non_existent_module'"]},
@ -46,7 +46,7 @@ import math
def square(x)
return x ** 2
"""
response3 = client.post("/validate/code", json={"code": code3})
response3 = client.post("api/v1/validate/code", json={"code": code3})
assert response3.status_code == 200
assert response3.json() == {
"imports": {"errors": []},
@ -54,11 +54,11 @@ def square(x)
}
# Test case with invalid JSON payload
response4 = client.post("/validate/code", json={"invalid_key": code1})
response4 = client.post("api/v1/validate/code", json={"invalid_key": code1})
assert response4.status_code == 422
# Test case with an empty code string
response5 = client.post("/validate/code", json={"code": ""})
response5 = client.post("api/v1/validate/code", json={"code": ""})
assert response5.status_code == 200
assert response5.json() == {"imports": {"errors": []}, "function": {"errors": []}}
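The expected payloads above imply a two-phase check: parse for syntax errors, then try importing each imported module. A hedged sketch of that shape (the function name and the placement of syntax errors under "function" are assumptions, not the endpoint's actual internals):

import ast
import importlib

def validate_code_sketch(code: str) -> dict:
    """Two-phase check: syntax first, then importability of each import."""
    result = {"imports": {"errors": []}, "function": {"errors": []}}
    try:
        tree = ast.parse(code)
    except SyntaxError as exc:
        result["function"]["errors"].append(str(exc))
        return result
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                try:
                    importlib.import_module(alias.name)
                except ImportError as exc:
                    result["imports"]["errors"].append(str(exc))
    return result

assert validate_code_sketch("import non_existent_module") == {
    "imports": {"errors": ["No module named 'non_existent_module'"]},
    "function": {"errors": []},
}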
@ -69,7 +69,7 @@ import math
def square(x)
return x ** 2
"""
response6 = client.post("/validate/code", json={"code": code6})
response6 = client.post("api/v1/validate/code", json={"code": code6})
assert response6.status_code == 200
assert response6.json() == {
"imports": {"errors": []},
@ -95,13 +95,13 @@ INVALID_PROMPT = "This is an invalid prompt without any input variable."
def test_valid_prompt(client: TestClient):
response = client.post("/validate/prompt", json={"template": VALID_PROMPT})
response = client.post("api/v1/validate/prompt", json={"template": VALID_PROMPT})
assert response.status_code == 200
assert response.json() == {"input_variables": ["product"]}
def test_invalid_prompt(client: TestClient):
response = client.post("/validate/prompt", json={"template": INVALID_PROMPT})
response = client.post("api/v1/validate/prompt", json={"template": INVALID_PROMPT})
assert response.status_code == 200
assert response.json() == {"input_variables": []}
@ -116,7 +116,7 @@ def test_invalid_prompt(client: TestClient):
],
)
def test_various_prompts(client, prompt, expected_input_variables):
response = client.post("/validate/prompt", json={"template": prompt})
response = client.post("api/v1/validate/prompt", json={"template": prompt})
assert response.status_code == 200
assert response.json() == {
"input_variables": expected_input_variables,

View file

@ -1,20 +1,22 @@
from typing import Type, Union
from langflow.graph.edge.base import Edge
from langflow.graph.vertex.base import Vertex
import pytest
from langchain.chains.base import Chain
from langchain.llms.fake import FakeListLLM
from langflow.graph import Edge, Graph, Node
from langflow.graph.nodes import (
AgentNode,
ChainNode,
FileToolNode,
LLMNode,
PromptNode,
ToolkitNode,
ToolNode,
WrapperNode,
from langflow.graph import Graph
from langflow.graph.vertex.types import (
AgentVertex,
ChainVertex,
FileToolVertex,
LLMVertex,
PromptVertex,
ToolkitVertex,
ToolVertex,
WrapperVertex,
)
from langflow.interface.run import get_result_and_thought
from langflow.processing.process import get_result_and_thought
from langflow.utils.payload import get_root_node
# Test cases for the graph module
@ -23,7 +25,7 @@ from langflow.utils.payload import get_root_node
# BASIC_EXAMPLE_PATH, COMPLEX_EXAMPLE_PATH, OPENAPI_EXAMPLE_PATH
def get_node_by_type(graph, node_type: Type[Node]) -> Union[Node, None]:
def get_node_by_type(graph, node_type: Type[Vertex]) -> Union[Vertex, None]:
"""Get a node by type"""
return next((node for node in graph.nodes if isinstance(node, node_type)), None)
@ -33,7 +35,7 @@ def test_graph_structure(basic_graph):
assert len(basic_graph.nodes) > 0
assert len(basic_graph.edges) > 0
for node in basic_graph.nodes:
assert isinstance(node, Node)
assert isinstance(node, Vertex)
for edge in basic_graph.edges:
assert isinstance(edge, Edge)
assert edge.source in basic_graph.nodes
@ -156,14 +158,16 @@ def test_get_node_neighbors_complex(complex_graph):
tool_neighbors = complex_graph.get_nodes_with_target(tool)
assert tool_neighbors is not None
# Check if there is a PythonFunctionTool in the tool's neighbors
assert any("PythonFunction" in neighbor.data["type"] for neighbor in tool_neighbors)
assert any(
"PythonFunctionTool" in neighbor.data["type"] for neighbor in tool_neighbors
)
def test_get_node(basic_graph):
"""Test getting a single node"""
node_id = basic_graph.nodes[0].id
node = basic_graph.get_node(node_id)
assert isinstance(node, Node)
assert isinstance(node, Vertex)
assert node.id == node_id
@ -172,7 +176,7 @@ def test_build_nodes(basic_graph):
assert len(basic_graph.nodes) == len(basic_graph._nodes)
for node in basic_graph.nodes:
assert isinstance(node, Node)
assert isinstance(node, Vertex)
def test_build_edges(basic_graph):
@ -180,8 +184,8 @@ def test_build_edges(basic_graph):
assert len(basic_graph.edges) == len(basic_graph._edges)
for edge in basic_graph.edges:
assert isinstance(edge, Edge)
assert isinstance(edge.source, Node)
assert isinstance(edge.target, Node)
assert isinstance(edge.source, Vertex)
assert isinstance(edge.target, Vertex)
def test_get_root_node(basic_graph, complex_graph):
@ -189,13 +193,13 @@ def test_get_root_node(basic_graph, complex_graph):
assert isinstance(basic_graph, Graph)
root = get_root_node(basic_graph)
assert root is not None
assert isinstance(root, Node)
assert isinstance(root, Vertex)
assert root.data["type"] == "TimeTravelGuideChain"
# For the complex example, the root node is a ZeroShotAgent too
assert isinstance(complex_graph, Graph)
root = get_root_node(complex_graph)
assert root is not None
assert isinstance(root, Node)
assert isinstance(root, Vertex)
assert root.data["type"] == "ZeroShotAgent"
@ -237,11 +241,10 @@ def test_build_params(basic_graph):
assert "memory" in root.params
def test_build(basic_graph, complex_graph, openapi_graph):
def test_build(basic_graph, complex_graph):
"""Test Node's build method"""
assert_agent_was_built(basic_graph)
assert_agent_was_built(complex_graph)
assert_agent_was_built(openapi_graph)
def assert_agent_was_built(graph):
@ -255,14 +258,14 @@ def assert_agent_was_built(graph):
def test_agent_node_build(complex_graph):
agent_node = get_node_by_type(complex_graph, AgentNode)
agent_node = get_node_by_type(complex_graph, AgentVertex)
assert agent_node is not None
built_object = agent_node.build()
assert built_object is not None
def test_tool_node_build(complex_graph):
tool_node = get_node_by_type(complex_graph, ToolNode)
tool_node = get_node_by_type(complex_graph, ToolVertex)
assert tool_node is not None
built_object = tool_node.build()
assert built_object is not None
@ -270,7 +273,7 @@ def test_tool_node_build(complex_graph):
def test_chain_node_build(complex_graph):
chain_node = get_node_by_type(complex_graph, ChainNode)
chain_node = get_node_by_type(complex_graph, ChainVertex)
assert chain_node is not None
built_object = chain_node.build()
assert built_object is not None
@ -278,7 +281,7 @@ def test_chain_node_build(complex_graph):
def test_prompt_node_build(complex_graph):
prompt_node = get_node_by_type(complex_graph, PromptNode)
prompt_node = get_node_by_type(complex_graph, PromptVertex)
assert prompt_node is not None
built_object = prompt_node.build()
assert built_object is not None
@ -286,7 +289,7 @@ def test_prompt_node_build(complex_graph):
def test_llm_node_build(basic_graph):
llm_node = get_node_by_type(basic_graph, LLMNode)
llm_node = get_node_by_type(basic_graph, LLMVertex)
assert llm_node is not None
built_object = llm_node.build()
assert built_object is not None
@ -294,7 +297,7 @@ def test_llm_node_build(basic_graph):
def test_toolkit_node_build(openapi_graph):
toolkit_node = get_node_by_type(openapi_graph, ToolkitNode)
toolkit_node = get_node_by_type(openapi_graph, ToolkitVertex)
assert toolkit_node is not None
built_object = toolkit_node.build()
assert built_object is not None
@ -302,7 +305,7 @@ def test_toolkit_node_build(openapi_graph):
def test_file_tool_node_build(openapi_graph):
file_tool_node = get_node_by_type(openapi_graph, FileToolNode)
file_tool_node = get_node_by_type(openapi_graph, FileToolVertex)
assert file_tool_node is not None
built_object = file_tool_node.build()
assert built_object is not None
@ -310,7 +313,7 @@ def test_file_tool_node_build(openapi_graph):
def test_wrapper_node_build(openapi_graph):
wrapper_node = get_node_by_type(openapi_graph, WrapperNode)
wrapper_node = get_node_by_type(openapi_graph, WrapperVertex)
assert wrapper_node is not None
built_object = wrapper_node.build()
assert built_object is not None
@ -325,7 +328,7 @@ def test_get_result_and_thought(basic_graph):
message = "Hello"
# Find the node that is an LLMVertex and change the
# _built_object to a FakeListLLM
llm_node = get_node_by_type(basic_graph, LLMNode)
llm_node = get_node_by_type(basic_graph, LLMVertex)
assert llm_node is not None
llm_node._built_object = FakeListLLM(responses=responses)
llm_node._built = True
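Swapping the vertex's _built_object for a FakeListLLM is what keeps this build test deterministic and offline. A self-contained sketch of the same trick outside the graph (the chain here is illustrative; the imports match the langchain version pinned by this commit):

from langchain.chains import LLMChain
from langchain.llms.fake import FakeListLLM
from langchain.prompts import PromptTemplate

llm = FakeListLLM(responses=["All done!"])  # returns canned responses in order
chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template("{question}"))
assert chain.run(question="Anything") == "All done!"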

View file

@ -3,107 +3,107 @@ from langflow.settings import settings
def test_llms_settings(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
llms = json_response["llms"]
assert set(llms.keys()) == set(settings.llms)
def test_hugging_face_hub(client: TestClient):
response = client.get("/all")
assert response.status_code == 200
json_response = response.json()
language_models = json_response["llms"]
# def test_hugging_face_hub(client: TestClient):
# response = client.get("api/v1/all")
# assert response.status_code == 200
# json_response = response.json()
# language_models = json_response["llms"]
model = language_models["HuggingFaceHub"]
template = model["template"]
# model = language_models["HuggingFaceHub"]
# template = model["template"]
assert template["cache"] == {
"required": False,
"placeholder": "",
"show": False,
"multiline": False,
"password": False,
"name": "cache",
"type": "bool",
"list": False,
"advanced": False,
}
assert template["verbose"] == {
"required": False,
"placeholder": "",
"show": False,
"multiline": False,
"value": False,
"password": False,
"name": "verbose",
"type": "bool",
"list": False,
"advanced": False,
}
assert template["client"] == {
"required": False,
"placeholder": "",
"show": False,
"multiline": False,
"password": False,
"name": "client",
"type": "Any",
"list": False,
"advanced": False,
}
assert template["repo_id"] == {
"required": False,
"placeholder": "",
"show": True,
"multiline": False,
"value": "gpt2",
"password": False,
"name": "repo_id",
"type": "str",
"list": False,
"advanced": False,
}
assert template["task"] == {
"required": True,
"placeholder": "",
"show": True,
"multiline": False,
"password": False,
"options": ["text-generation", "text2text-generation"],
"name": "task",
"type": "str",
"list": True,
"advanced": True,
}
assert template["model_kwargs"] == {
"required": False,
"placeholder": "",
"show": True,
"multiline": False,
"password": False,
"name": "model_kwargs",
"type": "code",
"list": False,
"advanced": True,
}
assert template["huggingfacehub_api_token"] == {
"required": False,
"placeholder": "",
"show": True,
"multiline": False,
"password": True,
"name": "huggingfacehub_api_token",
"display_name": "HuggingFace Hub API Token",
"type": "str",
"list": False,
"advanced": False,
}
# assert template["cache"] == {
# "required": False,
# "placeholder": "",
# "show": False,
# "multiline": False,
# "password": False,
# "name": "cache",
# "type": "bool",
# "list": False,
# "advanced": False,
# }
# assert template["verbose"] == {
# "required": False,
# "placeholder": "",
# "show": False,
# "multiline": False,
# "value": False,
# "password": False,
# "name": "verbose",
# "type": "bool",
# "list": False,
# "advanced": False,
# }
# assert template["client"] == {
# "required": False,
# "placeholder": "",
# "show": False,
# "multiline": False,
# "password": False,
# "name": "client",
# "type": "Any",
# "list": False,
# "advanced": False,
# }
# assert template["repo_id"] == {
# "required": False,
# "placeholder": "",
# "show": True,
# "multiline": False,
# "value": "gpt2",
# "password": False,
# "name": "repo_id",
# "type": "str",
# "list": False,
# "advanced": False,
# }
# assert template["task"] == {
# "required": True,
# "placeholder": "",
# "show": True,
# "multiline": False,
# "password": False,
# "options": ["text-generation", "text2text-generation"],
# "name": "task",
# "type": "str",
# "list": True,
# "advanced": True,
# }
# assert template["model_kwargs"] == {
# "required": False,
# "placeholder": "",
# "show": True,
# "multiline": False,
# "password": False,
# "name": "model_kwargs",
# "type": "code",
# "list": False,
# "advanced": True,
# }
# assert template["huggingfacehub_api_token"] == {
# "required": False,
# "placeholder": "",
# "show": True,
# "multiline": False,
# "password": True,
# "name": "huggingfacehub_api_token",
# "display_name": "HuggingFace Hub API Token",
# "type": "str",
# "list": False,
# "advanced": False,
# }
def test_openai(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
language_models = json_response["llms"]
@ -333,7 +333,7 @@ def test_openai(client: TestClient):
def test_chat_open_ai(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
language_models = json_response["llms"]
@ -482,3 +482,78 @@ def test_chat_open_ai(client: TestClient):
"ChatOpenAI",
"BaseLanguageModel",
}
# Commenting this out for now, as it requires activating the nodes
# def test_azure_open_ai(client: TestClient):
# response = client.get("/all")
# assert response.status_code == 200
# json_response = response.json()
# language_models = json_response["llms"]
# model = language_models["AzureOpenAI"]
# template = model["template"]
# assert template["model_name"]["show"] is False
# assert template["deployment_name"] == {
# "required": False,
# "placeholder": "",
# "show": True,
# "multiline": False,
# "value": "",
# "password": False,
# "name": "deployment_name",
# "advanced": False,
# "type": "str",
# "list": False,
# }
# def test_azure_chat_open_ai(client: TestClient):
# response = client.get("/all")
# assert response.status_code == 200
# json_response = response.json()
# language_models = json_response["llms"]
# model = language_models["AzureChatOpenAI"]
# template = model["template"]
# assert template["model_name"]["show"] is False
# assert template["deployment_name"] == {
# "required": False,
# "placeholder": "",
# "show": True,
# "multiline": False,
# "value": "",
# "password": False,
# "name": "deployment_name",
# "advanced": False,
# "type": "str",
# "list": False,
# }
# assert template["openai_api_type"] == {
# "required": False,
# "placeholder": "",
# "show": False,
# "multiline": False,
# "value": "azure",
# "password": False,
# "name": "openai_api_type",
# "display_name": "OpenAI API Type",
# "advanced": False,
# "type": "str",
# "list": False,
# }
# assert template["openai_api_version"] == {
# "required": False,
# "placeholder": "",
# "show": True,
# "multiline": False,
# "value": "2023-03-15-preview",
# "password": False,
# "name": "openai_api_version",
# "display_name": "OpenAI API Version",
# "advanced": False,
# "type": "str",
# "list": False,
# }
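If these blocks are expected to come back once the nodes are reactivated, a skip marker would keep them visible in test reports instead of silently commented out; a hedged suggestion, not part of this commit:

import pytest

@pytest.mark.skip(reason="HuggingFaceHub/Azure nodes are deactivated for now")
def test_hugging_face_hub(client):
    ...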

View file

@ -2,7 +2,7 @@ import json
import pytest
from langchain.chains.base import Chain
from langflow import load_flow_from_json
from langflow.processing.process import load_flow_from_json
from langflow.graph import Graph
from langflow.utils.payload import get_root_node
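With the loader relocated, downstream callers import it from its new home. A hedged usage sketch ("flow.json" is a placeholder path, and the exact return type depends on the flow):

from langflow.processing.process import load_flow_from_json

flow = load_flow_from_json("flow.json")  # placeholder path
# The returned object is the built langchain artifact for the flow,
# commonly a Chain; how it is invoked depends on the flow's inputs.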

View file

@ -3,7 +3,7 @@ from langflow.settings import settings
def test_prompts_settings(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
prompts = json_response["prompts"]
@ -11,7 +11,7 @@ def test_prompts_settings(client: TestClient):
def test_prompt_template(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
prompts = json_response["prompts"]
@ -89,7 +89,7 @@ def test_prompt_template(client: TestClient):
def test_few_shot_prompt_template(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
prompts = json_response["prompts"]
@ -168,7 +168,7 @@ def test_few_shot_prompt_template(client: TestClient):
def test_zero_shot_prompt(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
prompts = json_response["prompts"]
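For context on what the "few_shot" template wraps, a hedged sketch of langchain's FewShotPromptTemplate with illustrative values:

from langchain.prompts import FewShotPromptTemplate, PromptTemplate

example_prompt = PromptTemplate.from_template("Q: {question}\nA: {answer}")
few_shot = FewShotPromptTemplate(
    examples=[{"question": "2+2?", "answer": "4"}],
    example_prompt=example_prompt,
    suffix="Q: {question}\nA:",
    input_variables=["question"],
)
print(few_shot.format(question="3+3?"))  # renders the example, then the suffix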

View file

@ -5,7 +5,7 @@ from langflow.settings import settings
# check that all vectorstores in settings.vectorstores
# are in json_response["vectorstores"]
def test_vectorstores_settings(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
vectorstores = json_response["vectorstores"]

View file

@ -5,17 +5,17 @@ from fastapi.testclient import TestClient
def test_websocket_connection(client: TestClient):
with client.websocket_connect("/chat/test_client") as websocket:
with client.websocket_connect("api/v1/chat/test_client") as websocket:
assert websocket.scope["client"] == ["testclient", 50000]
assert websocket.scope["path"] == "/chat/test_client"
assert websocket.scope["path"] == "/api/v1/chat/test_client"
def test_chat_history(client: TestClient):
# Mock the process_graph function to return a specific value
with patch("langflow.api.chat_manager.process_graph") as mock_process_graph:
with patch("langflow.chat.manager.process_graph") as mock_process_graph:
mock_process_graph.return_value = ("Hello, I'm a mock response!", "")
with client.websocket_connect("/chat/test_client") as websocket:
with client.websocket_connect("api/v1/chat/test_client") as websocket:
# First message should be the history
history = websocket.receive_json()
assert history == [] # Empty history
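The new patch target follows the module move: unittest.mock must patch a name where the consuming module looks it up, not where it is defined. A self-contained illustration of that rule (json.dumps stands in only to keep the example runnable):

from unittest.mock import patch
import json

with patch("json.dumps", return_value="mocked"):
    assert json.dumps({"a": 1}) == "mocked"   # patched lookup
assert json.dumps({"a": 1}) == '{"a": 1}'     # restored afterwards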