diff --git a/.githooks/pre-commit b/.githooks/pre-commit index f5008c586..ef67eaa37 100755 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -1,2 +1,6 @@ +#!/bin/sh -make format \ No newline at end of file +added_files=$(git diff --name-only --cached --diff-filter=d) + +make format +git add ${added_files} \ No newline at end of file diff --git a/Makefile b/Makefile index 15337f65b..baf4220ff 100644 --- a/Makefile +++ b/Makefile @@ -43,6 +43,7 @@ install_backend: poetry install backend: + make install_backend poetry run uvicorn langflow.main:app --port 7860 --reload --log-level debug build_frontend: diff --git a/poetry.lock b/poetry.lock index fe1014755..43a305b1a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -148,6 +148,27 @@ files = [ {file = "aiostream-0.4.5.tar.gz", hash = "sha256:3ecbf87085230fbcd9605c32ca20c4fb41af02c71d076eab246ea22e35947d88"}, ] +[[package]] +name = "anthropic" +version = "0.2.10" +description = "Library for accessing the anthropic API" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "anthropic-0.2.10-py3-none-any.whl", hash = "sha256:a007496207fd186b0bcb9592b00ca130069d2a427f3d6f602a61dbbd1ac6316e"}, + {file = "anthropic-0.2.10.tar.gz", hash = "sha256:e4da061a86d8ffb86072c0b0feaf219a3a4f7dfddd4224df9ba769e469498c19"}, +] + +[package.dependencies] +aiohttp = "*" +httpx = "*" +requests = "*" +tokenizers = "*" + +[package.extras] +dev = ["black (>=22.3.0)", "pytest"] + [[package]] name = "anyio" version = "3.7.0" @@ -1228,14 +1249,14 @@ importlib-resources = {version = ">=5.0", markers = "python_version < \"3.10\""} [[package]] name = "fastapi" -version = "0.95.2" +version = "0.96.0" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "fastapi-0.95.2-py3-none-any.whl", hash = "sha256:d374dbc4ef2ad9b803899bd3360d34c534adc574546e25314ab72c0c4411749f"}, - {file = "fastapi-0.95.2.tar.gz", hash = "sha256:4d9d3e8c71c73f11874bcf5e33626258d143252e329a01002f767306c64fb982"}, + {file = "fastapi-0.96.0-py3-none-any.whl", hash = "sha256:b8e11fe81e81eab4e1504209917338e0b80f783878a42c2b99467e5e1019a1e9"}, + {file = "fastapi-0.96.0.tar.gz", hash = "sha256:71232d47c2787446991c81c41c249f8a16238d52d779c0e6b43927d3773dbe3c"}, ] [package.dependencies] @@ -2356,20 +2377,21 @@ test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] [[package]] name = "langchain" -version = "0.0.186" +version = "0.0.194" description = "Building applications with LLMs through composability" category = "main" optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langchain-0.0.186-py3-none-any.whl", hash = "sha256:c53ac8943351602dbde84759d32d57fe2e6599279576935a004688e43ee8ffbf"}, - {file = "langchain-0.0.186.tar.gz", hash = "sha256:36d6d3872727a6f7d6db1b05b13caac35fed19a0d395d2264ed82aae53cfddfd"}, + {file = "langchain-0.0.194-py3-none-any.whl", hash = "sha256:b1d47f96c3556eebb5b330492e64fed1f5585c943be7e1fe675ff31a84b010e3"}, + {file = "langchain-0.0.194.tar.gz", hash = "sha256:480c9cbce12161b3aece3b6fdf03f533c157539ae6243712b61b0d2558f9a96c"}, ] [package.dependencies] aiohttp = ">=3.8.3,<4.0.0" async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""} dataclasses-json = ">=0.5.7,<0.6.0" +langchainplus-sdk = ">=0.0.6" numexpr = ">=2.8.4,<3.0.0" numpy = ">=1,<2" openapi-schema-pydantic = ">=1.2,<2.0" @@ -2380,12 +2402,12 @@ SQLAlchemy = ">=1.4,<3" tenacity = 
">=8.1.0,<9.0.0" [package.extras] -all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.2.6,<0.3.0)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=3,<4)", "deeplake (>=3.3.0,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=2.8.6,<3.0.0)", "elasticsearch (>=8,<9)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jina (>=3.14,<4.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.1.dev3,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "momento (>=1.5.0,<2.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<3.0.0)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.1.2,<2.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "spacy (>=3,<4)", "steamship (>=2.16.9,<3.0.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tiktoken (>=0.3.2,<0.4.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"] +all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.2.6,<0.3.0)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=3,<4)", "deeplake (>=3.3.0,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=2.8.6,<3.0.0)", "elasticsearch (>=8,<9)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jina (>=3.14,<4.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.1.dev3,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "momento (>=1.5.0,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<3.0.0)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "pyvespa (>=0.33.0,<0.34.0)", 
"qdrant-client (>=1.1.2,<2.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.6.1,<0.7.0)", "spacy (>=3,<4)", "steamship (>=2.16.9,<3.0.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tigrisdb (>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.4.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"] azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "openai (>=0,<1)"] cohere = ["cohere (>=3,<4)"] docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"] embeddings = ["sentence-transformers (>=2,<3)"] -extended-testing = ["atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "chardet (>=5.1.0,<6.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "psychicapi (>=0.2,<0.3)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "telethon (>=1.28.5,<2.0.0)", "tqdm (>=4.48.0)", "zep-python (>=0.30,<0.31)"] +extended-testing = ["atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "chardet (>=5.1.0,<6.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "psychicapi (>=0.5,<0.6)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "telethon (>=1.28.5,<2.0.0)", "tqdm (>=4.48.0)", "zep-python (>=0.31)"] llms = ["anthropic (>=0.2.6,<0.3.0)", "cohere (>=3,<4)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"] openai = ["openai (>=0,<1)", "tiktoken (>=0.3.2,<0.4.0)"] qdrant = ["qdrant-client (>=1.1.2,<2.0.0)"] @@ -2418,6 +2440,23 @@ typing-inspect = "0.8.0" [package.extras] test = ["psutil", "pytest", "pytest-asyncio"] +[[package]] +name = "langchainplus-sdk" +version = "0.0.6" +description = "Client library to connect to the LangChainPlus LLM Tracing and Evaluation Platform." 
+category = "main" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "langchainplus_sdk-0.0.6-py3-none-any.whl", hash = "sha256:43fe01c66442b88403c969b8812f6be81e023c0d2a6d5d3572a8d87961438658"}, + {file = "langchainplus_sdk-0.0.6.tar.gz", hash = "sha256:c911a98fd2d02baa48f742b7d700fd6a55f11c9a545ee5d66b08825940c9a32e"}, +] + +[package.dependencies] +pydantic = ">=1,<2" +requests = ">=2,<3" +tenacity = ">=8.1.0,<9.0.0" + [[package]] name = "linkify-it-py" version = "2.0.2" @@ -6257,4 +6296,4 @@ deploy = ["langchain-serve"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "0b72cff85b2228a6f41d81ac2207cecf1d94c6adb914a3ef4fb19774d757f9f6" +content-hash = "94e4a4ca5ec150ef6f673d4a3ab4964a876ce795ae398b78c76719695c63758e" diff --git a/pyproject.toml b/pyproject.toml index c3e8ca789..436c0684c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,14 +22,14 @@ langflow = "langflow.__main__:main" [tool.poetry.dependencies] python = ">=3.9,<3.12" -fastapi = "^0.95.0" +fastapi = "^0.96.0" uvicorn = "^0.20.0" beautifulsoup4 = "^4.11.2" google-search-results = "^2.4.1" google-api-python-client = "^2.79.0" typer = "^0.7.0" gunicorn = "^20.1.0" -langchain = "^0.0.186" +langchain = "^0.0.194" openai = "^0.27.7" types-pyyaml = "^6.0.12.8" dill = "^0.3.6" @@ -58,6 +58,7 @@ sentence-transformers = "^2.2.2" ctransformers = "^0.2.2" cohere = "^4.6.0" faiss-cpu = "^1.7.4" +anthropic = "^0.2.9" [tool.poetry.group.dev.dependencies] @@ -77,6 +78,15 @@ types-pillow = "^9.5.0.2" [tool.poetry.extras] deploy = ["langchain-serve"] +[tool.pytest.ini_options] +minversion = "6.0" +addopts = "-ra" +testpaths = ["tests", "integration"] +console_output_style = "progress" +filterwarnings = ["ignore::DeprecationWarning"] +log_cli = true + + [tool.ruff] line-length = 120 diff --git a/src/backend/langflow/__init__.py b/src/backend/langflow/__init__.py index 35fe814d2..17b1d940c 100644 --- a/src/backend/langflow/__init__.py +++ b/src/backend/langflow/__init__.py @@ -1,4 +1,4 @@ from langflow.cache import cache_manager -from langflow.interface.loading import load_flow_from_json +from langflow.processing.process import load_flow_from_json __all__ = ["load_flow_from_json", "cache_manager"] diff --git a/src/backend/langflow/api/__init__.py b/src/backend/langflow/api/__init__.py index e69de29bb..f887c47e1 100644 --- a/src/backend/langflow/api/__init__.py +++ b/src/backend/langflow/api/__init__.py @@ -0,0 +1,3 @@ +from langflow.api.router import router + +__all__ = ["router"] diff --git a/src/backend/langflow/api/router.py b/src/backend/langflow/api/router.py new file mode 100644 index 000000000..23b5aa1c5 --- /dev/null +++ b/src/backend/langflow/api/router.py @@ -0,0 +1,8 @@ +# Router for base api +from fastapi import APIRouter +from langflow.api.v1 import chat_router, endpoints_router, validate_router + +router = APIRouter(prefix="/api/v1", tags=["api"]) +router.include_router(chat_router) +router.include_router(endpoints_router) +router.include_router(validate_router) diff --git a/src/backend/langflow/api/v1/__init__.py b/src/backend/langflow/api/v1/__init__.py new file mode 100644 index 000000000..d835b4535 --- /dev/null +++ b/src/backend/langflow/api/v1/__init__.py @@ -0,0 +1,5 @@ +from langflow.api.v1.endpoints import router as endpoints_router +from langflow.api.v1.validate import router as validate_router +from langflow.api.v1.chat import router as chat_router + +__all__ = ["chat_router", "endpoints_router", "validate_router"] diff --git 
a/src/backend/langflow/api/base.py b/src/backend/langflow/api/v1/base.py similarity index 96% rename from src/backend/langflow/api/base.py rename to src/backend/langflow/api/v1/base.py index 8cddc52e4..6941bedf3 100644 --- a/src/backend/langflow/api/base.py +++ b/src/backend/langflow/api/v1/base.py @@ -1,6 +1,6 @@ from pydantic import BaseModel, validator -from langflow.graph.utils import extract_input_variables_from_prompt +from langflow.interface.utils import extract_input_variables_from_prompt class CacheResponse(BaseModel): diff --git a/src/backend/langflow/api/callback.py b/src/backend/langflow/api/v1/callback.py similarity index 95% rename from src/backend/langflow/api/callback.py rename to src/backend/langflow/api/v1/callback.py index d63e107c4..b58393d7b 100644 --- a/src/backend/langflow/api/callback.py +++ b/src/backend/langflow/api/v1/callback.py @@ -3,7 +3,7 @@ from typing import Any from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler -from langflow.api.schemas import ChatResponse +from langflow.api.v1.schemas import ChatResponse # https://github.com/hwchase17/chat-langchain/blob/master/callback.py diff --git a/src/backend/langflow/api/chat.py b/src/backend/langflow/api/v1/chat.py similarity index 93% rename from src/backend/langflow/api/chat.py rename to src/backend/langflow/api/v1/chat.py index 4afa6c22f..7df4c65ed 100644 --- a/src/backend/langflow/api/chat.py +++ b/src/backend/langflow/api/v1/chat.py @@ -6,7 +6,7 @@ from fastapi import ( status, ) -from langflow.api.chat_manager import ChatManager +from langflow.chat.manager import ChatManager from langflow.utils.logger import logger router = APIRouter() diff --git a/src/backend/langflow/api/endpoints.py b/src/backend/langflow/api/v1/endpoints.py similarity index 87% rename from src/backend/langflow/api/endpoints.py rename to src/backend/langflow/api/v1/endpoints.py index 021a81ca8..1e9b0deb1 100644 --- a/src/backend/langflow/api/endpoints.py +++ b/src/backend/langflow/api/v1/endpoints.py @@ -3,13 +3,13 @@ from importlib.metadata import version from fastapi import APIRouter, HTTPException -from langflow.api.schemas import ( +from langflow.api.v1.schemas import ( ExportedFlow, GraphData, PredictRequest, PredictResponse, ) -from langflow.interface.run import process_graph_cached + from langflow.interface.types import build_langchain_types_dict # build router @@ -25,6 +25,8 @@ def get_all(): @router.post("/predict", response_model=PredictResponse) async def get_load(predict_request: PredictRequest): try: + from langflow.processing.process import process_graph_cached + exported_flow: ExportedFlow = predict_request.exported_flow graph_data: GraphData = exported_flow.data data = graph_data.dict() @@ -40,8 +42,3 @@ async def get_load(predict_request: PredictRequest): @router.get("/version") def get_version(): return {"version": version("langflow")} - - -@router.get("/health") -def get_health(): - return {"status": "OK"} diff --git a/src/backend/langflow/api/schemas.py b/src/backend/langflow/api/v1/schemas.py similarity index 100% rename from src/backend/langflow/api/schemas.py rename to src/backend/langflow/api/v1/schemas.py diff --git a/src/backend/langflow/api/validate.py b/src/backend/langflow/api/v1/validate.py similarity index 87% rename from src/backend/langflow/api/validate.py rename to src/backend/langflow/api/v1/validate.py index 0e2a7752c..009cb9a30 100644 --- a/src/backend/langflow/api/validate.py +++ b/src/backend/langflow/api/v1/validate.py @@ -2,15 +2,15 @@ import json from fastapi import 
APIRouter, HTTPException -from langflow.api.base import ( +from langflow.api.v1.base import ( Code, CodeValidationResponse, Prompt, PromptValidationResponse, validate_prompt, ) -from langflow.graph.nodes import VectorStoreNode -from langflow.interface.run import build_graph +from langflow.graph.vertex.types import VectorStoreVertex +from langflow.graph import Graph from langflow.utils.logger import logger from langflow.utils.validate import validate_code @@ -44,12 +44,12 @@ def post_validate_prompt(prompt: Prompt): def post_validate_node(node_id: str, data: dict): try: # build graph - graph = build_graph(data) + graph = Graph.from_payload(data) # validate node node = graph.get_node(node_id) if node is None: raise ValueError(f"Node {node_id} not found") - if not isinstance(node, VectorStoreNode): + if not isinstance(node, VectorStoreVertex): node.build() return json.dumps({"valid": True, "params": str(node._built_object_repr())}) except Exception as e: diff --git a/src/backend/langflow/chat/__init__.py b/src/backend/langflow/chat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/backend/langflow/api/chat_manager.py b/src/backend/langflow/chat/manager.py similarity index 85% rename from src/backend/langflow/api/chat_manager.py rename to src/backend/langflow/chat/manager.py index 8b1c7a621..d24057b68 100644 --- a/src/backend/langflow/api/chat_manager.py +++ b/src/backend/langflow/chat/manager.py @@ -1,21 +1,18 @@ -import asyncio -import json from collections import defaultdict -from typing import Dict, List - from fastapi import WebSocket, status - -from langflow.api.schemas import ChatMessage, ChatResponse, FileResponse +from langflow.api.v1.schemas import ChatMessage, ChatResponse, FileResponse from langflow.cache import cache_manager from langflow.cache.manager import Subject -from langflow.interface.run import ( - get_result_and_steps, - load_or_build_langchain_object, -) -from langflow.interface.utils import pil_to_base64, try_setting_streaming_options +from langflow.chat.utils import process_graph +from langflow.interface.utils import pil_to_base64 from langflow.utils.logger import logger +import asyncio +import json +from typing import Dict, List + + class ChatHistory(Subject): def __init__(self): super().__init__() @@ -191,33 +188,3 @@ class ChatManager: except Exception as e: logger.exception(e) self.disconnect(client_id) - - -async def process_graph( - graph_data: Dict, - is_first_message: bool, - chat_message: ChatMessage, - websocket: WebSocket, -): - langchain_object = load_or_build_langchain_object(graph_data, is_first_message) - langchain_object = try_setting_streaming_options(langchain_object, websocket) - logger.debug("Loaded langchain object") - - if langchain_object is None: - # Raise user facing error - raise ValueError( - "There was an error loading the langchain_object. Please, check all the nodes and try again." 
- ) - - # Generate result and thought - try: - logger.debug("Generating result and thought") - result, intermediate_steps = await get_result_and_steps( - langchain_object, chat_message.message or "", websocket=websocket - ) - logger.debug("Generated result and intermediate_steps") - return result, intermediate_steps - except Exception as e: - # Log stack trace - logger.exception(e) - raise e diff --git a/src/backend/langflow/chat/utils.py b/src/backend/langflow/chat/utils.py new file mode 100644 index 000000000..410a442be --- /dev/null +++ b/src/backend/langflow/chat/utils.py @@ -0,0 +1,41 @@ +from fastapi import WebSocket +from langflow.api.v1.schemas import ChatMessage +from langflow.processing.process import ( + load_or_build_langchain_object, +) +from langflow.processing.base import get_result_and_steps +from langflow.interface.utils import try_setting_streaming_options +from langflow.utils.logger import logger + + +from typing import Dict + + +async def process_graph( + graph_data: Dict, + is_first_message: bool, + chat_message: ChatMessage, + websocket: WebSocket, +): + langchain_object = load_or_build_langchain_object(graph_data, is_first_message) + langchain_object = try_setting_streaming_options(langchain_object, websocket) + logger.debug("Loaded langchain object") + + if langchain_object is None: + # Raise a user-facing error + raise ValueError( + "There was an error loading the langchain_object. Please check all the nodes and try again." + ) + + # Generate result and thought + try: + logger.debug("Generating result and thought") + result, intermediate_steps = await get_result_and_steps( + langchain_object, chat_message.message or "", websocket=websocket + ) + logger.debug("Generated result and intermediate_steps") + return result, intermediate_steps + except Exception as e: + # Log stack trace + logger.exception(e) + raise e diff --git a/src/backend/langflow/config.yaml b/src/backend/langflow/config.yaml index c83e18646..5e02e2984 100644 --- a/src/backend/langflow/config.yaml +++ b/src/backend/langflow/config.yaml @@ -51,10 +51,13 @@ embeddings: llms: - OpenAI # - AzureOpenAI + # - AzureChatOpenAI - ChatOpenAI - LlamaCpp - CTransformers - Cohere + - Anthropic + - ChatAnthropic - HuggingFaceHub memories: - ConversationBufferMemory @@ -74,12 +77,14 @@ toolkits: - JsonToolkit - VectorStoreInfo - VectorStoreRouterToolkit + - VectorStoreToolkit tools: - Search - PAL-MATH - Calculator - Serper Search - Tool + - PythonFunctionTool - PythonFunction - JsonSpec - News API diff --git a/src/backend/langflow/custom/customs.py b/src/backend/langflow/custom/customs.py index ee266b0ee..92e1fc2d8 100644 --- a/src/backend/langflow/custom/customs.py +++ b/src/backend/langflow/custom/customs.py @@ -4,6 +4,7 @@ from langflow.template import frontend_node CUSTOM_NODES = { "prompts": {"ZeroShotPrompt": frontend_node.prompts.ZeroShotPromptNode()}, "tools": { + "PythonFunctionTool": frontend_node.tools.PythonFunctionToolNode(), "PythonFunction": frontend_node.tools.PythonFunctionNode(), "Tool": frontend_node.tools.ToolNode(), }, diff --git a/src/backend/langflow/graph/__init__.py b/src/backend/langflow/graph/__init__.py index 097b7a695..a68e844ee 100644 --- a/src/backend/langflow/graph/__init__.py +++ b/src/backend/langflow/graph/__init__.py @@ -1,4 +1,35 @@ -from langflow.graph.base import Edge, Node -from langflow.graph.graph import Graph +from langflow.graph.edge.base import Edge +from langflow.graph.graph.base import Graph +from langflow.graph.vertex.base import Vertex +from
langflow.graph.vertex.types import ( + AgentVertex, + ChainVertex, + DocumentLoaderVertex, + EmbeddingVertex, + LLMVertex, + MemoryVertex, + PromptVertex, + TextSplitterVertex, + ToolVertex, + ToolkitVertex, + VectorStoreVertex, + WrapperVertex, +) -__all__ = ["Graph", "Node", "Edge"] +__all__ = [ + "Graph", + "Vertex", + "Edge", + "AgentVertex", + "ChainVertex", + "DocumentLoaderVertex", + "EmbeddingVertex", + "LLMVertex", + "MemoryVertex", + "PromptVertex", + "TextSplitterVertex", + "ToolVertex", + "ToolkitVertex", + "VectorStoreVertex", + "WrapperVertex", +] diff --git a/src/backend/langflow/graph/edge/__init__.py b/src/backend/langflow/graph/edge/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/backend/langflow/graph/edge/base.py b/src/backend/langflow/graph/edge/base.py new file mode 100644 index 000000000..08f084a5c --- /dev/null +++ b/src/backend/langflow/graph/edge/base.py @@ -0,0 +1,52 @@ +from langflow.utils.logger import logger +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from langflow.graph.vertex.base import Vertex + + +class Edge: + def __init__(self, source: "Vertex", target: "Vertex"): + self.source: "Vertex" = source + self.target: "Vertex" = target + self.validate_edge() + + def validate_edge(self) -> None: + # Validate that the outputs of the source node are valid inputs + # for the target node + self.source_types = self.source.output + self.target_reqs = self.target.required_inputs + self.target.optional_inputs + # Both lists contain strings and sometimes a string contains the value we are + # looking for e.g. source_types=["Chain"] and target_reqs=["LLMChain"] + # so we need to check if any of the strings in source_types is in target_reqs + self.valid = any( + output in target_req + for output in self.source_types + for target_req in self.target_reqs + ) + # Get what type of input the target node is expecting + + self.matched_type = next( + ( + output + for output in self.source_types + for target_req in self.target_reqs + if output in target_req + ), + None, + ) + no_matched_type = self.matched_type is None + if no_matched_type: + logger.debug(self.source_types) + logger.debug(self.target_reqs) + if no_matched_type: + raise ValueError( + f"Edge between {self.source.vertex_type} and {self.target.vertex_type} " + f"has no matched type" + ) + + def __repr__(self) -> str: + return ( + f"Edge(source={self.source.id}, target={self.target.id}, valid={self.valid}" + f", matched_type={self.matched_type})" + ) diff --git a/src/backend/langflow/graph/graph/__init__.py b/src/backend/langflow/graph/graph/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/backend/langflow/graph/graph.py b/src/backend/langflow/graph/graph/base.py similarity index 50% rename from src/backend/langflow/graph/graph.py rename to src/backend/langflow/graph/graph/base.py index b289d5c31..5fd00d09b 100644 --- a/src/backend/langflow/graph/graph.py +++ b/src/backend/langflow/graph/graph/base.py @@ -1,38 +1,20 @@ from typing import Dict, List, Type, Union -from langflow.graph.base import Edge, Node -from langflow.graph.nodes import ( - AgentNode, - ChainNode, - DocumentLoaderNode, - EmbeddingNode, - FileToolNode, - LLMNode, - MemoryNode, - PromptNode, - TextSplitterNode, - ToolkitNode, - ToolNode, - VectorStoreNode, - WrapperNode, +from langflow.graph.edge.base import Edge +from langflow.graph.graph.constants import VERTEX_TYPE_MAP +from langflow.graph.vertex.base import Vertex +from langflow.graph.vertex.types import ( + FileToolVertex, +
LLMVertex, + ToolkitVertex, ) -from langflow.interface.agents.base import agent_creator -from langflow.interface.chains.base import chain_creator -from langflow.interface.document_loaders.base import documentloader_creator -from langflow.interface.embeddings.base import embedding_creator -from langflow.interface.llms.base import llm_creator -from langflow.interface.memories.base import memory_creator -from langflow.interface.prompts.base import prompt_creator -from langflow.interface.text_splitters.base import textsplitter_creator -from langflow.interface.toolkits.base import toolkits_creator -from langflow.interface.tools.base import tool_creator from langflow.interface.tools.constants import FILE_TOOLS -from langflow.interface.vector_store.base import vectorstore_creator -from langflow.interface.wrappers.base import wrapper_creator from langflow.utils import payload class Graph: + """A class representing a graph of nodes and edges.""" + def __init__( self, nodes: List[Dict[str, Union[str, Dict[str, Union[str, List[str]]]]]], @@ -42,8 +24,30 @@ class Graph: self._edges = edges self._build_graph() + @classmethod + def from_payload(cls, payload: Dict) -> "Graph": + """ + Creates a graph from a payload. + + Args: + payload (Dict): The payload to create the graph from. + + Returns: + Graph: The created graph. + """ + if "data" in payload: + payload = payload["data"] + try: + nodes = payload["nodes"] + edges = payload["edges"] + return cls(nodes, edges) + except KeyError as exc: + raise ValueError("Invalid payload") from exc + def _build_graph(self) -> None: - self.nodes = self._build_nodes() + """Builds the graph from the nodes and edges.""" + self.nodes = self._build_vertices() self.edges = self._build_edges() for edge in self.edges: edge.source.add_edge(edge) @@ -51,17 +55,25 @@ class Graph: # This is a hack to make sure that the LLM node is sent to # the toolkit node + self._build_node_params() + # remove invalid nodes + self._remove_invalid_nodes() + + def _build_node_params(self) -> None: + """Identifies and handles the LLM node within the graph.""" llm_node = None for node in self.nodes: node._build_params() - - if isinstance(node, LLMNode): + if isinstance(node, LLMVertex): llm_node = node - for node in self.nodes: - if isinstance(node, ToolkitNode): - node.params["llm"] = llm_node - # remove invalid nodes + if llm_node: + for node in self.nodes: + if isinstance(node, ToolkitVertex): + node.params["llm"] = llm_node + + def _remove_invalid_nodes(self) -> None: + """Removes invalid nodes from the graph.""" self.nodes = [ node for node in self.nodes @@ -69,28 +81,33 @@ class Graph: or (len(self.nodes) == 1 and len(self.edges) == 0) ] - def _validate_node(self, node: Node) -> bool: + def _validate_node(self, node: Vertex) -> bool: + """Validates a node.""" # All nodes that do not have edges are invalid return len(node.edges) > 0 - def get_node(self, node_id: str) -> Union[None, Node]: + def get_node(self, node_id: str) -> Union[None, Vertex]: + """Returns a node by id.""" return next((node for node in self.nodes if node.id == node_id), None) - def get_nodes_with_target(self, node: Node) -> List[Node]: - connected_nodes: List[Node] = [ + def get_nodes_with_target(self, node: Vertex) -> List[Vertex]: + """Returns the nodes connected to a node.""" + connected_nodes: List[Vertex] = [ edge.source for edge in self.edges if edge.target == node ] return connected_nodes - def build(self) -> List[Node]: + def build(self) -> List[Vertex]: + """Builds the graph.""" # Get root node
root_node = payload.get_root_node(self) if root_node is None: raise ValueError("No root node found") return root_node.build() - def get_node_neighbors(self, node: Node) -> Dict[Node, int]: - neighbors: Dict[Node, int] = {} + def get_node_neighbors(self, node: Vertex) -> Dict[Vertex, int]: + """Returns the neighbors of a node.""" + neighbors: Dict[Vertex, int] = {} for edge in self.edges: if edge.source == node: neighbor = edge.target @@ -105,6 +122,7 @@ class Graph: return neighbors def _build_edges(self) -> List[Edge]: + """Builds the edges of the graph.""" # Edge takes two nodes as arguments, so we need to build the nodes first # and then build the edges # if we can't find a node, we raise an error @@ -120,43 +138,31 @@ class Graph: edges.append(Edge(source, target)) return edges - def _get_node_class(self, node_type: str, node_lc_type: str) -> Type[Node]: - node_type_map: Dict[str, Type[Node]] = { - **{t: PromptNode for t in prompt_creator.to_list()}, - **{t: AgentNode for t in agent_creator.to_list()}, - **{t: ChainNode for t in chain_creator.to_list()}, - **{t: ToolNode for t in tool_creator.to_list()}, - **{t: ToolkitNode for t in toolkits_creator.to_list()}, - **{t: WrapperNode for t in wrapper_creator.to_list()}, - **{t: LLMNode for t in llm_creator.to_list()}, - **{t: MemoryNode for t in memory_creator.to_list()}, - **{t: EmbeddingNode for t in embedding_creator.to_list()}, - **{t: VectorStoreNode for t in vectorstore_creator.to_list()}, - **{t: DocumentLoaderNode for t in documentloader_creator.to_list()}, - **{t: TextSplitterNode for t in textsplitter_creator.to_list()}, - } - + def _get_vertex_class(self, node_type: str, node_lc_type: str) -> Type[Vertex]: + """Returns the node class based on the node type.""" if node_type in FILE_TOOLS: - return FileToolNode - if node_type in node_type_map: - return node_type_map[node_type] - if node_lc_type in node_type_map: - return node_type_map[node_lc_type] - return Node + return FileToolVertex + if node_type in VERTEX_TYPE_MAP: + return VERTEX_TYPE_MAP[node_type] + return ( + VERTEX_TYPE_MAP[node_lc_type] if node_lc_type in VERTEX_TYPE_MAP else Vertex + ) - def _build_nodes(self) -> List[Node]: - nodes: List[Node] = [] + def _build_vertices(self) -> List[Vertex]: + """Builds the vertices of the graph.""" + nodes: List[Vertex] = [] for node in self._nodes: node_data = node["data"] node_type: str = node_data["type"] # type: ignore node_lc_type: str = node_data["node"]["template"]["_type"] # type: ignore - NodeClass = self._get_node_class(node_type, node_lc_type) - nodes.append(NodeClass(node)) + VertexClass = self._get_vertex_class(node_type, node_lc_type) + nodes.append(VertexClass(node)) return nodes - def get_children_by_node_type(self, node: Node, node_type: str) -> List[Node]: + def get_children_by_node_type(self, node: Vertex, node_type: str) -> List[Vertex]: + """Returns the children of a node based on the node type.""" children = [] node_types = [node.data["type"]] if "node" in node.data: diff --git a/src/backend/langflow/graph/graph/constants.py b/src/backend/langflow/graph/graph/constants.py new file mode 100644 index 000000000..ff1317d39 --- /dev/null +++ b/src/backend/langflow/graph/graph/constants.py @@ -0,0 +1,49 @@ +from langflow.graph.vertex.base import Vertex +from langflow.graph.vertex.types import ( + AgentVertex, + ChainVertex, + DocumentLoaderVertex, + EmbeddingVertex, + LLMVertex, + MemoryVertex, + PromptVertex, + TextSplitterVertex, + ToolVertex, + ToolkitVertex, + VectorStoreVertex, + WrapperVertex, +) +from 
langflow.interface.agents.base import agent_creator +from langflow.interface.chains.base import chain_creator +from langflow.interface.document_loaders.base import documentloader_creator +from langflow.interface.embeddings.base import embedding_creator +from langflow.interface.llms.base import llm_creator +from langflow.interface.memories.base import memory_creator +from langflow.interface.prompts.base import prompt_creator +from langflow.interface.text_splitters.base import textsplitter_creator +from langflow.interface.toolkits.base import toolkits_creator +from langflow.interface.tools.base import tool_creator +from langflow.interface.vector_store.base import vectorstore_creator +from langflow.interface.wrappers.base import wrapper_creator + + +from typing import Dict, Type + + +DIRECT_TYPES = ["str", "bool", "code", "int", "float", "Any", "prompt"] + + +VERTEX_TYPE_MAP: Dict[str, Type[Vertex]] = { + **{t: PromptVertex for t in prompt_creator.to_list()}, + **{t: AgentVertex for t in agent_creator.to_list()}, + **{t: ChainVertex for t in chain_creator.to_list()}, + **{t: ToolVertex for t in tool_creator.to_list()}, + **{t: ToolkitVertex for t in toolkits_creator.to_list()}, + **{t: WrapperVertex for t in wrapper_creator.to_list()}, + **{t: LLMVertex for t in llm_creator.to_list()}, + **{t: MemoryVertex for t in memory_creator.to_list()}, + **{t: EmbeddingVertex for t in embedding_creator.to_list()}, + **{t: VectorStoreVertex for t in vectorstore_creator.to_list()}, + **{t: DocumentLoaderVertex for t in documentloader_creator.to_list()}, + **{t: TextSplitterVertex for t in textsplitter_creator.to_list()}, +} diff --git a/src/backend/langflow/graph/graph/utils.py b/src/backend/langflow/graph/graph/utils.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/backend/langflow/graph/utils.py b/src/backend/langflow/graph/utils.py index 6d56e933e..b78b2f961 100644 --- a/src/backend/langflow/graph/utils.py +++ b/src/backend/langflow/graph/utils.py @@ -1,4 +1,6 @@ -import re +from typing import Any, Union + +from langflow.interface.utils import extract_input_variables_from_prompt def validate_prompt(prompt: str): @@ -14,6 +16,12 @@ def fix_prompt(prompt: str): return prompt + " {input}" -def extract_input_variables_from_prompt(prompt: str) -> list[str]: - """Extract input variables from prompt.""" - return re.findall(r"{(.*?)}", prompt) +def flatten_list(list_of_lists: list[Union[list, Any]]) -> list: + """Flatten list of lists.""" + new_list = [] + for item in list_of_lists: + if isinstance(item, list): + new_list.extend(item) + else: + new_list.append(item) + return new_list diff --git a/src/backend/langflow/graph/vertex/__init__.py b/src/backend/langflow/graph/vertex/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/backend/langflow/graph/base.py b/src/backend/langflow/graph/vertex/base.py similarity index 76% rename from src/backend/langflow/graph/base.py rename to src/backend/langflow/graph/vertex/base.py index cc5e2902b..04dadab85 100644 --- a/src/backend/langflow/graph/base.py +++ b/src/backend/langflow/graph/vertex/base.py @@ -1,27 +1,27 @@ -# Description: Graph class for building a graph of nodes and edges -# Insights: -# - Defer prompts building to the last moment or when they have all the tools -# - Build each inner agent first, then build the outer agent - -import contextlib -import inspect -import types -import warnings -from typing import Any, Dict, List, Optional - from langflow.cache import base as cache_utils -from langflow.graph.constants 
import DIRECT_TYPES +from langflow.graph.vertex.constants import DIRECT_TYPES from langflow.interface import loading from langflow.interface.listing import ALL_TYPES_DICT from langflow.utils.logger import logger from langflow.utils.util import sync_to_async -class Node: +import contextlib +import inspect +import types +import warnings +from typing import Any, Dict, List, Optional +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from langflow.graph.edge.base import Edge + + +class Vertex: def __init__(self, data: Dict, base_type: Optional[str] = None) -> None: self.id: str = data["id"] self._data = data - self.edges: List[Edge] = [] + self.edges: List["Edge"] = [] self.base_type: Optional[str] = base_type self._parse_data() self._built_object = None @@ -48,12 +48,12 @@ class Node: ] template_dict = self.data["node"]["template"] - self.node_type = ( + self.vertex_type = ( self.data["type"] if "Tool" not in self.output else template_dict["_type"] ) if self.base_type is None: for base_type, value in ALL_TYPES_DICT.items(): - if self.node_type in value: + if self.vertex_type in value: self.base_type = base_type break @@ -113,7 +113,7 @@ class Node: if value["required"] and not edges: # If a required parameter is not found, raise an error raise ValueError( - f"Required input {key} for module {self.node_type} not found" + f"Required input {key} for module {self.vertex_type} not found" ) elif value["list"]: # If this is a list parameter, append all sources to a list @@ -128,7 +128,7 @@ class Node: # so we need to check if value has value new_value = value.get("value") if new_value is None: - warnings.warn(f"Value for {key} in {self.node_type} is None. ") + warnings.warn(f"Value for {key} in {self.vertex_type} is None. ") if value.get("type") == "int": with contextlib.suppress(TypeError, ValueError): new_value = int(new_value) # type: ignore @@ -148,12 +148,12 @@ class Node: # and continue # Another aspect is that the node_type is the class that we need to import # and instantiate with these built params - logger.debug(f"Building {self.node_type}") + logger.debug(f"Building {self.vertex_type}") # Build each node in the params dict for key, value in self.params.copy().items(): # Check if Node or list of Nodes and not self # to avoid recursion - if isinstance(value, Node): + if isinstance(value, Vertex): if value == self: del self.params[key] continue @@ -174,10 +174,16 @@ # turn result which is a function into a coroutine # so that it can be awaited self.params["coroutine"] = sync_to_async(result) + if isinstance(result, list): + # If the result is a list and the param already holds a list, + # extend it instead of overwriting it below + if isinstance(self.params[key], list): + self.params[key].extend(result) + continue self.params[key] = result elif isinstance(value, list) and all( - isinstance(node, Node) for node in value + isinstance(node, Vertex) for node in value ): self.params[key] = [] for node in value: @@ -193,17 +199,17 @@ try: self._built_object = loading.instantiate_class( - node_type=self.node_type, + node_type=self.vertex_type, base_type=self.base_type, params=self.params, ) except Exception as exc: raise ValueError( - f"Error building node {self.node_type}: {str(exc)}" + f"Error building node {self.vertex_type}: {str(exc)}" ) from exc if self._built_object is None: - raise ValueError(f"Node type {self.node_type} not found") + raise ValueError(f"Node type {self.vertex_type} not found") self._built = True @@
-220,57 +226,10 @@ class Node: return f"Node(id={self.id}, data={self.data})" def __eq__(self, __o: object) -> bool: - return self.id == __o.id if isinstance(__o, Node) else False + return self.id == __o.id if isinstance(__o, Vertex) else False def __hash__(self) -> int: return id(self) def _built_object_repr(self): return repr(self._built_object) - - -class Edge: - def __init__(self, source: "Node", target: "Node"): - self.source: "Node" = source - self.target: "Node" = target - self.validate_edge() - - def validate_edge(self) -> None: - # Validate that the outputs of the source node are valid inputs - # for the target node - self.source_types = self.source.output - self.target_reqs = self.target.required_inputs + self.target.optional_inputs - # Both lists contain strings and sometimes a string contains the value we are - # looking for e.g. comgin_out=["Chain"] and target_reqs=["LLMChain"] - # so we need to check if any of the strings in source_types is in target_reqs - self.valid = any( - output in target_req - for output in self.source_types - for target_req in self.target_reqs - ) - # Get what type of input the target node is expecting - - self.matched_type = next( - ( - output - for output in self.source_types - for target_req in self.target_reqs - if output in target_req - ), - None, - ) - no_matched_type = self.matched_type is None - if no_matched_type: - logger.debug(self.source_types) - logger.debug(self.target_reqs) - if no_matched_type: - raise ValueError( - f"Edge between {self.source.node_type} and {self.target.node_type} " - f"has no matched type" - ) - - def __repr__(self) -> str: - return ( - f"Edge(source={self.source.id}, target={self.target.id}, valid={self.valid}" - f", matched_type={self.matched_type})" - ) diff --git a/src/backend/langflow/graph/constants.py b/src/backend/langflow/graph/vertex/constants.py similarity index 100% rename from src/backend/langflow/graph/constants.py rename to src/backend/langflow/graph/vertex/constants.py diff --git a/src/backend/langflow/graph/nodes.py b/src/backend/langflow/graph/vertex/types.py similarity index 72% rename from src/backend/langflow/graph/nodes.py rename to src/backend/langflow/graph/vertex/types.py index 189e40b5c..4eb20f416 100644 --- a/src/backend/langflow/graph/nodes.py +++ b/src/backend/langflow/graph/vertex/types.py @@ -1,22 +1,23 @@ from typing import Any, Dict, List, Optional, Union -from langflow.graph.base import Node -from langflow.graph.utils import extract_input_variables_from_prompt +from langflow.graph.vertex.base import Vertex +from langflow.graph.utils import flatten_list +from langflow.interface.utils import extract_input_variables_from_prompt -class AgentNode(Node): +class AgentVertex(Vertex): def __init__(self, data: Dict): super().__init__(data, base_type="agents") - self.tools: List[ToolNode] = [] - self.chains: List[ChainNode] = [] + self.tools: List[Union[ToolkitVertex, ToolVertex]] = [] + self.chains: List[ChainVertex] = [] def _set_tools_and_chains(self) -> None: for edge in self.edges: source_node = edge.source - if isinstance(source_node, ToolNode): + if isinstance(source_node, (ToolVertex, ToolkitVertex)): self.tools.append(source_node) - elif isinstance(source_node, ChainNode): + elif isinstance(source_node, ChainVertex): self.chains.append(source_node) def build(self, force: bool = False) -> Any: @@ -32,25 +33,130 @@ class AgentNode(Node): self._build() - #! 
Cannot deepcopy VectorStore, VectorStoreRouter, or SQL agents - if self.node_type in ["VectorStoreAgent", "VectorStoreRouterAgent", "SQLAgent"]: - return self._built_object return self._built_object -class ToolNode(Node): +class ToolVertex(Vertex): def __init__(self, data: Dict): super().__init__(data, base_type="tools") -class PromptNode(Node): +class LLMVertex(Vertex): + built_node_type = None + class_built_object = None + + def __init__(self, data: Dict): + super().__init__(data, base_type="llms") + + def build(self, force: bool = False) -> Any: + # LLM is different because some models might take up too much memory + # or time to load. So we only load them when we need them. + if self.vertex_type == self.built_node_type: + return self.class_built_object + if not self._built or force: + self._build() + self.built_node_type = self.vertex_type + self.class_built_object = self._built_object + # Avoid deepcopying the LLMs + # that are loaded from a file + return self._built_object + + +class ToolkitVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="toolkits") + + +class FileToolVertex(ToolVertex): + def __init__(self, data: Dict): + super().__init__(data) + + +class WrapperVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="wrappers") + + def build(self, force: bool = False) -> Any: + if not self._built or force: + if "headers" in self.params: + self.params["headers"] = eval(self.params["headers"]) + self._build() + return self._built_object + + +class DocumentLoaderVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="documentloaders") + + def _built_object_repr(self): + # This built_object is a list of documents. Maybe we should + # show how many documents are in the list? + if self._built_object: + return f"""{self.vertex_type}({len(self._built_object)} documents) + Documents: {self._built_object[:3]}...""" + return f"{self.vertex_type}()" + + +class EmbeddingVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="embeddings") + + +class VectorStoreVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="vectorstores") + + def _built_object_repr(self): + return "Vector stores can take time to build. It will build on the first query." + + +class MemoryVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="memory") + + +class TextSplitterVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="textsplitters") + + def _built_object_repr(self): + # This built_object is a list of documents. Maybe we should + # show how many documents are in the list?
+ if self._built_object: + return f"""{self.vertex_type}({len(self._built_object)} documents) + \nDocuments: {self._built_object[:3]}...""" + return f"{self.vertex_type}()" + + +class ChainVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="chains") + + def build( + self, + force: bool = False, + tools: Optional[List[Union[ToolkitVertex, ToolVertex]]] = None, + ) -> Any: + if not self._built or force: + # Check if the chain requires a PromptVertex + for key, value in self.params.items(): + if isinstance(value, PromptVertex): + # Build the PromptVertex, passing the tools if available + self.params[key] = value.build(tools=tools, force=force) + + self._build() + + return self._built_object + + +class PromptVertex(Vertex): def __init__(self, data: Dict): super().__init__(data, base_type="prompts") def build( self, force: bool = False, - tools: Optional[Union[List[Node], List[ToolNode]]] = None, + tools: Optional[List[Union[ToolkitVertex, ToolVertex]]] = None, ) -> Any: if not self._built or force: if ( @@ -59,12 +165,16 @@ class PromptNode(Node): ): self.params["input_variables"] = [] # Check if it is a ZeroShotPrompt and needs a tool - if "ShotPrompt" in self.node_type: + if "ShotPrompt" in self.vertex_type: tools = ( [tool_node.build() for tool_node in tools] if tools is not None else [] ) + # flatten the list of tools if it is a list of lists + # first check if it is a list + if tools and isinstance(tools, list) and isinstance(tools[0], list): + tools = flatten_list(tools) self.params["tools"] = tools prompt_params = [ key @@ -81,113 +191,3 @@ class PromptNode(Node): self._build() return self._built_object - - -class ChainNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="chains") - - def build( - self, - force: bool = False, - tools: Optional[Union[List[Node], List[ToolNode]]] = None, - ) -> Any: - if not self._built or force: - # Check if the chain requires a PromptNode - for key, value in self.params.items(): - if isinstance(value, PromptNode): - # Build the PromptNode, passing the tools if available - self.params[key] = value.build(tools=tools, force=force) - - self._build() - - #! Cannot deepcopy SQLDatabaseChain - if self.node_type in ["SQLDatabaseChain"]: - return self._built_object - return self._built_object - - -class LLMNode(Node): - built_node_type = None - class_built_object = None - - def __init__(self, data: Dict): - super().__init__(data, base_type="llms") - - def build(self, force: bool = False) -> Any: - # LLM is different because some models might take up too much memory - # or time to load. 
So we only load them when we need them.ß - if self.node_type == self.built_node_type: - return self.class_built_object - if not self._built or force: - self._build() - self.built_node_type = self.node_type - self.class_built_object = self._built_object - # Avoid deepcopying the LLM - # that are loaded from a file - return self._built_object - - -class ToolkitNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="toolkits") - - -class FileToolNode(ToolNode): - def __init__(self, data: Dict): - super().__init__(data) - - -class WrapperNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="wrappers") - - def build(self, force: bool = False) -> Any: - if not self._built or force: - if "headers" in self.params: - self.params["headers"] = eval(self.params["headers"]) - self._build() - return self._built_object - - -class DocumentLoaderNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="documentloaders") - - def _built_object_repr(self): - # This built_object is a list of documents. Maybe we should - # show how many documents are in the list? - if self._built_object: - return f"""{self.node_type}({len(self._built_object)} documents) - Documents: {self._built_object[:3]}...""" - return f"{self.node_type}()" - - -class EmbeddingNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="embeddings") - - -class VectorStoreNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="vectorstores") - - def _built_object_repr(self): - return "Vector stores can take time to build. It will build on the first query." - - -class MemoryNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="memory") - - -class TextSplitterNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="textsplitters") - - def _built_object_repr(self): - # This built_object is a list of documents. Maybe we should - # show how many documents are in the list? 
- if self._built_object: - return f"""{self.node_type}({len(self._built_object)} documents)\nDocuments: {self._built_object[:3]}...""" - return f"{self.node_type}()" diff --git a/src/backend/langflow/interface/agents/custom.py b/src/backend/langflow/interface/agents/custom.py index 4654ef7cb..3aaa132d4 100644 --- a/src/backend/langflow/interface/agents/custom.py +++ b/src/backend/langflow/interface/agents/custom.py @@ -69,7 +69,7 @@ class JsonAgent(CustomAgentExecutor): @classmethod def from_toolkit_and_llm(cls, toolkit: JsonToolkit, llm: BaseLanguageModel): - tools = toolkit.get_tools() + tools = toolkit if isinstance(toolkit, list) else toolkit.get_tools() tool_names = {tool.name for tool in tools} prompt = ZeroShotAgent.create_prompt( tools, diff --git a/src/backend/langflow/interface/chains/custom.py b/src/backend/langflow/interface/chains/custom.py index cb76a53c8..ba4ba8b62 100644 --- a/src/backend/langflow/interface/chains/custom.py +++ b/src/backend/langflow/interface/chains/custom.py @@ -5,7 +5,7 @@ from langchain.memory.buffer import ConversationBufferMemory from langchain.schema import BaseMemory from pydantic import Field, root_validator -from langflow.graph.utils import extract_input_variables_from_prompt +from langflow.interface.utils import extract_input_variables_from_prompt DEFAULT_SUFFIX = """" Current conversation: diff --git a/src/backend/langflow/interface/custom_lists.py b/src/backend/langflow/interface/custom_lists.py index 0fea838b6..34bc0103e 100644 --- a/src/backend/langflow/interface/custom_lists.py +++ b/src/backend/langflow/interface/custom_lists.py @@ -11,12 +11,15 @@ from langchain import ( text_splitter, ) from langchain.agents import agent_toolkits -from langchain.chat_models import ChatOpenAI +from langchain.chat_models import AzureChatOpenAI, ChatOpenAI +from langchain.chat_models import ChatAnthropic from langflow.interface.importing.utils import import_class ## LLMs llm_type_to_cls_dict = llms.type_to_cls_dict +llm_type_to_cls_dict["anthropic-chat"] = ChatAnthropic # type: ignore +llm_type_to_cls_dict["azure-chat"] = AzureChatOpenAI # type: ignore llm_type_to_cls_dict["openai-chat"] = ChatOpenAI # type: ignore ## Chains diff --git a/src/backend/langflow/interface/importing/utils.py b/src/backend/langflow/interface/importing/utils.py index d08e52999..f65376d48 100644 --- a/src/backend/langflow/interface/importing/utils.py +++ b/src/backend/langflow/interface/importing/utils.py @@ -9,6 +9,7 @@ from langchain.base_language import BaseLanguageModel from langchain.chains.base import Chain from langchain.chat_models.base import BaseChatModel from langchain.tools import BaseTool +from langflow.utils import validate def import_module(module_path: str) -> Any: @@ -147,3 +148,10 @@ def import_utility(utility: str) -> Any: if utility == "SQLDatabase": return import_class(f"langchain.sql_database.{utility}") return import_class(f"langchain.utilities.{utility}") + + +def get_function(code): + """Get the function""" + function_name = validate.extract_function_name(code) + + return validate.create_function(code, function_name) diff --git a/src/backend/langflow/interface/loading.py b/src/backend/langflow/interface/loading.py index 69c697823..d0ec4b845 100644 --- a/src/backend/langflow/interface/loading.py +++ b/src/backend/langflow/interface/loading.py @@ -20,8 +20,7 @@ from langchain.llms.loading import load_llm_from_config from pydantic import ValidationError from langflow.interface.agents.custom import CUSTOM_AGENTS -from langflow.interface.importing.utils import 
import_by_type -from langflow.interface.run import fix_memory_inputs +from langflow.interface.importing.utils import get_function, import_by_type from langflow.interface.toolkits.base import toolkits_creator from langflow.interface.types import get_type_list from langflow.interface.utils import load_file_into_dict @@ -99,6 +98,10 @@ def instantiate_tool(node_type, class_object, params): if node_type == "JsonSpec": params["dict_"] = load_file_into_dict(params.pop("path")) return class_object(**params) + elif node_type == "PythonFunctionTool": + params["func"] = get_function(params.get("code")) + return class_object(**params) + # For backward compatibility elif node_type == "PythonFunction": function_string = params["code"] if isinstance(function_string, str): @@ -111,8 +114,11 @@ def instantiate_tool(node_type, class_object, params): def instantiate_toolkit(node_type, class_object, params): loaded_toolkit = class_object(**params) - if toolkits_creator.has_create_function(node_type): - return load_toolkits_executor(node_type, loaded_toolkit, params) + # Commenting this out for now to use toolkits as normal tools + # if toolkits_creator.has_create_function(node_type): + # return load_toolkits_executor(node_type, loaded_toolkit, params) + if isinstance(loaded_toolkit, BaseToolkit): + return loaded_toolkit.get_tools() return loaded_toolkit @@ -161,38 +167,6 @@ def instantiate_utility(node_type, class_object, params): return class_object(**params) -def load_flow_from_json(path: str, build=True): - """Load flow from json file""" - # This is done to avoid circular imports - from langflow.graph import Graph - - with open(path, "r", encoding="utf-8") as f: - flow_graph = json.load(f) - data_graph = flow_graph["data"] - nodes = data_graph["nodes"] - # Substitute ZeroShotPrompt with PromptTemplate - # nodes = replace_zero_shot_prompt_with_prompt_template(nodes) - # Add input variables - # nodes = payload.extract_input_variables(nodes) - - # Nodes, edges and root node - edges = data_graph["edges"] - graph = Graph(nodes, edges) - if build: - langchain_object = graph.build() - if hasattr(langchain_object, "verbose"): - langchain_object.verbose = True - - if hasattr(langchain_object, "return_intermediate_steps"): - # https://github.com/hwchase17/langchain/issues/2068 - # Deactivating until we have a frontend solution - # to display intermediate steps - langchain_object.return_intermediate_steps = False - fix_memory_inputs(langchain_object) - return langchain_object - return graph - - def replace_zero_shot_prompt_with_prompt_template(nodes): """Replace ZeroShotPrompt with PromptTemplate""" for node in nodes: diff --git a/src/backend/langflow/interface/prompts/custom.py b/src/backend/langflow/interface/prompts/custom.py index b1dbef370..286210271 100644 --- a/src/backend/langflow/interface/prompts/custom.py +++ b/src/backend/langflow/interface/prompts/custom.py @@ -3,7 +3,7 @@ from typing import Dict, List, Optional, Type from langchain.prompts import PromptTemplate from pydantic import root_validator -from langflow.graph.utils import extract_input_variables_from_prompt +from langflow.interface.utils import extract_input_variables_from_prompt # Steps to create a BaseCustomPrompt: # 1. 
Create a prompt template that ends with: diff --git a/src/backend/langflow/interface/run.py b/src/backend/langflow/interface/run.py index d24b6a0dc..89f71fd8b 100644 --- a/src/backend/langflow/interface/run.py +++ b/src/backend/langflow/interface/run.py @@ -1,12 +1,5 @@ -import contextlib -import io -from typing import Any, Dict, List, Tuple - -from langchain.schema import AgentAction - -from langflow.api.callback import AsyncStreamingLLMCallbackHandler, StreamingLLMCallbackHandler # type: ignore from langflow.cache.base import compute_dict_hash, load_cache, memoize_dict -from langflow.graph.graph import Graph +from langflow.graph import Graph from langflow.utils.logger import logger @@ -24,15 +17,6 @@ def load_langchain_object(data_graph, is_first_message=False): return computed_hash, langchain_object -def load_or_build_langchain_object(data_graph, is_first_message=False): - """ - Load langchain object from cache if it exists, otherwise build it. - """ - if is_first_message: - build_langchain_object_with_caching.clear_cache() - return build_langchain_object_with_caching(data_graph) - - @memoize_dict(maxsize=10) def build_langchain_object_with_caching(data_graph): """ @@ -40,16 +24,10 @@ def build_langchain_object_with_caching(data_graph): """ logger.debug("Building langchain object") - graph = build_graph(data_graph) + graph = Graph.from_payload(data_graph) return graph.build() -def build_graph(data_graph): - nodes = data_graph["nodes"] - edges = data_graph["edges"] - return Graph(nodes, edges) - - def build_langchain_object(data_graph): """ Build langchain object from data_graph. @@ -66,29 +44,6 @@ def build_langchain_object(data_graph): return graph.build() -def process_graph_cached(data_graph: Dict[str, Any], message: str): - """ - Process graph by extracting input variables and replacing ZeroShotPrompt - with PromptTemplate, then run the graph and return the result and thought. - """ - # Load langchain object - is_first_message = len(data_graph.get("chatHistory", [])) == 0 - langchain_object = load_or_build_langchain_object(data_graph, is_first_message) - logger.debug("Loaded langchain object") - - if langchain_object is None: - # Raise user facing error - raise ValueError( - "There was an error loading the langchain_object. Please, check all the nodes and try again." - ) - - # Generate result and thought - logger.debug("Generating result and thought") - result, thought = get_result_and_thought(langchain_object, message) - logger.debug("Generated result and thought") - return {"result": str(result), "thought": thought.strip()} - - def get_memory_key(langchain_object): """ Given a LangChain object, this function retrieves the current memory key from the object's memory attribute. @@ -124,147 +79,3 @@ def update_memory_keys(langchain_object, possible_new_mem_key): langchain_object.memory.input_key = input_key langchain_object.memory.output_key = output_key langchain_object.memory.memory_key = possible_new_mem_key - - -def fix_memory_inputs(langchain_object): - """ - Given a LangChain object, this function checks if it has a memory attribute and if that memory key exists in the - object's input variables. If so, it does nothing. Otherwise, it gets a possible new memory key using the - get_memory_key function and updates the memory keys using the update_memory_keys function. 
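`build_langchain_object_with_caching` keeps its `@memoize_dict(maxsize=10)` decorator, whose real implementation lives in `langflow.cache.base`. A plausible sketch of such a decorator, assuming JSON-serializable graph payloads (`compute_dict_hash` presumably produces something equivalent to the key below):

import json
from collections import OrderedDict
from functools import wraps

def memoize_dict(maxsize: int = 10):
    def decorator(func):
        cache: OrderedDict = OrderedDict()

        @wraps(func)
        def wrapper(data: dict):
            key = json.dumps(data, sort_keys=True)  # stable stand-in for a dict hash
            if key in cache:
                cache.move_to_end(key)  # mark as most recently used
                return cache[key]
            result = func(data)
            cache[key] = result
            if len(cache) > maxsize:
                cache.popitem(last=False)  # evict the oldest entry
            return result

        wrapper.clear_cache = cache.clear  # used when a new chat starts
        return wrapper
    return decorator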
- """ - if hasattr(langchain_object, "memory") and langchain_object.memory is not None: - try: - if langchain_object.memory.memory_key in langchain_object.input_variables: - return - except AttributeError: - input_variables = ( - langchain_object.prompt.input_variables - if hasattr(langchain_object, "prompt") - else langchain_object.input_keys - ) - if langchain_object.memory.memory_key in input_variables: - return - - possible_new_mem_key = get_memory_key(langchain_object) - if possible_new_mem_key is not None: - update_memory_keys(langchain_object, possible_new_mem_key) - - -async def get_result_and_steps(langchain_object, message: str, **kwargs): - """Get result and thought from extracted json""" - - try: - if hasattr(langchain_object, "verbose"): - langchain_object.verbose = True - chat_input = None - memory_key = "" - if hasattr(langchain_object, "memory") and langchain_object.memory is not None: - memory_key = langchain_object.memory.memory_key - - if hasattr(langchain_object, "input_keys"): - for key in langchain_object.input_keys: - if key not in [memory_key, "chat_history"]: - chat_input = {key: message} - else: - chat_input = message # type: ignore - - if hasattr(langchain_object, "return_intermediate_steps"): - # https://github.com/hwchase17/langchain/issues/2068 - # Deactivating until we have a frontend solution - # to display intermediate steps - langchain_object.return_intermediate_steps = True - - fix_memory_inputs(langchain_object) - try: - async_callbacks = [AsyncStreamingLLMCallbackHandler(**kwargs)] - output = await langchain_object.acall(chat_input, callbacks=async_callbacks) - except Exception as exc: - # make the error message more informative - logger.debug(f"Error: {str(exc)}") - sync_callbacks = [StreamingLLMCallbackHandler(**kwargs)] - output = langchain_object(chat_input, callbacks=sync_callbacks) - - intermediate_steps = ( - output.get("intermediate_steps", []) if isinstance(output, dict) else [] - ) - - result = ( - output.get(langchain_object.output_keys[0]) - if isinstance(output, dict) - else output - ) - thought = format_actions(intermediate_steps) if intermediate_steps else "" - except Exception as exc: - raise ValueError(f"Error: {str(exc)}") from exc - return result, thought - - -def get_result_and_thought(langchain_object, message: str): - """Get result and thought from extracted json""" - try: - if hasattr(langchain_object, "verbose"): - langchain_object.verbose = True - chat_input = None - memory_key = "" - if hasattr(langchain_object, "memory") and langchain_object.memory is not None: - memory_key = langchain_object.memory.memory_key - - if hasattr(langchain_object, "input_keys"): - for key in langchain_object.input_keys: - if key not in [memory_key, "chat_history"]: - chat_input = {key: message} - else: - chat_input = message # type: ignore - - if hasattr(langchain_object, "return_intermediate_steps"): - # https://github.com/hwchase17/langchain/issues/2068 - # Deactivating until we have a frontend solution - # to display intermediate steps - langchain_object.return_intermediate_steps = False - - fix_memory_inputs(langchain_object) - - with io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer): - try: - # if hasattr(langchain_object, "acall"): - # output = await langchain_object.acall(chat_input) - # else: - output = langchain_object(chat_input) - except ValueError as exc: - # make the error message more informative - logger.debug(f"Error: {str(exc)}") - output = langchain_object.run(chat_input) - - intermediate_steps = ( - 
output.get("intermediate_steps", []) if isinstance(output, dict) else [] - ) - - result = ( - output.get(langchain_object.output_keys[0]) - if isinstance(output, dict) - else output - ) - if intermediate_steps: - thought = format_actions(intermediate_steps) - else: - thought = output_buffer.getvalue() - - except Exception as exc: - raise ValueError(f"Error: {str(exc)}") from exc - return result, thought - - -def format_actions(actions: List[Tuple[AgentAction, str]]) -> str: - """Format a list of (AgentAction, answer) tuples into a string.""" - output = [] - for action, answer in actions: - log = action.log - tool = action.tool - tool_input = action.tool_input - output.append(f"Log: {log}") - if "Action" not in log and "Action Input" not in log: - output.append(f"Tool: {tool}") - output.append(f"Tool Input: {tool_input}") - output.append(f"Answer: {answer}") - output.append("") # Add a blank line - return "\n".join(output) diff --git a/src/backend/langflow/interface/toolkits/base.py b/src/backend/langflow/interface/toolkits/base.py index cbe625f0d..be2345c02 100644 --- a/src/backend/langflow/interface/toolkits/base.py +++ b/src/backend/langflow/interface/toolkits/base.py @@ -42,24 +42,27 @@ class ToolkitCreator(LangChainTypeCreator): def get_signature(self, name: str) -> Optional[Dict]: try: - return build_template_from_class(name, self.type_to_loader_dict) + template = build_template_from_class(name, self.type_to_loader_dict) + # add Tool to base_classes + if "toolkit" in name.lower() and template: + template["base_classes"].append("Tool") + return template except ValueError as exc: - raise ValueError("Prompt not found") from exc + raise ValueError("Toolkit not found") from exc except AttributeError as exc: - logger.error(f"Prompt {name} not loaded: {exc}") + logger.error(f"Toolkit {name} not loaded: {exc}") return None def to_list(self) -> List[str]: return list(self.type_to_loader_dict.keys()) def get_create_function(self, name: str) -> Callable: - if loader_name := self.create_functions.get(name, None): - # import loader + if loader_name := self.create_functions.get(name): return import_module( f"from langchain.agents.agent_toolkits import {loader_name[0]}" ) else: - raise ValueError("Loader not found") + raise ValueError("Toolkit not found") def has_create_function(self, name: str) -> bool: # check if the function list is not empty diff --git a/src/backend/langflow/interface/tools/base.py b/src/backend/langflow/interface/tools/base.py index a8e7045c0..d6b114e4c 100644 --- a/src/backend/langflow/interface/tools/base.py +++ b/src/backend/langflow/interface/tools/base.py @@ -71,7 +71,8 @@ class ToolCreator(LangChainTypeCreator): for tool, tool_fcn in ALL_TOOLS_NAMES.items(): tool_params = get_tool_params(tool_fcn) - tool_name = tool_params.get("name", tool) + + tool_name = tool_params.get("name") or tool if tool_name in settings.tools or settings.dev: if tool_name == "JsonSpec": diff --git a/src/backend/langflow/interface/tools/constants.py b/src/backend/langflow/interface/tools/constants.py index f939d55ad..fea3c5237 100644 --- a/src/backend/langflow/interface/tools/constants.py +++ b/src/backend/langflow/interface/tools/constants.py @@ -9,10 +9,14 @@ from langchain.agents.load_tools import ( from langchain.tools.json.tool import JsonSpec from langflow.interface.importing.utils import import_class -from langflow.interface.tools.custom import PythonFunction +from langflow.interface.tools.custom import PythonFunctionTool, PythonFunction FILE_TOOLS = {"JsonSpec": JsonSpec} -CUSTOM_TOOLS = 
{"Tool": Tool, "PythonFunction": PythonFunction} +CUSTOM_TOOLS = { + "Tool": Tool, + "PythonFunctionTool": PythonFunctionTool, + "PythonFunction": PythonFunction, +} OTHER_TOOLS = {tool: import_class(f"langchain.tools.{tool}") for tool in tools.__all__} diff --git a/src/backend/langflow/interface/tools/custom.py b/src/backend/langflow/interface/tools/custom.py index 4c641f388..0e2e5ff57 100644 --- a/src/backend/langflow/interface/tools/custom.py +++ b/src/backend/langflow/interface/tools/custom.py @@ -1,8 +1,10 @@ from typing import Callable, Optional +from langflow.interface.importing.utils import get_function from pydantic import BaseModel, validator from langflow.utils import validate +from langchain.agents.tools import Tool class Function(BaseModel): @@ -31,6 +33,21 @@ class Function(BaseModel): return validate.create_function(self.code, function_name) +class PythonFunctionTool(Function, Tool): + """Python function""" + + name: str = "Custom Tool" + description: str + code: str + + def ___init__(self, name: str, description: str, code: str): + self.name = name + self.description = description + self.code = code + self.func = get_function(self.code) + super().__init__(name=name, description=description, func=self.func) + + class PythonFunction(Function): """Python function""" diff --git a/src/backend/langflow/interface/utils.py b/src/backend/langflow/interface/utils.py index 08d6ebde3..2777025ab 100644 --- a/src/backend/langflow/interface/utils.py +++ b/src/backend/langflow/interface/utils.py @@ -2,6 +2,7 @@ import base64 import json import os from io import BytesIO +import re import yaml from langchain.base_language import BaseLanguageModel @@ -52,3 +53,8 @@ def try_setting_streaming_options(langchain_object, websocket): llm.stream = True return langchain_object + + +def extract_input_variables_from_prompt(prompt: str) -> list[str]: + """Extract input variables from prompt.""" + return re.findall(r"{(.*?)}", prompt) diff --git a/src/backend/langflow/main.py b/src/backend/langflow/main.py index 56cc32e46..de39d8750 100644 --- a/src/backend/langflow/main.py +++ b/src/backend/langflow/main.py @@ -1,9 +1,7 @@ from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware -from langflow.api.chat import router as chat_router -from langflow.api.endpoints import router as endpoints_router -from langflow.api.validate import router as validate_router +from langflow.api import router def create_app(): @@ -14,6 +12,10 @@ def create_app(): "*", ] + @app.get("/health") + def get_health(): + return {"status": "OK"} + app.add_middleware( CORSMiddleware, allow_origins=origins, @@ -22,9 +24,7 @@ def create_app(): allow_headers=["*"], ) - app.include_router(endpoints_router) - app.include_router(validate_router) - app.include_router(chat_router) + app.include_router(router) return app diff --git a/src/backend/langflow/processing/__init__.py b/src/backend/langflow/processing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/backend/langflow/processing/base.py b/src/backend/langflow/processing/base.py new file mode 100644 index 000000000..97b0d5be0 --- /dev/null +++ b/src/backend/langflow/processing/base.py @@ -0,0 +1,55 @@ +from langflow.api.v1.callback import ( + AsyncStreamingLLMCallbackHandler, + StreamingLLMCallbackHandler, +) +from langflow.processing.process import fix_memory_inputs, format_actions +from langflow.utils.logger import logger + + +async def get_result_and_steps(langchain_object, message: str, **kwargs): + """Get result and thought from 
extracted json""" + + try: + if hasattr(langchain_object, "verbose"): + langchain_object.verbose = True + chat_input = None + memory_key = "" + if hasattr(langchain_object, "memory") and langchain_object.memory is not None: + memory_key = langchain_object.memory.memory_key + + if hasattr(langchain_object, "input_keys"): + for key in langchain_object.input_keys: + if key not in [memory_key, "chat_history"]: + chat_input = {key: message} + else: + chat_input = message # type: ignore + + if hasattr(langchain_object, "return_intermediate_steps"): + # https://github.com/hwchase17/langchain/issues/2068 + # Deactivating until we have a frontend solution + # to display intermediate steps + langchain_object.return_intermediate_steps = True + + fix_memory_inputs(langchain_object) + try: + async_callbacks = [AsyncStreamingLLMCallbackHandler(**kwargs)] + output = await langchain_object.acall(chat_input, callbacks=async_callbacks) + except Exception as exc: + # make the error message more informative + logger.debug(f"Error: {str(exc)}") + sync_callbacks = [StreamingLLMCallbackHandler(**kwargs)] + output = langchain_object(chat_input, callbacks=sync_callbacks) + + intermediate_steps = ( + output.get("intermediate_steps", []) if isinstance(output, dict) else [] + ) + + result = ( + output.get(langchain_object.output_keys[0]) + if isinstance(output, dict) + else output + ) + thought = format_actions(intermediate_steps) if intermediate_steps else "" + except Exception as exc: + raise ValueError(f"Error: {str(exc)}") from exc + return result, thought diff --git a/src/backend/langflow/processing/process.py b/src/backend/langflow/processing/process.py new file mode 100644 index 000000000..3b8852e00 --- /dev/null +++ b/src/backend/langflow/processing/process.py @@ -0,0 +1,172 @@ +import contextlib +import io +from langchain.schema import AgentAction +import json +from langflow.interface.run import ( + build_langchain_object_with_caching, + get_memory_key, + update_memory_keys, +) +from langflow.utils.logger import logger +from langflow.graph import Graph + + +from typing import Any, Dict, List, Tuple + + +def fix_memory_inputs(langchain_object): + """ + Given a LangChain object, this function checks if it has a memory attribute and if that memory key exists in the + object's input variables. If so, it does nothing. Otherwise, it gets a possible new memory key using the + get_memory_key function and updates the memory keys using the update_memory_keys function. 
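`get_result_and_steps` tries the async entry point with async streaming callbacks first and, if that raises, retries synchronously with the sync handler so the request still completes. The control flow in isolation, with a toy chain standing in for the real LangChain object and the callback handlers omitted:

import asyncio

class FlakyChain:
    # Toy stand-in: the async path fails, the sync path works.
    async def acall(self, chat_input):
        raise RuntimeError("no async support")

    def __call__(self, chat_input):
        return {"output": f"echo: {chat_input}"}

async def call_with_fallback(chain, chat_input):
    try:
        return await chain.acall(chat_input)
    except Exception as exc:
        # Mirror the diff: log the failure, then retry synchronously.
        print(f"async call failed, retrying sync: {exc}")
        return chain(chat_input)

print(asyncio.run(call_with_fallback(FlakyChain(), "hi")))
# async call failed, retrying sync: no async support
# {'output': 'echo: hi'}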
+ """ + if hasattr(langchain_object, "memory") and langchain_object.memory is not None: + try: + if langchain_object.memory.memory_key in langchain_object.input_variables: + return + except AttributeError: + input_variables = ( + langchain_object.prompt.input_variables + if hasattr(langchain_object, "prompt") + else langchain_object.input_keys + ) + if langchain_object.memory.memory_key in input_variables: + return + + possible_new_mem_key = get_memory_key(langchain_object) + if possible_new_mem_key is not None: + update_memory_keys(langchain_object, possible_new_mem_key) + + +def format_actions(actions: List[Tuple[AgentAction, str]]) -> str: + """Format a list of (AgentAction, answer) tuples into a string.""" + output = [] + for action, answer in actions: + log = action.log + tool = action.tool + tool_input = action.tool_input + output.append(f"Log: {log}") + if "Action" not in log and "Action Input" not in log: + output.append(f"Tool: {tool}") + output.append(f"Tool Input: {tool_input}") + output.append(f"Answer: {answer}") + output.append("") # Add a blank line + return "\n".join(output) + + +def get_result_and_thought(langchain_object, message: str): + """Get result and thought from extracted json""" + try: + if hasattr(langchain_object, "verbose"): + langchain_object.verbose = True + chat_input = None + memory_key = "" + if hasattr(langchain_object, "memory") and langchain_object.memory is not None: + memory_key = langchain_object.memory.memory_key + + if hasattr(langchain_object, "input_keys"): + for key in langchain_object.input_keys: + if key not in [memory_key, "chat_history"]: + chat_input = {key: message} + else: + chat_input = message # type: ignore + + if hasattr(langchain_object, "return_intermediate_steps"): + # https://github.com/hwchase17/langchain/issues/2068 + # Deactivating until we have a frontend solution + # to display intermediate steps + langchain_object.return_intermediate_steps = False + + fix_memory_inputs(langchain_object) + + with io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer): + try: + # if hasattr(langchain_object, "acall"): + # output = await langchain_object.acall(chat_input) + # else: + output = langchain_object(chat_input) + except ValueError as exc: + # make the error message more informative + logger.debug(f"Error: {str(exc)}") + output = langchain_object.run(chat_input) + + intermediate_steps = ( + output.get("intermediate_steps", []) if isinstance(output, dict) else [] + ) + + result = ( + output.get(langchain_object.output_keys[0]) + if isinstance(output, dict) + else output + ) + if intermediate_steps: + thought = format_actions(intermediate_steps) + else: + thought = output_buffer.getvalue() + + except Exception as exc: + raise ValueError(f"Error: {str(exc)}") from exc + return result, thought + + +def load_or_build_langchain_object(data_graph, is_first_message=False): + """ + Load langchain object from cache if it exists, otherwise build it. + """ + if is_first_message: + build_langchain_object_with_caching.clear_cache() + return build_langchain_object_with_caching(data_graph) + + +def process_graph_cached(data_graph: Dict[str, Any], message: str): + """ + Process graph by extracting input variables and replacing ZeroShotPrompt + with PromptTemplate,then run the graph and return the result and thought. 
+ """ + # Load langchain object + is_first_message = len(data_graph.get("chatHistory", [])) == 0 + langchain_object = load_or_build_langchain_object(data_graph, is_first_message) + logger.debug("Loaded langchain object") + + if langchain_object is None: + # Raise user facing error + raise ValueError( + "There was an error loading the langchain_object. Please, check all the nodes and try again." + ) + + # Generate result and thought + logger.debug("Generating result and thought") + result, thought = get_result_and_thought(langchain_object, message) + logger.debug("Generated result and thought") + return {"result": str(result), "thought": thought.strip()} + + +def load_flow_from_json(path: str, build=True): + """Load flow from json file""" + # This is done to avoid circular imports + + with open(path, "r", encoding="utf-8") as f: + flow_graph = json.load(f) + data_graph = flow_graph["data"] + nodes = data_graph["nodes"] + # Substitute ZeroShotPrompt with PromptTemplate + # nodes = replace_zero_shot_prompt_with_prompt_template(nodes) + # Add input variables + # nodes = payload.extract_input_variables(nodes) + + # Nodes, edges and root node + edges = data_graph["edges"] + graph = Graph(nodes, edges) + if build: + langchain_object = graph.build() + if hasattr(langchain_object, "verbose"): + langchain_object.verbose = True + + if hasattr(langchain_object, "return_intermediate_steps"): + # https://github.com/hwchase17/langchain/issues/2068 + # Deactivating until we have a frontend solution + # to display intermediate steps + langchain_object.return_intermediate_steps = False + fix_memory_inputs(langchain_object) + return langchain_object + return graph diff --git a/src/backend/langflow/template/frontend_node/agents.py b/src/backend/langflow/template/frontend_node/agents.py index e4fe40187..451dd7eca 100644 --- a/src/backend/langflow/template/frontend_node/agents.py +++ b/src/backend/langflow/template/frontend_node/agents.py @@ -146,7 +146,7 @@ class CSVAgentNode(FrontendNode): ), ], ) - description: str = """Construct a json agent from a CSV and tools.""" + description: str = """Construct a CSV agent from a CSV and tools.""" base_classes: list[str] = ["AgentExecutor"] def to_dict(self): @@ -194,7 +194,7 @@ class InitializeAgentNode(FrontendNode): ), ], ) - description: str = """Construct a json agent from an LLM and tools.""" + description: str = """Construct a zero shot agent from an LLM and tools.""" base_classes: list[str] = ["AgentExecutor", "function"] def to_dict(self): diff --git a/src/backend/langflow/template/frontend_node/base.py b/src/backend/langflow/template/frontend_node/base.py index a64195813..6d00cead0 100644 --- a/src/backend/langflow/template/frontend_node/base.py +++ b/src/backend/langflow/template/frontend_node/base.py @@ -117,14 +117,30 @@ class FrontendNode(BaseModel): ) -> None: """Handles specific field values for certain fields.""" if key == "headers": - field.value = """{'Authorization': - 'Bearer '}""" - if name == "OpenAI" and key == "model_name": - field.options = constants.OPENAI_MODELS - field.is_list = True - elif name == "ChatOpenAI" and key == "model_name": - field.options = constants.CHAT_OPENAI_MODELS + field.value = """{'Authorization': 'Bearer '}""" + FrontendNode._handle_model_specific_field_values(field, key, name) + FrontendNode._handle_api_key_specific_field_values(field, key, name) + + @staticmethod + def _handle_model_specific_field_values( + field: TemplateField, key: str, name: Optional[str] = None + ) -> None: + """Handles specific field values 
related to models.""" + model_dict = { + "OpenAI": constants.OPENAI_MODELS, + "ChatOpenAI": constants.CHAT_OPENAI_MODELS, + "Anthropic": constants.ANTHROPIC_MODELS, + "ChatAnthropic": constants.ANTHROPIC_MODELS, + } + if name in model_dict and key == "model_name": + field.options = model_dict[name] field.is_list = True + + @staticmethod + def _handle_api_key_specific_field_values( + field: TemplateField, key: str, name: Optional[str] = None + ) -> None: + """Handles specific field values related to API keys.""" if "api_key" in key and "OpenAI" in str(name): field.display_name = "OpenAI API Key" field.required = False diff --git a/src/backend/langflow/template/frontend_node/tools.py b/src/backend/langflow/template/frontend_node/tools.py index 2819be4d9..3094f3568 100644 --- a/src/backend/langflow/template/frontend_node/tools.py +++ b/src/backend/langflow/template/frontend_node/tools.py @@ -59,6 +59,52 @@ class ToolNode(FrontendNode): return super().to_dict() +class PythonFunctionToolNode(FrontendNode): + name: str = "PythonFunctionTool" + template: Template = Template( + type_name="PythonFunctionTool", + fields=[ + TemplateField( + field_type="str", + required=True, + placeholder="", + is_list=False, + show=True, + multiline=False, + value="", + name="name", + advanced=False, + ), + TemplateField( + field_type="str", + required=True, + placeholder="", + is_list=False, + show=True, + multiline=False, + value="", + name="description", + advanced=False, + ), + TemplateField( + field_type="code", + required=True, + placeholder="", + is_list=False, + show=True, + value=DEFAULT_PYTHON_FUNCTION, + name="code", + advanced=False, + ), + ], + ) + description: str = "Python function to be executed." + base_classes: list[str] = ["Tool"] + + def to_dict(self): + return super().to_dict() + + class PythonFunctionNode(FrontendNode): name: str = "PythonFunction" template: Template = Template( diff --git a/src/backend/langflow/utils/constants.py b/src/backend/langflow/utils/constants.py index 2d101ab98..1b6bbdcc3 100644 --- a/src/backend/langflow/utils/constants.py +++ b/src/backend/langflow/utils/constants.py @@ -7,6 +7,20 @@ OPENAI_MODELS = [ ] CHAT_OPENAI_MODELS = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"] +ANTHROPIC_MODELS = [ + "claude-v1", # largest model, ideal for a wide range of more complex tasks. + "claude-v1-100k", # An enhanced version of claude-v1 with a 100,000 token (roughly 75,000 word) context window. + "claude-instant-v1", # A smaller model with far lower latency, sampling at roughly 40 words/sec! + "claude-instant-v1-100k", # Like claude-instant-v1 with a 100,000 token context window but retains its performance. + # Specific sub-versions of the above models: + "claude-v1.3", # Vs claude-v1.2: better instruction-following, code, and non-English dialogue and writing. + "claude-v1.3-100k", # An enhanced version of claude-v1.3 with a 100,000 token (roughly 75,000 word) context window. + "claude-v1.2", # Vs claude-v1.1: small adv in general helpfulness, instruction following, coding, and other tasks. + "claude-v1.0", # An earlier version of claude-v1. + "claude-instant-v1.1", # Latest version of claude-instant-v1. Better than claude-instant-v1.0 at most tasks. + "claude-instant-v1.1-100k", # Version of claude-instant-v1.1 with a 100K token context window. + "claude-instant-v1.0", # An earlier version of claude-instant-v1. 
+] DEFAULT_PYTHON_FUNCTION = """ def python_function(text: str) -> str: diff --git a/src/backend/langflow/utils/util.py b/src/backend/langflow/utils/util.py index 293d31154..f4e4927d8 100644 --- a/src/backend/langflow/utils/util.py +++ b/src/backend/langflow/utils/util.py @@ -302,7 +302,9 @@ def format_dict(d, name: Optional[str] = None): elif name == "ChatOpenAI" and key == "model_name": value["options"] = constants.CHAT_OPENAI_MODELS value["list"] = True - + elif (name == "Anthropic" or name == "ChatAnthropic") and key == "model_name": + value["options"] = constants.ANTHROPIC_MODELS + value["list"] = True return d diff --git a/src/frontend/package-lock.json b/src/frontend/package-lock.json index 374236168..9000c133f 100644 --- a/src/frontend/package-lock.json +++ b/src/frontend/package-lock.json @@ -13,6 +13,7 @@ "@headlessui/react": "^1.7.10", "@heroicons/react": "^2.0.15", "@mui/material": "^5.11.9", + "@radix-ui/react-tooltip": "^1.0.6", "@tabler/icons-react": "^2.18.0", "@tailwindcss/forms": "^0.5.3", "@tailwindcss/line-clamp": "^0.4.4", @@ -20,7 +21,10 @@ "ansi-to-html": "^0.7.2", "axios": "^1.3.2", "base64-js": "^1.5.1", + "class-variance-authority": "^0.6.0", + "clsx": "^1.2.1", "lodash": "^4.17.21", + "lucide-react": "^0.233.0", "react": "^18.2.0", "react-ace": "^10.1.0", "react-cookie": "^4.1.1", @@ -37,6 +41,8 @@ "rehype-mathjax": "^4.0.2", "remark-gfm": "^3.0.1", "remark-math": "^5.1.1", + "tailwind-merge": "^1.13.0", + "tailwindcss-animate": "^1.0.5", "uuid": "^9.0.0", "vite-plugin-svgr": "^3.2.0", "web-vitals": "^2.1.4" @@ -911,6 +917,18 @@ "@floating-ui/core": "^1.2.6" } }, + "node_modules/@floating-ui/react-dom": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.0.tgz", + "integrity": "sha512-Ke0oU3SeuABC2C4OFu2mSAwHIP5WUiV98O9YWoHV4Q5aT6E9k06DV0Khi5uYspR8xmmBk08t8ZDcz3TR3ARkEg==", + "dependencies": { + "@floating-ui/dom": "^1.2.7" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, "node_modules/@headlessui/react": { "version": "1.7.10", "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.10.tgz", @@ -1274,6 +1292,407 @@ "url": "https://opencollective.com/popperjs" } }, + "node_modules/@radix-ui/primitive": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.0.1.tgz", + "integrity": "sha512-yQ8oGX2GVsEYMWGxcovu1uGWPCxV5BFfeeYxqPmuAzUyLT9qmaMXSAhXpb0WrspIeqYzdJpkh2vHModJPgRIaw==", + "dependencies": { + "@babel/runtime": "^7.13.10" + } + }, + "node_modules/@radix-ui/react-arrow": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.0.3.tgz", + "integrity": "sha512-wSP+pHsB/jQRaL6voubsQ/ZlrGBHHrOjmBnr19hxYgtS0WvAFwZhK2WP/YY5yF9uKECCEEDGxuLxq1NBK51wFA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.0.1.tgz", + "integrity": "sha512-fDSBgd44FKHa1FRMU59qBMPFcl2PZE+2nmqunj+BWFyYYjnhIDWL2ItDs3rrbJDQOtzt5nIebLCQc4QRfz6LJw==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + 
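The `format_dict` branch above and `_handle_model_specific_field_values` in `frontend_node/base.py` apply the same rule in two places: when a node named `Anthropic` or `ChatAnthropic` exposes a `model_name` field, attach `ANTHROPIC_MODELS` as dropdown options. Reduced to its essentials:

ANTHROPIC_MODELS = ["claude-v1", "claude-instant-v1"]  # abridged list

def attach_model_options(name: str, key: str, value: dict) -> dict:
    # Mark the field as a list of options so the frontend renders a dropdown.
    if name in ("Anthropic", "ChatAnthropic") and key == "model_name":
        value["options"] = ANTHROPIC_MODELS
        value["list"] = True
    return value

print(attach_model_options("ChatAnthropic", "model_name", {}))
# {'options': ['claude-v1', 'claude-instant-v1'], 'list': True}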
"peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-context": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.0.1.tgz", + "integrity": "sha512-ebbrdFoYTcuZ0v4wG5tedGnp9tzcV8awzsxYph7gXUyvnNLuTIcCk1q17JEbnVhXAKG9oX3KtchwiMIAYp9NLg==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.0.4.tgz", + "integrity": "sha512-7UpBa/RKMoHJYjie1gkF1DlK8l1fdU/VKDpoS3rCCo8YBJR294GwcEHyxHw72yvphJ7ld0AXEcSLAzY2F/WyCg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-escape-keydown": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-id": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.0.1.tgz", + "integrity": "sha512-tI7sT/kqYp8p96yGWY1OAnLHrqDgzHefRBKQ2YAkBS5ja7QLcZ9Z/uY7bEjPUatf8RomoXM8/1sMj1IJaE5UzQ==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.1.2.tgz", + "integrity": "sha512-1CnGGfFi/bbqtJZZ0P/NQY20xdG3E0LALJaLUEoKwPLwl6PPPfbeiCqMVQnhoFRAxjJj4RpBRJzDmUgsex2tSg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1", + "@radix-ui/react-use-rect": "1.0.1", + "@radix-ui/react-use-size": "1.0.1", + "@radix-ui/rect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.0.3.tgz", + "integrity": "sha512-xLYZeHrWoPmA5mEKEfZZevoVRK/Q43GfzRXkWV6qawIWWK8t6ifIiLQdd7rmQ4Vk1bmI21XhqF9BN3jWf+phpA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + 
"react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.0.1.tgz", + "integrity": "sha512-UXLW4UAbIY5ZjcvzjfRFo5gxva8QirC9hF7wRE4U5gz+TP0DbRk+//qyuAQ1McDxBt1xNMBTaciFGvEmJvAZCg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-1.0.3.tgz", + "integrity": "sha512-yi58uVyoAcK/Nq1inRY56ZSjKypBNKTa/1mcL8qdl6oJeEaDbOldlzrGn7P6Q3Id5d+SYNGc5AJgc4vGhjs5+g==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-slot": "1.0.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.0.2.tgz", + "integrity": "sha512-YeTpuq4deV+6DusvVUW4ivBgnkHwECUu0BiN43L5UCDFgdhsRUWAghhTF5MbvNTPzmiFOx90asDSUjWuCNapwg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.0.6.tgz", + "integrity": "sha512-DmNFOiwEc2UDigsYj6clJENma58OelxD24O4IODoZ+3sQc3Zb+L8w1EP+y9laTuKCLAysPw4fD6/v0j4KNV8rg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.4", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-popper": "1.1.2", + "@radix-ui/react-portal": "1.0.3", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2", + "@radix-ui/react-use-controllable-state": "1.0.1", + "@radix-ui/react-visually-hidden": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.0.1.tgz", + "integrity": "sha512-D94LjX4Sp0xJFVaoQOd3OO9k7tpBYNOXdVhkltUbGv2Qb9OXdrg/CpsjlZv7ia14Sylv398LswWBVVu5nqKzAQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + 
"@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.0.1.tgz", + "integrity": "sha512-Svl5GY5FQeN758fWKrjM6Qb7asvXeiZltlT4U2gVfl8Gx5UAv2sMR0LWo8yhsIZh2oQ0eFdZ59aoOOMV7b47VA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.0.3.tgz", + "integrity": "sha512-vyL82j40hcFicA+M4Ex7hVkB9vHgSse1ZWomAqV2Je3RleKGO5iM8KMOEtfoSB0PnIelMd2lATjTGMYqN5ylTg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.0.1.tgz", + "integrity": "sha512-v/5RegiJWYdoCvMnITBkNNx6bCj20fiaJnWtRkU18yITptraXjffz5Qbn05uOiQnOvi+dbkznkoaMltz1GnszQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-rect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.0.1.tgz", + "integrity": "sha512-Cq5DLuSiuYVKNU8orzJMbl15TXilTnJKUCltMVQg53BQOF1/C5toAaGrowkgksdBQ9H+SRL23g0HDmg9tvmxXw==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/rect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-size": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.0.1.tgz", + "integrity": "sha512-ibay+VqrgcaI6veAojjofPATwledXiSmX+C0KrBk/xgpX9rBzPV3OsfwlhQdUOFbh+LKQorLYT+xTXW9V8yd0g==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.0.3.tgz", + "integrity": "sha512-D4w41yN5YRKtu464TLnByKzMDG/JlMPHtfZgQAu9v6mNakUqGUI9vUrfQKz8NK41VMm/xbZbh76NUTVtIYqOMA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + 
"@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/rect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.0.1.tgz", + "integrity": "sha512-fyrgCaedtvMg9NK3en0pnOYJdtfwxUcNolezkNPUsoX57X8oQk+NkqcvzHXD2uKNij6GXmWU9NDru2IWjrO4BQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + } + }, "node_modules/@reactflow/background": { "version": "11.1.7", "resolved": "https://registry.npmjs.org/@reactflow/background/-/background-11.1.7.tgz", @@ -2422,7 +2841,7 @@ "version": "18.2.4", "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.4.tgz", "integrity": "sha512-G2mHoTMTL4yoydITgOGwWdWMVd8sNgyEP85xVmMKAPUBwQWm9wBPQUmvbeF4V3WBY1P7mmL4BkjQ0SqUpf1snw==", - "dev": true, + "devOptional": true, "dependencies": { "@types/react": "*" } @@ -2947,6 +3366,25 @@ "node": ">= 6" } }, + "node_modules/class-variance-authority": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.6.0.tgz", + "integrity": "sha512-qdRDgfjx3GRb9fpwpSvn+YaidnT7IUJNe4wt5/SWwM+PmUwJUhQRk/8zAyNro0PmVfmen2635UboTjIBXXxy5A==", + "dependencies": { + "clsx": "1.2.1" + }, + "funding": { + "url": "https://joebell.co.uk" + }, + "peerDependencies": { + "typescript": ">= 4.5.5 < 6" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, "node_modules/classcat": { "version": "5.0.4", "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.4.tgz", @@ -4728,6 +5166,14 @@ "yallist": "^3.0.2" } }, + "node_modules/lucide-react": { + "version": "0.233.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.233.0.tgz", + "integrity": "sha512-r0jMHF0vPDq2wBbZ0B3rtIcBjDyWDKpHu+vAjD2OHn2WLUr3HN5IHovtO0EMgQXuSI7YrMZbjsEZWC2uBHr8nQ==", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0" + } + }, "node_modules/lz-string": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.4.4.tgz", @@ -7001,6 +7447,15 @@ "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==" }, + "node_modules/tailwind-merge": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-1.13.0.tgz", + "integrity": "sha512-mUTmDbcU+IhOvJ0c42eLQ/nRkvolTqfpVaVQRSxfJAv9TabS6Y2zW/1wKpKLdKzyL3Gh8j6NTLl6MWNmvOM6kA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, "node_modules/tailwindcss": { "version": "3.3.2", "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.2.tgz", @@ -7038,6 +7493,14 @@ "node": ">=14.0.0" } }, + "node_modules/tailwindcss-animate": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tailwindcss-animate/-/tailwindcss-animate-1.0.5.tgz", + "integrity": "sha512-UU3qrOJ4lFQABY+MVADmBm+0KW3xZyhMdRvejwtXqYOL7YjHYxmuREFAZdmVG5LPe5E9CAst846SLC4j5I3dcw==", + "peerDependencies": { + "tailwindcss": ">=3.0.0 || insiders" + } + }, "node_modules/terser": { "version": "5.16.3", "resolved": "https://registry.npmjs.org/terser/-/terser-5.16.3.tgz", @@ -7156,7 +7619,7 @@ "version": "5.0.4", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.0.4.tgz", "integrity": "sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==", - "dev": true, + "devOptional": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" diff --git 
a/src/frontend/package.json b/src/frontend/package.json index bd846ce19..c7af46608 100644 --- a/src/frontend/package.json +++ b/src/frontend/package.json @@ -8,6 +8,7 @@ "@headlessui/react": "^1.7.10", "@heroicons/react": "^2.0.15", "@mui/material": "^5.11.9", + "@radix-ui/react-tooltip": "^1.0.6", "@tabler/icons-react": "^2.18.0", "@tailwindcss/forms": "^0.5.3", "@tailwindcss/line-clamp": "^0.4.4", @@ -15,7 +16,10 @@ "ansi-to-html": "^0.7.2", "axios": "^1.3.2", "base64-js": "^1.5.1", + "class-variance-authority": "^0.6.0", + "clsx": "^1.2.1", "lodash": "^4.17.21", + "lucide-react": "^0.233.0", "react": "^18.2.0", "react-ace": "^10.1.0", "react-cookie": "^4.1.1", @@ -32,6 +36,8 @@ "rehype-mathjax": "^4.0.2", "remark-gfm": "^3.0.1", "remark-math": "^5.1.1", + "tailwind-merge": "^1.13.0", + "tailwindcss-animate": "^1.0.5", "uuid": "^9.0.0", "vite-plugin-svgr": "^3.2.0", "web-vitals": "^2.1.4" diff --git a/src/frontend/src/App.tsx b/src/frontend/src/App.tsx index e00019822..9e84a6430 100644 --- a/src/frontend/src/App.tsx +++ b/src/frontend/src/App.tsx @@ -170,7 +170,7 @@ export default function App() { className="absolute left-7 bottom-2 flex h-6 cursor-pointer flex-col items-center justify-start overflow-hidden rounded-lg bg-gray-800 px-2 text-center font-sans text-xs tracking-wide text-gray-300 transition-all duration-500 ease-in-out hover:h-12 dark:bg-gray-100 dark:text-gray-800" > {version &&
⛓️ LangFlow v{version}
} -
Created by Logspace
+
Created by Logspace
); diff --git a/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx b/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx index 653248763..dc77c2877 100644 --- a/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx +++ b/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx @@ -1,6 +1,11 @@ import { Handle, Position, useUpdateNodeInternals } from "reactflow"; import Tooltip from "../../../../components/TooltipComponent"; -import { classNames, isValidConnection } from "../../../../utils"; +import { + classNames, + groupByFamily, + isValidConnection, + toFirstUpperCase, +} from "../../../../utils"; import { useContext, useEffect, useRef, useState } from "react"; import InputComponent from "../../../../components/inputComponent"; import ToggleComponent from "../../../../components/toggleComponent"; @@ -15,6 +20,10 @@ import InputFileComponent from "../../../../components/inputFileComponent"; import { TabsContext } from "../../../../contexts/tabsContext"; import IntComponent from "../../../../components/intComponent"; import PromptAreaComponent from "../../../../components/promptComponent"; +import { nodeNames, nodeIcons } from "../../../../utils"; +import React from "react"; +import { nodeColors } from "../../../../utils"; +import ShadTooltip from "../../../../components/ShadTooltipComponent"; export default function ParameterComponent({ left, @@ -28,6 +37,7 @@ export default function ParameterComponent({ required = false, }: ParameterComponentType) { const ref = useRef(null); + const refHtml = useRef(null); const updateNodeInternals = useUpdateNodeInternals(); const [position, setPosition] = useState(0); useEffect(() => { @@ -48,6 +58,48 @@ export default function ParameterComponent({ let disabled = reactFlowInstance?.getEdges().some((e) => e.targetHandle === id) ?? false; const { save } = useContext(TabsContext); + const [myData, setMyData] = useState(useContext(typesContext).data); + + useEffect(() => { + const groupedObj = groupByFamily(myData, tooltipTitle); + + refHtml.current = groupedObj.map((item, i) => ( + 0 ? "items-center flex mt-3" : "items-center flex" + )} + > +
+ {React.createElement(nodeIcons[item.family])} +
+ + {nodeNames[item.family] ?? ""}{" "} + + {" "} + -  + {item.type.split(", ").length > 2 + ? item.type.split(", ").map((el, i) => ( + <> + + {i == item.type.split(", ").length - 1 + ? el + : (el += `, `)} + + {i % 2 == 0 && i > 0 &&

} + + )) + : item.type} +
+
+
+ )); + }, [tooltipTitle]); return (
) : ( - + - + )} {left === true && diff --git a/src/frontend/src/CustomNodes/GenericNode/index.tsx b/src/frontend/src/CustomNodes/GenericNode/index.tsx index 1a7f93ae5..79a241160 100644 --- a/src/frontend/src/CustomNodes/GenericNode/index.tsx +++ b/src/frontend/src/CustomNodes/GenericNode/index.tsx @@ -28,8 +28,8 @@ import NodeModal from "../../modals/NodeModal"; import { useCallback } from "react"; import { TabsContext } from "../../contexts/tabsContext"; import { debounce } from "../../utils"; -import TooltipReact from "../../components/ReactTooltipComponent"; import Tooltip from "../../components/TooltipComponent"; +import ShadTooltip from "../../components/ShadTooltipComponent"; export default function GenericNode({ data, selected, @@ -115,14 +115,9 @@ export default function GenericNode({ }} />
- +
{data.type}
-
+
@@ -253,11 +248,7 @@ export default function GenericNode({ : toTitleCase(t) } name={t} - tooltipTitle={ - "Type: " + - data.node.template[t].type + - (data.node.template[t].list ? " list" : "") - } + tooltipTitle={data.node.template[t].type} required={data.node.template[t].required} id={data.node.template[t].type + "|" + t + "|" + data.id} left={true} @@ -283,7 +274,7 @@ export default function GenericNode({ data={data} color={nodeColors[types[data.type]] ?? nodeColors.unknown} title={data.type} - tooltipTitle={`Type: ${data.node.base_classes.join(" | ")}`} + tooltipTitle={`${data.node.base_classes.join("\n")}`} id={[data.type, data.id, ...data.node.base_classes].join("|")} type={data.node.base_classes.join("|")} left={false} diff --git a/src/frontend/src/components/ExtraSidebarComponent/index.tsx b/src/frontend/src/components/ExtraSidebarComponent/index.tsx index f5acd5a23..b06a58d9c 100644 --- a/src/frontend/src/components/ExtraSidebarComponent/index.tsx +++ b/src/frontend/src/components/ExtraSidebarComponent/index.tsx @@ -1,6 +1,6 @@ import { Disclosure } from "@headlessui/react"; import { ChevronLeftIcon } from "@heroicons/react/24/outline"; -import { useContext } from "react"; +import { useContext, useState } from "react"; import { Link } from "react-router-dom"; import { classNames } from "../../utils"; import { locationContext } from "../../contexts/locationContext"; @@ -13,6 +13,7 @@ export default function ExtraSidebar() { extraNavigation, extraComponent, } = useContext(locationContext); + return ( <>