Merge branch 'dev' into fix_llama
commit 2b8333ee95
92 changed files with 2104 additions and 808 deletions
@@ -1,2 +1,6 @@
#!/bin/sh

make format
added_files=$(git diff --name-only --cached --diff-filter=d)

make format
git add ${added_files}
1 Makefile
@@ -43,6 +43,7 @@ install_backend:
	poetry install

backend:
	make install_backend
	poetry run uvicorn langflow.main:app --port 7860 --reload --log-level debug

build_frontend:
57 poetry.lock (generated)
@@ -148,6 +148,27 @@ files = [
    {file = "aiostream-0.4.5.tar.gz", hash = "sha256:3ecbf87085230fbcd9605c32ca20c4fb41af02c71d076eab246ea22e35947d88"},
]

[[package]]
name = "anthropic"
version = "0.2.10"
description = "Library for accessing the anthropic API"
category = "main"
optional = false
python-versions = ">=3.8"
files = [
    {file = "anthropic-0.2.10-py3-none-any.whl", hash = "sha256:a007496207fd186b0bcb9592b00ca130069d2a427f3d6f602a61dbbd1ac6316e"},
    {file = "anthropic-0.2.10.tar.gz", hash = "sha256:e4da061a86d8ffb86072c0b0feaf219a3a4f7dfddd4224df9ba769e469498c19"},
]

[package.dependencies]
aiohttp = "*"
httpx = "*"
requests = "*"
tokenizers = "*"

[package.extras]
dev = ["black (>=22.3.0)", "pytest"]

[[package]]
name = "anyio"
version = "3.7.0"
@@ -1228,14 +1249,14 @@ importlib-resources = {version = ">=5.0", markers = "python_version < \"3.10\""}

[[package]]
name = "fastapi"
version = "0.95.2"
version = "0.96.0"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
category = "main"
optional = false
python-versions = ">=3.7"
files = [
    {file = "fastapi-0.95.2-py3-none-any.whl", hash = "sha256:d374dbc4ef2ad9b803899bd3360d34c534adc574546e25314ab72c0c4411749f"},
    {file = "fastapi-0.95.2.tar.gz", hash = "sha256:4d9d3e8c71c73f11874bcf5e33626258d143252e329a01002f767306c64fb982"},
    {file = "fastapi-0.96.0-py3-none-any.whl", hash = "sha256:b8e11fe81e81eab4e1504209917338e0b80f783878a42c2b99467e5e1019a1e9"},
    {file = "fastapi-0.96.0.tar.gz", hash = "sha256:71232d47c2787446991c81c41c249f8a16238d52d779c0e6b43927d3773dbe3c"},
]

[package.dependencies]
@@ -2356,20 +2377,21 @@ test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"]

[[package]]
name = "langchain"
version = "0.0.186"
version = "0.0.194"
description = "Building applications with LLMs through composability"
category = "main"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
    {file = "langchain-0.0.186-py3-none-any.whl", hash = "sha256:c53ac8943351602dbde84759d32d57fe2e6599279576935a004688e43ee8ffbf"},
    {file = "langchain-0.0.186.tar.gz", hash = "sha256:36d6d3872727a6f7d6db1b05b13caac35fed19a0d395d2264ed82aae53cfddfd"},
    {file = "langchain-0.0.194-py3-none-any.whl", hash = "sha256:b1d47f96c3556eebb5b330492e64fed1f5585c943be7e1fe675ff31a84b010e3"},
    {file = "langchain-0.0.194.tar.gz", hash = "sha256:480c9cbce12161b3aece3b6fdf03f533c157539ae6243712b61b0d2558f9a96c"},
]

[package.dependencies]
aiohttp = ">=3.8.3,<4.0.0"
async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""}
dataclasses-json = ">=0.5.7,<0.6.0"
langchainplus-sdk = ">=0.0.6"
numexpr = ">=2.8.4,<3.0.0"
numpy = ">=1,<2"
openapi-schema-pydantic = ">=1.2,<2.0"
@@ -2380,12 +2402,12 @@ SQLAlchemy = ">=1.4,<3"
tenacity = ">=8.1.0,<9.0.0"

[package.extras]
all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.2.6,<0.3.0)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=3,<4)", "deeplake (>=3.3.0,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=2.8.6,<3.0.0)", "elasticsearch (>=8,<9)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jina (>=3.14,<4.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.1.dev3,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "momento (>=1.5.0,<2.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<3.0.0)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.1.2,<2.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "spacy (>=3,<4)", "steamship (>=2.16.9,<3.0.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tiktoken (>=0.3.2,<0.4.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"]
all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.2.6,<0.3.0)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=3,<4)", "deeplake (>=3.3.0,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=2.8.6,<3.0.0)", "elasticsearch (>=8,<9)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jina (>=3.14,<4.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.1.dev3,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "momento (>=1.5.0,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<3.0.0)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.1.2,<2.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.6.1,<0.7.0)", "spacy (>=3,<4)", "steamship (>=2.16.9,<3.0.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tigrisdb (>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.4.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"]
azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "openai (>=0,<1)"]
cohere = ["cohere (>=3,<4)"]
docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"]
embeddings = ["sentence-transformers (>=2,<3)"]
extended-testing = ["atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "chardet (>=5.1.0,<6.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "psychicapi (>=0.2,<0.3)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "telethon (>=1.28.5,<2.0.0)", "tqdm (>=4.48.0)", "zep-python (>=0.30,<0.31)"]
extended-testing = ["atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "chardet (>=5.1.0,<6.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "psychicapi (>=0.5,<0.6)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "telethon (>=1.28.5,<2.0.0)", "tqdm (>=4.48.0)", "zep-python (>=0.31)"]
llms = ["anthropic (>=0.2.6,<0.3.0)", "cohere (>=3,<4)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"]
openai = ["openai (>=0,<1)", "tiktoken (>=0.3.2,<0.4.0)"]
qdrant = ["qdrant-client (>=1.1.2,<2.0.0)"]
@@ -2418,6 +2440,23 @@ typing-inspect = "0.8.0"
[package.extras]
test = ["psutil", "pytest", "pytest-asyncio"]

[[package]]
name = "langchainplus-sdk"
version = "0.0.6"
description = "Client library to connect to the LangChainPlus LLM Tracing and Evaluation Platform."
category = "main"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
    {file = "langchainplus_sdk-0.0.6-py3-none-any.whl", hash = "sha256:43fe01c66442b88403c969b8812f6be81e023c0d2a6d5d3572a8d87961438658"},
    {file = "langchainplus_sdk-0.0.6.tar.gz", hash = "sha256:c911a98fd2d02baa48f742b7d700fd6a55f11c9a545ee5d66b08825940c9a32e"},
]

[package.dependencies]
pydantic = ">=1,<2"
requests = ">=2,<3"
tenacity = ">=8.1.0,<9.0.0"

[[package]]
name = "linkify-it-py"
version = "2.0.2"
@@ -6257,4 +6296,4 @@ deploy = ["langchain-serve"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<3.12"
content-hash = "0b72cff85b2228a6f41d81ac2207cecf1d94c6adb914a3ef4fb19774d757f9f6"
content-hash = "94e4a4ca5ec150ef6f673d4a3ab4964a876ce795ae398b78c76719695c63758e"
@@ -22,14 +22,14 @@ langflow = "langflow.__main__:main"

[tool.poetry.dependencies]
python = ">=3.9,<3.12"
fastapi = "^0.95.0"
fastapi = "^0.96.0"
uvicorn = "^0.20.0"
beautifulsoup4 = "^4.11.2"
google-search-results = "^2.4.1"
google-api-python-client = "^2.79.0"
typer = "^0.7.0"
gunicorn = "^20.1.0"
langchain = "^0.0.186"
langchain = "^0.0.194"
openai = "^0.27.7"
types-pyyaml = "^6.0.12.8"
dill = "^0.3.6"
@@ -58,6 +58,7 @@ sentence-transformers = "^2.2.2"
ctransformers = "^0.2.2"
cohere = "^4.6.0"
faiss-cpu = "^1.7.4"
anthropic = "^0.2.9"


[tool.poetry.group.dev.dependencies]
@@ -77,6 +78,15 @@ types-pillow = "^9.5.0.2"
[tool.poetry.extras]
deploy = ["langchain-serve"]

[tool.pytest.ini_options]
minversion = "6.0"
addopts = "-ra"
testpaths = ["tests", "integration"]
console_output_style = "progress"
filterwarnings = ["ignore::DeprecationWarning"]
log_cli = true


[tool.ruff]
line-length = 120
@@ -1,4 +1,4 @@
from langflow.cache import cache_manager
from langflow.interface.loading import load_flow_from_json
from langflow.processing.process import load_flow_from_json

__all__ = ["load_flow_from_json", "cache_manager"]
@@ -0,0 +1,3 @@
from langflow.api.router import router

__all__ = ["router"]
8 src/backend/langflow/api/router.py (new file)
@@ -0,0 +1,8 @@
# Router for base api
from fastapi import APIRouter
from langflow.api.v1 import chat_router, endpoints_router, validate_router

router = APIRouter(prefix="/api/v1", tags=["api"])
router.include_router(chat_router)
router.include_router(endpoints_router)
router.include_router(validate_router)
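The new router.py wires the three v1 routers into a single APIRouter under the /api/v1 prefix. A minimal sketch of mounting it on an application follows; the FastAPI app object here is an assumption for illustration, since the application entry point is not part of this diff (the Makefile target above only references langflow.main:app).

from fastapi import FastAPI

from langflow.api.router import router

app = FastAPI()  # hypothetical app instance, not shown in this commit
app.include_router(router)  # serves the chat, endpoints, and validate routers under /api/v1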
5 src/backend/langflow/api/v1/__init__.py (new file)
@@ -0,0 +1,5 @@
from langflow.api.v1.endpoints import router as endpoints_router
from langflow.api.v1.validate import router as validate_router
from langflow.api.v1.chat import router as chat_router

__all__ = ["chat_router", "endpoints_router", "validate_router"]
@@ -1,6 +1,6 @@
from pydantic import BaseModel, validator

from langflow.graph.utils import extract_input_variables_from_prompt
from langflow.interface.utils import extract_input_variables_from_prompt


class CacheResponse(BaseModel):
@@ -3,7 +3,7 @@ from typing import Any

from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler

from langflow.api.schemas import ChatResponse
from langflow.api.v1.schemas import ChatResponse


# https://github.com/hwchase17/chat-langchain/blob/master/callback.py
@@ -6,7 +6,7 @@ from fastapi import (
    status,
)

from langflow.api.chat_manager import ChatManager
from langflow.chat.manager import ChatManager
from langflow.utils.logger import logger

router = APIRouter()
@@ -3,13 +3,13 @@ from importlib.metadata import version

from fastapi import APIRouter, HTTPException

from langflow.api.schemas import (
from langflow.api.v1.schemas import (
    ExportedFlow,
    GraphData,
    PredictRequest,
    PredictResponse,
)
from langflow.interface.run import process_graph_cached

from langflow.interface.types import build_langchain_types_dict

# build router
@@ -25,6 +25,8 @@ def get_all():
@router.post("/predict", response_model=PredictResponse)
async def get_load(predict_request: PredictRequest):
    try:
        from langflow.processing.process import process_graph_cached

        exported_flow: ExportedFlow = predict_request.exported_flow
        graph_data: GraphData = exported_flow.data
        data = graph_data.dict()
@@ -40,8 +42,3 @@ async def get_load(predict_request: PredictRequest):
@router.get("/version")
def get_version():
    return {"version": version("langflow")}


@router.get("/health")
def get_health():
    return {"status": "OK"}
@@ -2,15 +2,15 @@ import json

from fastapi import APIRouter, HTTPException

from langflow.api.base import (
from langflow.api.v1.base import (
    Code,
    CodeValidationResponse,
    Prompt,
    PromptValidationResponse,
    validate_prompt,
)
from langflow.graph.nodes import VectorStoreNode
from langflow.interface.run import build_graph
from langflow.graph.vertex.types import VectorStoreVertex
from langflow.graph import Graph
from langflow.utils.logger import logger
from langflow.utils.validate import validate_code
@@ -44,12 +44,12 @@ def post_validate_prompt(prompt: Prompt):
def post_validate_node(node_id: str, data: dict):
    try:
        # build graph
        graph = build_graph(data)
        graph = Graph.from_payload(data)
        # validate node
        node = graph.get_node(node_id)
        if node is None:
            raise ValueError(f"Node {node_id} not found")
        if not isinstance(node, VectorStoreNode):
        if not isinstance(node, VectorStoreVertex):
            node.build()
        return json.dumps({"valid": True, "params": str(node._built_object_repr())})
    except Exception as e:
0 src/backend/langflow/chat/__init__.py (new file)
@@ -1,21 +1,18 @@
import asyncio
import json
from collections import defaultdict
from typing import Dict, List

from fastapi import WebSocket, status

from langflow.api.schemas import ChatMessage, ChatResponse, FileResponse
from langflow.api.v1.schemas import ChatMessage, ChatResponse, FileResponse
from langflow.cache import cache_manager
from langflow.cache.manager import Subject
from langflow.interface.run import (
    get_result_and_steps,
    load_or_build_langchain_object,
)
from langflow.interface.utils import pil_to_base64, try_setting_streaming_options
from langflow.chat.utils import process_graph
from langflow.interface.utils import pil_to_base64
from langflow.utils.logger import logger


import asyncio
import json
from typing import Dict, List


class ChatHistory(Subject):
    def __init__(self):
        super().__init__()
@@ -191,33 +188,3 @@ class ChatManager:
        except Exception as e:
            logger.exception(e)
            self.disconnect(client_id)


async def process_graph(
    graph_data: Dict,
    is_first_message: bool,
    chat_message: ChatMessage,
    websocket: WebSocket,
):
    langchain_object = load_or_build_langchain_object(graph_data, is_first_message)
    langchain_object = try_setting_streaming_options(langchain_object, websocket)
    logger.debug("Loaded langchain object")

    if langchain_object is None:
        # Raise user facing error
        raise ValueError(
            "There was an error loading the langchain_object. Please, check all the nodes and try again."
        )

    # Generate result and thought
    try:
        logger.debug("Generating result and thought")
        result, intermediate_steps = await get_result_and_steps(
            langchain_object, chat_message.message or "", websocket=websocket
        )
        logger.debug("Generated result and intermediate_steps")
        return result, intermediate_steps
    except Exception as e:
        # Log stack trace
        logger.exception(e)
        raise e
41 src/backend/langflow/chat/utils.py (new file)
@@ -0,0 +1,41 @@
from fastapi import WebSocket
from langflow.api.v1.schemas import ChatMessage
from langflow.processing.process import (
    load_or_build_langchain_object,
)
from langflow.processing.base import get_result_and_steps
from langflow.interface.utils import try_setting_streaming_options
from langflow.utils.logger import logger


from typing import Dict


async def process_graph(
    graph_data: Dict,
    is_first_message: bool,
    chat_message: ChatMessage,
    websocket: WebSocket,
):
    langchain_object = load_or_build_langchain_object(graph_data, is_first_message)
    langchain_object = try_setting_streaming_options(langchain_object, websocket)
    logger.debug("Loaded langchain object")

    if langchain_object is None:
        # Raise user facing error
        raise ValueError(
            "There was an error loading the langchain_object. Please, check all the nodes and try again."
        )

    # Generate result and thought
    try:
        logger.debug("Generating result and thought")
        result, intermediate_steps = await get_result_and_steps(
            langchain_object, chat_message.message or "", websocket=websocket
        )
        logger.debug("Generated result and intermediate_steps")
        return result, intermediate_steps
    except Exception as e:
        # Log stack trace
        logger.exception(e)
        raise e
@@ -51,10 +51,13 @@ embeddings:
llms:
  - OpenAI
  # - AzureOpenAI
  # - AzureChatOpenAI
  - ChatOpenAI
  - LlamaCpp
  - CTransformers
  - Cohere
  - Anthropic
  - ChatAnthropic
  - HuggingFaceHub
memories:
  - ConversationBufferMemory
@@ -74,12 +77,14 @@ toolkits:
  - JsonToolkit
  - VectorStoreInfo
  - VectorStoreRouterToolkit
  - VectorStoreToolkit
tools:
  - Search
  - PAL-MATH
  - Calculator
  - Serper Search
  - Tool
  - PythonFunctionTool
  - PythonFunction
  - JsonSpec
  - News API
@@ -4,6 +4,7 @@ from langflow.template import frontend_node
CUSTOM_NODES = {
    "prompts": {"ZeroShotPrompt": frontend_node.prompts.ZeroShotPromptNode()},
    "tools": {
        "PythonFunctionTool": frontend_node.tools.PythonFunctionToolNode(),
        "PythonFunction": frontend_node.tools.PythonFunctionNode(),
        "Tool": frontend_node.tools.ToolNode(),
    },
@@ -1,4 +1,35 @@
from langflow.graph.base import Edge, Node
from langflow.graph.graph import Graph
from langflow.graph.edge.base import Edge
from langflow.graph.graph.base import Graph
from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.types import (
    AgentVertex,
    ChainVertex,
    DocumentLoaderVertex,
    EmbeddingVertex,
    LLMVertex,
    MemoryVertex,
    PromptVertex,
    TextSplitterVertex,
    ToolVertex,
    ToolkitVertex,
    VectorStoreVertex,
    WrapperVertex,
)

__all__ = ["Graph", "Node", "Edge"]
__all__ = [
    "Graph",
    "Vertex",
    "Edge",
    "AgentVertex",
    "ChainVertex",
    "DocumentLoaderVertex",
    "EmbeddingVertex",
    "LLMVertex",
    "MemoryVertex",
    "PromptVertex",
    "TextSplitterVertex",
    "ToolVertex",
    "ToolkitVertex",
    "VectorStoreVertex",
    "WrapperVertex",
]
0 src/backend/langflow/graph/edge/__init__.py (new file)
52 src/backend/langflow/graph/edge/base.py (new file)
@@ -0,0 +1,52 @@
from langflow.utils.logger import logger
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from langflow.graph.vertex.base import Vertex


class Edge:
    def __init__(self, source: "Vertex", target: "Vertex"):
        self.source: "Vertex" = source
        self.target: "Vertex" = target
        self.validate_edge()

    def validate_edge(self) -> None:
        # Validate that the outputs of the source node are valid inputs
        # for the target node
        self.source_types = self.source.output
        self.target_reqs = self.target.required_inputs + self.target.optional_inputs
        # Both lists contain strings and sometimes a string contains the value we are
        # looking for e.g. source_types=["Chain"] and target_reqs=["LLMChain"]
        # so we need to check if any of the strings in source_types is in target_reqs
        self.valid = any(
            output in target_req
            for output in self.source_types
            for target_req in self.target_reqs
        )
        # Get what type of input the target node is expecting

        self.matched_type = next(
            (
                output
                for output in self.source_types
                for target_req in self.target_reqs
                if output in target_req
            ),
            None,
        )
        no_matched_type = self.matched_type is None
        if no_matched_type:
            logger.debug(self.source_types)
            logger.debug(self.target_reqs)
        if no_matched_type:
            raise ValueError(
                f"Edge between {self.source.vertex_type} and {self.target.vertex_type} "
                f"has no matched type"
            )

    def __repr__(self) -> str:
        return (
            f"Edge(source={self.source.id}, target={self.target.id}, valid={self.valid}"
            f", matched_type={self.matched_type})"
        )
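validate_edge treats type compatibility as substring containment: the edge is valid when any output type name of the source appears inside one of the target's required or optional input names. A standalone sketch of that rule, using illustrative type names rather than values taken from this diff:

source_types = ["Chain"]
target_reqs = ["LLMChain", "memory"]

# "Chain" is a substring of "LLMChain", so the edge would be considered valid
valid = any(output in target_req for output in source_types for target_req in target_reqs)
matched_type = next(
    (output for output in source_types for target_req in target_reqs if output in target_req),
    None,
)
print(valid, matched_type)  # True Chain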
0 src/backend/langflow/graph/graph/__init__.py (new file)
@@ -1,38 +1,20 @@
from typing import Dict, List, Type, Union

from langflow.graph.base import Edge, Node
from langflow.graph.nodes import (
    AgentNode,
    ChainNode,
    DocumentLoaderNode,
    EmbeddingNode,
    FileToolNode,
    LLMNode,
    MemoryNode,
    PromptNode,
    TextSplitterNode,
    ToolkitNode,
    ToolNode,
    VectorStoreNode,
    WrapperNode,
from langflow.graph.edge.base import Edge
from langflow.graph.graph.constants import VERTEX_TYPE_MAP
from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.types import (
    FileToolVertex,
    LLMVertex,
    ToolkitVertex,
)
from langflow.interface.agents.base import agent_creator
from langflow.interface.chains.base import chain_creator
from langflow.interface.document_loaders.base import documentloader_creator
from langflow.interface.embeddings.base import embedding_creator
from langflow.interface.llms.base import llm_creator
from langflow.interface.memories.base import memory_creator
from langflow.interface.prompts.base import prompt_creator
from langflow.interface.text_splitters.base import textsplitter_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.tools.constants import FILE_TOOLS
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
from langflow.utils import payload


class Graph:
    """A class representing a graph of nodes and edges."""

    def __init__(
        self,
        nodes: List[Dict[str, Union[str, Dict[str, Union[str, List[str]]]]]],
@@ -42,8 +24,30 @@ class Graph:
        self._edges = edges
        self._build_graph()

    @classmethod
    @classmethod
    def from_payload(cls, payload: Dict) -> "Graph":
        """
        Creates a graph from a payload.

        Args:
            payload (Dict): The payload to create the graph from.

        Returns:
            Graph: The created graph.
        """
        if "data" in payload:
            payload = payload["data"]
        try:
            nodes = payload["nodes"]
            edges = payload["edges"]
            return cls(nodes, edges)
        except KeyError as exc:
            raise ValueError("Invalid payload") from exc

    def _build_graph(self) -> None:
        self.nodes = self._build_nodes()
        """Builds the graph from the nodes and edges."""
        self.nodes = self._build_vertices()
        self.edges = self._build_edges()
        for edge in self.edges:
            edge.source.add_edge(edge)
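The new Graph.from_payload classmethod accepts either a full exported flow (with a top-level "data" key) or the inner dict holding "nodes" and "edges". A brief usage sketch; the flow.json path and its contents are assumptions for illustration only:

import json

from langflow.graph import Graph

with open("flow.json", "r", encoding="utf-8") as f:  # hypothetical exported flow file
    payload = json.load(f)

graph = Graph.from_payload(payload)  # unwraps payload["data"] when present
langchain_object = graph.build()     # builds from the root node, as in Graph.build()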
@@ -51,17 +55,25 @@ class Graph:

        # This is a hack to make sure that the LLM node is sent to
        # the toolkit node
        self._build_node_params()
        # remove invalid nodes
        self._remove_invalid_nodes()

    def _build_node_params(self) -> None:
        """Identifies and handles the LLM node within the graph."""
        llm_node = None
        for node in self.nodes:
            node._build_params()

            if isinstance(node, LLMNode):
            if isinstance(node, LLMVertex):
                llm_node = node

        for node in self.nodes:
            if isinstance(node, ToolkitNode):
                node.params["llm"] = llm_node
        # remove invalid nodes
        if llm_node:
            for node in self.nodes:
                if isinstance(node, ToolkitVertex):
                    node.params["llm"] = llm_node

    def _remove_invalid_nodes(self) -> None:
        """Removes invalid nodes from the graph."""
        self.nodes = [
            node
            for node in self.nodes
@@ -69,28 +81,33 @@ class Graph:
            or (len(self.nodes) == 1 and len(self.edges) == 0)
        ]

    def _validate_node(self, node: Node) -> bool:
    def _validate_node(self, node: Vertex) -> bool:
        """Validates a node."""
        # All nodes that do not have edges are invalid
        return len(node.edges) > 0

    def get_node(self, node_id: str) -> Union[None, Node]:
    def get_node(self, node_id: str) -> Union[None, Vertex]:
        """Returns a node by id."""
        return next((node for node in self.nodes if node.id == node_id), None)

    def get_nodes_with_target(self, node: Node) -> List[Node]:
        connected_nodes: List[Node] = [
    def get_nodes_with_target(self, node: Vertex) -> List[Vertex]:
        """Returns the nodes connected to a node."""
        connected_nodes: List[Vertex] = [
            edge.source for edge in self.edges if edge.target == node
        ]
        return connected_nodes

    def build(self) -> List[Node]:
    def build(self) -> List[Vertex]:
        """Builds the graph."""
        # Get root node
        root_node = payload.get_root_node(self)
        if root_node is None:
            raise ValueError("No root node found")
        return root_node.build()

    def get_node_neighbors(self, node: Node) -> Dict[Node, int]:
        neighbors: Dict[Node, int] = {}
    def get_node_neighbors(self, node: Vertex) -> Dict[Vertex, int]:
        """Returns the neighbors of a node."""
        neighbors: Dict[Vertex, int] = {}
        for edge in self.edges:
            if edge.source == node:
                neighbor = edge.target
@@ -105,6 +122,7 @@ class Graph:
        return neighbors

    def _build_edges(self) -> List[Edge]:
        """Builds the edges of the graph."""
        # Edge takes two nodes as arguments, so we need to build the nodes first
        # and then build the edges
        # if we can't find a node, we raise an error
@@ -120,43 +138,31 @@ class Graph:
            edges.append(Edge(source, target))
        return edges

    def _get_node_class(self, node_type: str, node_lc_type: str) -> Type[Node]:
        node_type_map: Dict[str, Type[Node]] = {
            **{t: PromptNode for t in prompt_creator.to_list()},
            **{t: AgentNode for t in agent_creator.to_list()},
            **{t: ChainNode for t in chain_creator.to_list()},
            **{t: ToolNode for t in tool_creator.to_list()},
            **{t: ToolkitNode for t in toolkits_creator.to_list()},
            **{t: WrapperNode for t in wrapper_creator.to_list()},
            **{t: LLMNode for t in llm_creator.to_list()},
            **{t: MemoryNode for t in memory_creator.to_list()},
            **{t: EmbeddingNode for t in embedding_creator.to_list()},
            **{t: VectorStoreNode for t in vectorstore_creator.to_list()},
            **{t: DocumentLoaderNode for t in documentloader_creator.to_list()},
            **{t: TextSplitterNode for t in textsplitter_creator.to_list()},
        }

    def _get_vertex_class(self, node_type: str, node_lc_type: str) -> Type[Vertex]:
        """Returns the node class based on the node type."""
        if node_type in FILE_TOOLS:
            return FileToolNode
        if node_type in node_type_map:
            return node_type_map[node_type]
        if node_lc_type in node_type_map:
            return node_type_map[node_lc_type]
        return Node
            return FileToolVertex
        if node_type in VERTEX_TYPE_MAP:
            return VERTEX_TYPE_MAP[node_type]
        return (
            VERTEX_TYPE_MAP[node_lc_type] if node_lc_type in VERTEX_TYPE_MAP else Vertex
        )

    def _build_nodes(self) -> List[Node]:
        nodes: List[Node] = []
    def _build_vertices(self) -> List[Vertex]:
        """Builds the vertices of the graph."""
        nodes: List[Vertex] = []
        for node in self._nodes:
            node_data = node["data"]
            node_type: str = node_data["type"]  # type: ignore
            node_lc_type: str = node_data["node"]["template"]["_type"]  # type: ignore

            NodeClass = self._get_node_class(node_type, node_lc_type)
            nodes.append(NodeClass(node))
            VertexClass = self._get_vertex_class(node_type, node_lc_type)
            nodes.append(VertexClass(node))

        return nodes

    def get_children_by_node_type(self, node: Node, node_type: str) -> List[Node]:
    def get_children_by_node_type(self, node: Vertex, node_type: str) -> List[Vertex]:
        """Returns the children of a node based on the node type."""
        children = []
        node_types = [node.data["type"]]
        if "node" in node.data:
49 src/backend/langflow/graph/graph/constants.py (new file)
@@ -0,0 +1,49 @@
from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.types import (
    AgentVertex,
    ChainVertex,
    DocumentLoaderVertex,
    EmbeddingVertex,
    LLMVertex,
    MemoryVertex,
    PromptVertex,
    TextSplitterVertex,
    ToolVertex,
    ToolkitVertex,
    VectorStoreVertex,
    WrapperVertex,
)
from langflow.interface.agents.base import agent_creator
from langflow.interface.chains.base import chain_creator
from langflow.interface.document_loaders.base import documentloader_creator
from langflow.interface.embeddings.base import embedding_creator
from langflow.interface.llms.base import llm_creator
from langflow.interface.memories.base import memory_creator
from langflow.interface.prompts.base import prompt_creator
from langflow.interface.text_splitters.base import textsplitter_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator


from typing import Dict, Type


DIRECT_TYPES = ["str", "bool", "code", "int", "float", "Any", "prompt"]


VERTEX_TYPE_MAP: Dict[str, Type[Vertex]] = {
    **{t: PromptVertex for t in prompt_creator.to_list()},
    **{t: AgentVertex for t in agent_creator.to_list()},
    **{t: ChainVertex for t in chain_creator.to_list()},
    **{t: ToolVertex for t in tool_creator.to_list()},
    **{t: ToolkitVertex for t in toolkits_creator.to_list()},
    **{t: WrapperVertex for t in wrapper_creator.to_list()},
    **{t: LLMVertex for t in llm_creator.to_list()},
    **{t: MemoryVertex for t in memory_creator.to_list()},
    **{t: EmbeddingVertex for t in embedding_creator.to_list()},
    **{t: VectorStoreVertex for t in vectorstore_creator.to_list()},
    **{t: DocumentLoaderVertex for t in documentloader_creator.to_list()},
    **{t: TextSplitterVertex for t in textsplitter_creator.to_list()},
}
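VERTEX_TYPE_MAP centralizes the type-to-vertex-class mapping that Graph._get_vertex_class consults after the FILE_TOOLS special case: the node's own type is looked up first, then its underlying LangChain type, falling back to the plain Vertex. A small standalone sketch of that lookup order (the type strings passed in are hypothetical examples):

from langflow.graph.graph.constants import VERTEX_TYPE_MAP
from langflow.graph.vertex.base import Vertex


def resolve_vertex_class(node_type: str, node_lc_type: str):
    # Mirrors the fallback chain used in Graph._get_vertex_class, shown in isolation
    if node_type in VERTEX_TYPE_MAP:
        return VERTEX_TYPE_MAP[node_type]
    return VERTEX_TYPE_MAP.get(node_lc_type, Vertex)


print(resolve_vertex_class("LLMChain", "llm_chain"))  # a chain type would resolve to ChainVertex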
0 src/backend/langflow/graph/graph/utils.py (new file)
@@ -1,4 +1,6 @@
import re
from typing import Any, Union

from langflow.interface.utils import extract_input_variables_from_prompt


def validate_prompt(prompt: str):
@@ -14,6 +16,12 @@ def fix_prompt(prompt: str):
    return prompt + " {input}"


def extract_input_variables_from_prompt(prompt: str) -> list[str]:
    """Extract input variables from prompt."""
    return re.findall(r"{(.*?)}", prompt)
def flatten_list(list_of_lists: list[Union[list, Any]]) -> list:
    """Flatten list of lists."""
    new_list = []
    for item in list_of_lists:
        if isinstance(item, list):
            new_list.extend(item)
        else:
            new_list.append(item)
    return new_list
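flatten_list is used later in this commit by PromptVertex, when ShotPrompt nodes receive tools that may arrive as a list of lists (for example when a toolkit builds into several tools). A quick illustrative check of its behavior, with made-up tool names:

from langflow.graph.utils import flatten_list

tools = [["json_spec_tool", "json_explorer"], "calculator"]  # one toolkit expanded into two tools, plus a single tool
print(flatten_list(tools))  # ['json_spec_tool', 'json_explorer', 'calculator']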
0 src/backend/langflow/graph/vertex/__init__.py (new file)
@@ -1,27 +1,27 @@
# Description: Graph class for building a graph of nodes and edges
# Insights:
# - Defer prompts building to the last moment or when they have all the tools
# - Build each inner agent first, then build the outer agent

import contextlib
import inspect
import types
import warnings
from typing import Any, Dict, List, Optional

from langflow.cache import base as cache_utils
from langflow.graph.constants import DIRECT_TYPES
from langflow.graph.vertex.constants import DIRECT_TYPES
from langflow.interface import loading
from langflow.interface.listing import ALL_TYPES_DICT
from langflow.utils.logger import logger
from langflow.utils.util import sync_to_async


class Node:
import contextlib
import inspect
import types
import warnings
from typing import Any, Dict, List, Optional
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from langflow.graph.edge.base import Edge


class Vertex:
    def __init__(self, data: Dict, base_type: Optional[str] = None) -> None:
        self.id: str = data["id"]
        self._data = data
        self.edges: List[Edge] = []
        self.edges: List["Edge"] = []
        self.base_type: Optional[str] = base_type
        self._parse_data()
        self._built_object = None
@@ -48,12 +48,12 @@ class Node:
        ]

        template_dict = self.data["node"]["template"]
        self.node_type = (
        self.vertex_type = (
            self.data["type"] if "Tool" not in self.output else template_dict["_type"]
        )
        if self.base_type is None:
            for base_type, value in ALL_TYPES_DICT.items():
                if self.node_type in value:
                if self.vertex_type in value:
                    self.base_type = base_type
                    break

@@ -113,7 +113,7 @@ class Node:
            if value["required"] and not edges:
                # If a required parameter is not found, raise an error
                raise ValueError(
                    f"Required input {key} for module {self.node_type} not found"
                    f"Required input {key} for module {self.vertex_type} not found"
                )
            elif value["list"]:
                # If this is a list parameter, append all sources to a list
@@ -128,7 +128,7 @@ class Node:
            # so we need to check if value has value
            new_value = value.get("value")
            if new_value is None:
                warnings.warn(f"Value for {key} in {self.node_type} is None. ")
                warnings.warn(f"Value for {key} in {self.vertex_type} is None. ")
            if value.get("type") == "int":
                with contextlib.suppress(TypeError, ValueError):
                    new_value = int(new_value)  # type: ignore
@@ -148,12 +148,12 @@ class Node:
        # and continue
        # Another aspect is that the node_type is the class that we need to import
        # and instantiate with these built params
        logger.debug(f"Building {self.node_type}")
        logger.debug(f"Building {self.vertex_type}")
        # Build each node in the params dict
        for key, value in self.params.copy().items():
            # Check if Node or list of Nodes and not self
            # to avoid recursion
            if isinstance(value, Node):
            if isinstance(value, Vertex):
                if value == self:
                    del self.params[key]
                    continue
@@ -174,10 +174,16 @@ class Node:
                    # turn result which is a function into a coroutine
                    # so that it can be awaited
                    self.params["coroutine"] = sync_to_async(result)
                if isinstance(result, list):
                    # If the result is a list, then we need to extend the list
                    # with the result but first check if the key exists
                    # if it doesn't, then we need to create a new list
                    if isinstance(self.params[key], list):
                        self.params[key].extend(result)

                self.params[key] = result
            elif isinstance(value, list) and all(
                isinstance(node, Node) for node in value
                isinstance(node, Vertex) for node in value
            ):
                self.params[key] = []
                for node in value:
@@ -193,17 +199,17 @@ class Node:

        try:
            self._built_object = loading.instantiate_class(
                node_type=self.node_type,
                node_type=self.vertex_type,
                base_type=self.base_type,
                params=self.params,
            )
        except Exception as exc:
            raise ValueError(
                f"Error building node {self.node_type}: {str(exc)}"
                f"Error building node {self.vertex_type}: {str(exc)}"
            ) from exc

        if self._built_object is None:
            raise ValueError(f"Node type {self.node_type} not found")
            raise ValueError(f"Node type {self.vertex_type} not found")

        self._built = True

@@ -220,57 +226,10 @@ class Node:
        return f"Node(id={self.id}, data={self.data})"

    def __eq__(self, __o: object) -> bool:
        return self.id == __o.id if isinstance(__o, Node) else False
        return self.id == __o.id if isinstance(__o, Vertex) else False

    def __hash__(self) -> int:
        return id(self)

    def _built_object_repr(self):
        return repr(self._built_object)


class Edge:
    def __init__(self, source: "Node", target: "Node"):
        self.source: "Node" = source
        self.target: "Node" = target
        self.validate_edge()

    def validate_edge(self) -> None:
        # Validate that the outputs of the source node are valid inputs
        # for the target node
        self.source_types = self.source.output
        self.target_reqs = self.target.required_inputs + self.target.optional_inputs
        # Both lists contain strings and sometimes a string contains the value we are
        # looking for e.g. comgin_out=["Chain"] and target_reqs=["LLMChain"]
        # so we need to check if any of the strings in source_types is in target_reqs
        self.valid = any(
            output in target_req
            for output in self.source_types
            for target_req in self.target_reqs
        )
        # Get what type of input the target node is expecting

        self.matched_type = next(
            (
                output
                for output in self.source_types
                for target_req in self.target_reqs
                if output in target_req
            ),
            None,
        )
        no_matched_type = self.matched_type is None
        if no_matched_type:
            logger.debug(self.source_types)
            logger.debug(self.target_reqs)
        if no_matched_type:
            raise ValueError(
                f"Edge between {self.source.node_type} and {self.target.node_type} "
                f"has no matched type"
            )

    def __repr__(self) -> str:
        return (
            f"Edge(source={self.source.id}, target={self.target.id}, valid={self.valid}"
            f", matched_type={self.matched_type})"
        )
@@ -1,22 +1,23 @@
from typing import Any, Dict, List, Optional, Union

from langflow.graph.base import Node
from langflow.graph.utils import extract_input_variables_from_prompt
from langflow.graph.vertex.base import Vertex
from langflow.graph.utils import flatten_list
from langflow.interface.utils import extract_input_variables_from_prompt


class AgentNode(Node):
class AgentVertex(Vertex):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="agents")

        self.tools: List[ToolNode] = []
        self.chains: List[ChainNode] = []
        self.tools: List[Union[ToolkitVertex, ToolVertex]] = []
        self.chains: List[ChainVertex] = []

    def _set_tools_and_chains(self) -> None:
        for edge in self.edges:
            source_node = edge.source
            if isinstance(source_node, ToolNode):
            if isinstance(source_node, (ToolVertex, ToolkitVertex)):
                self.tools.append(source_node)
            elif isinstance(source_node, ChainNode):
            elif isinstance(source_node, ChainVertex):
                self.chains.append(source_node)

    def build(self, force: bool = False) -> Any:
@@ -32,25 +33,130 @@ class AgentNode(Node):

        self._build()

        #! Cannot deepcopy VectorStore, VectorStoreRouter, or SQL agents
        if self.node_type in ["VectorStoreAgent", "VectorStoreRouterAgent", "SQLAgent"]:
            return self._built_object
        return self._built_object


class ToolNode(Node):
class ToolVertex(Vertex):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="tools")


class PromptNode(Node):
class LLMVertex(Vertex):
    built_node_type = None
    class_built_object = None

    def __init__(self, data: Dict):
        super().__init__(data, base_type="llms")

    def build(self, force: bool = False) -> Any:
        # LLM is different because some models might take up too much memory
        # or time to load. So we only load them when we need them.
        if self.vertex_type == self.built_node_type:
            return self.class_built_object
        if not self._built or force:
            self._build()
            self.built_node_type = self.vertex_type
            self.class_built_object = self._built_object
        # Avoid deepcopying the LLM
        # that are loaded from a file
        return self._built_object


class ToolkitVertex(Vertex):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="toolkits")


class FileToolVertex(ToolVertex):
    def __init__(self, data: Dict):
        super().__init__(data)


class WrapperVertex(Vertex):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="wrappers")

    def build(self, force: bool = False) -> Any:
        if not self._built or force:
            if "headers" in self.params:
                self.params["headers"] = eval(self.params["headers"])
            self._build()
        return self._built_object


class DocumentLoaderVertex(Vertex):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="documentloaders")

    def _built_object_repr(self):
        # This built_object is a list of documents. Maybe we should
        # show how many documents are in the list?
        if self._built_object:
            return f"""{self.vertex_type}({len(self._built_object)} documents)
Documents: {self._built_object[:3]}..."""
        return f"{self.vertex_type}()"


class EmbeddingVertex(Vertex):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="embeddings")


class VectorStoreVertex(Vertex):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="vectorstores")

    def _built_object_repr(self):
        return "Vector stores can take time to build. It will build on the first query."


class MemoryVertex(Vertex):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="memory")


class TextSplitterVertex(Vertex):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="textsplitters")

    def _built_object_repr(self):
        # This built_object is a list of documents. Maybe we should
        # show how many documents are in the list?
        if self._built_object:
            return f"""{self.vertex_type}({len(self._built_object)} documents)
\nDocuments: {self._built_object[:3]}..."""
        return f"{self.vertex_type}()"


class ChainVertex(Vertex):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="chains")

    def build(
        self,
        force: bool = False,
        tools: Optional[List[Union[ToolkitVertex, ToolVertex]]] = None,
    ) -> Any:
        if not self._built or force:
            # Check if the chain requires a PromptVertex
            for key, value in self.params.items():
                if isinstance(value, PromptVertex):
                    # Build the PromptVertex, passing the tools if available
                    self.params[key] = value.build(tools=tools, force=force)

            self._build()

        return self._built_object


class PromptVertex(Vertex):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="prompts")

    def build(
        self,
        force: bool = False,
        tools: Optional[Union[List[Node], List[ToolNode]]] = None,
        tools: Optional[List[Union[ToolkitVertex, ToolVertex]]] = None,
    ) -> Any:
        if not self._built or force:
            if (
@@ -59,12 +165,16 @@ class PromptNode(Node):
            ):
                self.params["input_variables"] = []
            # Check if it is a ZeroShotPrompt and needs a tool
            if "ShotPrompt" in self.node_type:
            if "ShotPrompt" in self.vertex_type:
                tools = (
                    [tool_node.build() for tool_node in tools]
                    if tools is not None
                    else []
                )
                # flatten the list of tools if it is a list of lists
                # first check if it is a list
                if tools and isinstance(tools, list) and isinstance(tools[0], list):
                    tools = flatten_list(tools)
                self.params["tools"] = tools
            prompt_params = [
                key
@@ -81,113 +191,3 @@ class PromptNode(Node):

            self._build()
        return self._built_object


class ChainNode(Node):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="chains")

    def build(
        self,
        force: bool = False,
        tools: Optional[Union[List[Node], List[ToolNode]]] = None,
    ) -> Any:
        if not self._built or force:
            # Check if the chain requires a PromptNode
            for key, value in self.params.items():
                if isinstance(value, PromptNode):
                    # Build the PromptNode, passing the tools if available
                    self.params[key] = value.build(tools=tools, force=force)

            self._build()

        #! Cannot deepcopy SQLDatabaseChain
        if self.node_type in ["SQLDatabaseChain"]:
            return self._built_object
        return self._built_object


class LLMNode(Node):
    built_node_type = None
    class_built_object = None

    def __init__(self, data: Dict):
        super().__init__(data, base_type="llms")

    def build(self, force: bool = False) -> Any:
        # LLM is different because some models might take up too much memory
        # or time to load. So we only load them when we need them.
        if self.node_type == self.built_node_type:
            return self.class_built_object
        if not self._built or force:
            self._build()
            self.built_node_type = self.node_type
            self.class_built_object = self._built_object
        # Avoid deepcopying the LLM
        # that are loaded from a file
        return self._built_object


class ToolkitNode(Node):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="toolkits")


class FileToolNode(ToolNode):
    def __init__(self, data: Dict):
        super().__init__(data)


class WrapperNode(Node):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="wrappers")

    def build(self, force: bool = False) -> Any:
        if not self._built or force:
            if "headers" in self.params:
                self.params["headers"] = eval(self.params["headers"])
            self._build()
        return self._built_object


class DocumentLoaderNode(Node):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="documentloaders")

    def _built_object_repr(self):
        # This built_object is a list of documents. Maybe we should
        # show how many documents are in the list?
        if self._built_object:
            return f"""{self.node_type}({len(self._built_object)} documents)
Documents: {self._built_object[:3]}..."""
        return f"{self.node_type}()"


class EmbeddingNode(Node):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="embeddings")


class VectorStoreNode(Node):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="vectorstores")

    def _built_object_repr(self):
        return "Vector stores can take time to build. It will build on the first query."


class MemoryNode(Node):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="memory")


class TextSplitterNode(Node):
    def __init__(self, data: Dict):
        super().__init__(data, base_type="textsplitters")

    def _built_object_repr(self):
        # This built_object is a list of documents. Maybe we should
        # show how many documents are in the list?
        if self._built_object:
            return f"""{self.node_type}({len(self._built_object)} documents)\nDocuments: {self._built_object[:3]}..."""
        return f"{self.node_type}()"
@@ -69,7 +69,7 @@ class JsonAgent(CustomAgentExecutor):

    @classmethod
    def from_toolkit_and_llm(cls, toolkit: JsonToolkit, llm: BaseLanguageModel):
        tools = toolkit.get_tools()
        tools = toolkit if isinstance(toolkit, list) else toolkit.get_tools()
        tool_names = {tool.name for tool in tools}
        prompt = ZeroShotAgent.create_prompt(
            tools,
@@ -5,7 +5,7 @@ from langchain.memory.buffer import ConversationBufferMemory
from langchain.schema import BaseMemory
from pydantic import Field, root_validator

from langflow.graph.utils import extract_input_variables_from_prompt
from langflow.interface.utils import extract_input_variables_from_prompt

DEFAULT_SUFFIX = """"
Current conversation:
@@ -11,12 +11,15 @@ from langchain import (
    text_splitter,
)
from langchain.agents import agent_toolkits
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import AzureChatOpenAI, ChatOpenAI
from langchain.chat_models import ChatAnthropic

from langflow.interface.importing.utils import import_class

## LLMs
llm_type_to_cls_dict = llms.type_to_cls_dict
llm_type_to_cls_dict["anthropic-chat"] = ChatAnthropic  # type: ignore
llm_type_to_cls_dict["azure-chat"] = AzureChatOpenAI  # type: ignore
llm_type_to_cls_dict["openai-chat"] = ChatOpenAI  # type: ignore

## Chains
@@ -9,6 +9,7 @@ from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chat_models.base import BaseChatModel
from langchain.tools import BaseTool
from langflow.utils import validate


def import_module(module_path: str) -> Any:
@@ -147,3 +148,10 @@ def import_utility(utility: str) -> Any:
    if utility == "SQLDatabase":
        return import_class(f"langchain.sql_database.{utility}")
    return import_class(f"langchain.utilities.{utility}")


def get_function(code):
    """Get the function"""
    function_name = validate.extract_function_name(code)

    return validate.create_function(code, function_name)
@@ -20,8 +20,7 @@ from langchain.llms.loading import load_llm_from_config
from pydantic import ValidationError

from langflow.interface.agents.custom import CUSTOM_AGENTS
from langflow.interface.importing.utils import import_by_type
from langflow.interface.run import fix_memory_inputs
from langflow.interface.importing.utils import get_function, import_by_type
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.types import get_type_list
from langflow.interface.utils import load_file_into_dict
@ -99,6 +98,10 @@ def instantiate_tool(node_type, class_object, params):
|
|||
if node_type == "JsonSpec":
|
||||
params["dict_"] = load_file_into_dict(params.pop("path"))
|
||||
return class_object(**params)
|
||||
elif node_type == "PythonFunctionTool":
|
||||
params["func"] = get_function(params.get("code"))
|
||||
return class_object(**params)
|
||||
# For backward compatibility
|
||||
elif node_type == "PythonFunction":
|
||||
function_string = params["code"]
|
||||
if isinstance(function_string, str):
|
||||
|
|
@ -111,8 +114,11 @@ def instantiate_tool(node_type, class_object, params):
|
|||
|
||||
def instantiate_toolkit(node_type, class_object, params):
|
||||
loaded_toolkit = class_object(**params)
|
||||
if toolkits_creator.has_create_function(node_type):
|
||||
return load_toolkits_executor(node_type, loaded_toolkit, params)
|
||||
# Commenting this out for now to use toolkits as normal tools
|
||||
# if toolkits_creator.has_create_function(node_type):
|
||||
# return load_toolkits_executor(node_type, loaded_toolkit, params)
|
||||
if isinstance(loaded_toolkit, BaseToolkit):
|
||||
return loaded_toolkit.get_tools()
|
||||
return loaded_toolkit
|
||||
|
||||
|
||||
|
|
@ -161,38 +167,6 @@ def instantiate_utility(node_type, class_object, params):
|
|||
return class_object(**params)
|
||||
|
||||
|
||||
def load_flow_from_json(path: str, build=True):
|
||||
"""Load flow from json file"""
|
||||
# This is done to avoid circular imports
|
||||
from langflow.graph import Graph
|
||||
|
||||
with open(path, "r", encoding="utf-8") as f:
|
||||
flow_graph = json.load(f)
|
||||
data_graph = flow_graph["data"]
|
||||
nodes = data_graph["nodes"]
|
||||
# Substitute ZeroShotPrompt with PromptTemplate
|
||||
# nodes = replace_zero_shot_prompt_with_prompt_template(nodes)
|
||||
# Add input variables
|
||||
# nodes = payload.extract_input_variables(nodes)
|
||||
|
||||
# Nodes, edges and root node
|
||||
edges = data_graph["edges"]
|
||||
graph = Graph(nodes, edges)
|
||||
if build:
|
||||
langchain_object = graph.build()
|
||||
if hasattr(langchain_object, "verbose"):
|
||||
langchain_object.verbose = True
|
||||
|
||||
if hasattr(langchain_object, "return_intermediate_steps"):
|
||||
# https://github.com/hwchase17/langchain/issues/2068
|
||||
# Deactivating until we have a frontend solution
|
||||
# to display intermediate steps
|
||||
langchain_object.return_intermediate_steps = False
|
||||
fix_memory_inputs(langchain_object)
|
||||
return langchain_object
|
||||
return graph
|
||||
|
||||
|
||||
def replace_zero_shot_prompt_with_prompt_template(nodes):
|
||||
"""Replace ZeroShotPrompt with PromptTemplate"""
|
||||
for node in nodes:
|
||||
|
|
|
|||
|
|
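This hunk removes load_flow_from_json from loading.py; the same function reappears later in the diff inside the new langflow/processing/process.py module. A minimal usage sketch, assuming it is imported from that new location and that flow.json is a flow exported from the UI:

# Sketch only: assumes langflow.processing.process exposes load_flow_from_json
# and that "flow.json" was exported from the Langflow editor.
from langflow.processing.process import load_flow_from_json

chain = load_flow_from_json("flow.json", build=True)
print(chain.run("What is the capital of France?"))
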
@@ -3,7 +3,7 @@ from typing import Dict, List, Optional, Type
from langchain.prompts import PromptTemplate
from pydantic import root_validator

from langflow.graph.utils import extract_input_variables_from_prompt
from langflow.interface.utils import extract_input_variables_from_prompt

# Steps to create a BaseCustomPrompt:
# 1. Create a prompt template that endes with:

@ -1,12 +1,5 @@
|
|||
import contextlib
|
||||
import io
|
||||
from typing import Any, Dict, List, Tuple
|
||||
|
||||
from langchain.schema import AgentAction
|
||||
|
||||
from langflow.api.callback import AsyncStreamingLLMCallbackHandler, StreamingLLMCallbackHandler # type: ignore
|
||||
from langflow.cache.base import compute_dict_hash, load_cache, memoize_dict
|
||||
from langflow.graph.graph import Graph
|
||||
from langflow.graph import Graph
|
||||
from langflow.utils.logger import logger
|
||||
|
||||
|
||||
|
|
@ -24,15 +17,6 @@ def load_langchain_object(data_graph, is_first_message=False):
|
|||
return computed_hash, langchain_object
|
||||
|
||||
|
||||
def load_or_build_langchain_object(data_graph, is_first_message=False):
|
||||
"""
|
||||
Load langchain object from cache if it exists, otherwise build it.
|
||||
"""
|
||||
if is_first_message:
|
||||
build_langchain_object_with_caching.clear_cache()
|
||||
return build_langchain_object_with_caching(data_graph)
|
||||
|
||||
|
||||
@memoize_dict(maxsize=10)
|
||||
def build_langchain_object_with_caching(data_graph):
|
||||
"""
|
||||
|
|
@ -40,16 +24,10 @@ def build_langchain_object_with_caching(data_graph):
|
|||
"""
|
||||
|
||||
logger.debug("Building langchain object")
|
||||
graph = build_graph(data_graph)
|
||||
graph = Graph.from_payload(data_graph)
|
||||
return graph.build()
|
||||
|
||||
|
||||
def build_graph(data_graph):
|
||||
nodes = data_graph["nodes"]
|
||||
edges = data_graph["edges"]
|
||||
return Graph(nodes, edges)
|
||||
|
||||
|
||||
def build_langchain_object(data_graph):
|
||||
"""
|
||||
Build langchain object from data_graph.
|
||||
|
|
@ -66,29 +44,6 @@ def build_langchain_object(data_graph):
|
|||
return graph.build()
|
||||
|
||||
|
||||
def process_graph_cached(data_graph: Dict[str, Any], message: str):
|
||||
"""
|
||||
Process graph by extracting input variables and replacing ZeroShotPrompt
|
||||
with PromptTemplate,then run the graph and return the result and thought.
|
||||
"""
|
||||
# Load langchain object
|
||||
is_first_message = len(data_graph.get("chatHistory", [])) == 0
|
||||
langchain_object = load_or_build_langchain_object(data_graph, is_first_message)
|
||||
logger.debug("Loaded langchain object")
|
||||
|
||||
if langchain_object is None:
|
||||
# Raise user facing error
|
||||
raise ValueError(
|
||||
"There was an error loading the langchain_object. Please, check all the nodes and try again."
|
||||
)
|
||||
|
||||
# Generate result and thought
|
||||
logger.debug("Generating result and thought")
|
||||
result, thought = get_result_and_thought(langchain_object, message)
|
||||
logger.debug("Generated result and thought")
|
||||
return {"result": str(result), "thought": thought.strip()}
|
||||
|
||||
|
||||
def get_memory_key(langchain_object):
|
||||
"""
|
||||
Given a LangChain object, this function retrieves the current memory key from the object's memory attribute.
|
||||
|
|
@ -124,147 +79,3 @@ def update_memory_keys(langchain_object, possible_new_mem_key):
|
|||
langchain_object.memory.input_key = input_key
|
||||
langchain_object.memory.output_key = output_key
|
||||
langchain_object.memory.memory_key = possible_new_mem_key
|
||||
|
||||
|
||||
def fix_memory_inputs(langchain_object):
|
||||
"""
|
||||
Given a LangChain object, this function checks if it has a memory attribute and if that memory key exists in the
|
||||
object's input variables. If so, it does nothing. Otherwise, it gets a possible new memory key using the
|
||||
get_memory_key function and updates the memory keys using the update_memory_keys function.
|
||||
"""
|
||||
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
|
||||
try:
|
||||
if langchain_object.memory.memory_key in langchain_object.input_variables:
|
||||
return
|
||||
except AttributeError:
|
||||
input_variables = (
|
||||
langchain_object.prompt.input_variables
|
||||
if hasattr(langchain_object, "prompt")
|
||||
else langchain_object.input_keys
|
||||
)
|
||||
if langchain_object.memory.memory_key in input_variables:
|
||||
return
|
||||
|
||||
possible_new_mem_key = get_memory_key(langchain_object)
|
||||
if possible_new_mem_key is not None:
|
||||
update_memory_keys(langchain_object, possible_new_mem_key)
|
||||
|
||||
|
||||
async def get_result_and_steps(langchain_object, message: str, **kwargs):
|
||||
"""Get result and thought from extracted json"""
|
||||
|
||||
try:
|
||||
if hasattr(langchain_object, "verbose"):
|
||||
langchain_object.verbose = True
|
||||
chat_input = None
|
||||
memory_key = ""
|
||||
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
|
||||
memory_key = langchain_object.memory.memory_key
|
||||
|
||||
if hasattr(langchain_object, "input_keys"):
|
||||
for key in langchain_object.input_keys:
|
||||
if key not in [memory_key, "chat_history"]:
|
||||
chat_input = {key: message}
|
||||
else:
|
||||
chat_input = message # type: ignore
|
||||
|
||||
if hasattr(langchain_object, "return_intermediate_steps"):
|
||||
# https://github.com/hwchase17/langchain/issues/2068
|
||||
# Deactivating until we have a frontend solution
|
||||
# to display intermediate steps
|
||||
langchain_object.return_intermediate_steps = True
|
||||
|
||||
fix_memory_inputs(langchain_object)
|
||||
try:
|
||||
async_callbacks = [AsyncStreamingLLMCallbackHandler(**kwargs)]
|
||||
output = await langchain_object.acall(chat_input, callbacks=async_callbacks)
|
||||
except Exception as exc:
|
||||
# make the error message more informative
|
||||
logger.debug(f"Error: {str(exc)}")
|
||||
sync_callbacks = [StreamingLLMCallbackHandler(**kwargs)]
|
||||
output = langchain_object(chat_input, callbacks=sync_callbacks)
|
||||
|
||||
intermediate_steps = (
|
||||
output.get("intermediate_steps", []) if isinstance(output, dict) else []
|
||||
)
|
||||
|
||||
result = (
|
||||
output.get(langchain_object.output_keys[0])
|
||||
if isinstance(output, dict)
|
||||
else output
|
||||
)
|
||||
thought = format_actions(intermediate_steps) if intermediate_steps else ""
|
||||
except Exception as exc:
|
||||
raise ValueError(f"Error: {str(exc)}") from exc
|
||||
return result, thought
|
||||
|
||||
|
||||
def get_result_and_thought(langchain_object, message: str):
|
||||
"""Get result and thought from extracted json"""
|
||||
try:
|
||||
if hasattr(langchain_object, "verbose"):
|
||||
langchain_object.verbose = True
|
||||
chat_input = None
|
||||
memory_key = ""
|
||||
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
|
||||
memory_key = langchain_object.memory.memory_key
|
||||
|
||||
if hasattr(langchain_object, "input_keys"):
|
||||
for key in langchain_object.input_keys:
|
||||
if key not in [memory_key, "chat_history"]:
|
||||
chat_input = {key: message}
|
||||
else:
|
||||
chat_input = message # type: ignore
|
||||
|
||||
if hasattr(langchain_object, "return_intermediate_steps"):
|
||||
# https://github.com/hwchase17/langchain/issues/2068
|
||||
# Deactivating until we have a frontend solution
|
||||
# to display intermediate steps
|
||||
langchain_object.return_intermediate_steps = False
|
||||
|
||||
fix_memory_inputs(langchain_object)
|
||||
|
||||
with io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer):
|
||||
try:
|
||||
# if hasattr(langchain_object, "acall"):
|
||||
# output = await langchain_object.acall(chat_input)
|
||||
# else:
|
||||
output = langchain_object(chat_input)
|
||||
except ValueError as exc:
|
||||
# make the error message more informative
|
||||
logger.debug(f"Error: {str(exc)}")
|
||||
output = langchain_object.run(chat_input)
|
||||
|
||||
intermediate_steps = (
|
||||
output.get("intermediate_steps", []) if isinstance(output, dict) else []
|
||||
)
|
||||
|
||||
result = (
|
||||
output.get(langchain_object.output_keys[0])
|
||||
if isinstance(output, dict)
|
||||
else output
|
||||
)
|
||||
if intermediate_steps:
|
||||
thought = format_actions(intermediate_steps)
|
||||
else:
|
||||
thought = output_buffer.getvalue()
|
||||
|
||||
except Exception as exc:
|
||||
raise ValueError(f"Error: {str(exc)}") from exc
|
||||
return result, thought
|
||||
|
||||
|
||||
def format_actions(actions: List[Tuple[AgentAction, str]]) -> str:
|
||||
"""Format a list of (AgentAction, answer) tuples into a string."""
|
||||
output = []
|
||||
for action, answer in actions:
|
||||
log = action.log
|
||||
tool = action.tool
|
||||
tool_input = action.tool_input
|
||||
output.append(f"Log: {log}")
|
||||
if "Action" not in log and "Action Input" not in log:
|
||||
output.append(f"Tool: {tool}")
|
||||
output.append(f"Tool Input: {tool_input}")
|
||||
output.append(f"Answer: {answer}")
|
||||
output.append("") # Add a blank line
|
||||
return "\n".join(output)
|
||||
|
|
|
|||
|
|
@@ -42,24 +42,27 @@ class ToolkitCreator(LangChainTypeCreator):

    def get_signature(self, name: str) -> Optional[Dict]:
        try:
            return build_template_from_class(name, self.type_to_loader_dict)
            template = build_template_from_class(name, self.type_to_loader_dict)
            # add Tool to base_classes
            if "toolkit" in name.lower() and template:
                template["base_classes"].append("Tool")
            return template
        except ValueError as exc:
            raise ValueError("Prompt not found") from exc
            raise ValueError("Toolkit not found") from exc
        except AttributeError as exc:
            logger.error(f"Prompt {name} not loaded: {exc}")
            logger.error(f"Toolkit {name} not loaded: {exc}")
            return None

    def to_list(self) -> List[str]:
        return list(self.type_to_loader_dict.keys())

    def get_create_function(self, name: str) -> Callable:
        if loader_name := self.create_functions.get(name, None):
        # import loader
        if loader_name := self.create_functions.get(name):
            return import_module(
                f"from langchain.agents.agent_toolkits import {loader_name[0]}"
            )
        else:
            raise ValueError("Loader not found")
            raise ValueError("Toolkit not found")

    def has_create_function(self, name: str) -> bool:
        # check if the function list is not empty

@@ -71,7 +71,8 @@ class ToolCreator(LangChainTypeCreator):

        for tool, tool_fcn in ALL_TOOLS_NAMES.items():
            tool_params = get_tool_params(tool_fcn)
            tool_name = tool_params.get("name", tool)

            tool_name = tool_params.get("name") or tool

            if tool_name in settings.tools or settings.dev:
                if tool_name == "JsonSpec":

@@ -9,10 +9,14 @@ from langchain.agents.load_tools import (
from langchain.tools.json.tool import JsonSpec

from langflow.interface.importing.utils import import_class
from langflow.interface.tools.custom import PythonFunction
from langflow.interface.tools.custom import PythonFunctionTool, PythonFunction

FILE_TOOLS = {"JsonSpec": JsonSpec}
CUSTOM_TOOLS = {"Tool": Tool, "PythonFunction": PythonFunction}
CUSTOM_TOOLS = {
    "Tool": Tool,
    "PythonFunctionTool": PythonFunctionTool,
    "PythonFunction": PythonFunction,
}

OTHER_TOOLS = {tool: import_class(f"langchain.tools.{tool}") for tool in tools.__all__}

@@ -1,8 +1,10 @@
from typing import Callable, Optional
from langflow.interface.importing.utils import get_function

from pydantic import BaseModel, validator

from langflow.utils import validate
from langchain.agents.tools import Tool


class Function(BaseModel):

@@ -31,6 +33,21 @@ class Function(BaseModel):
        return validate.create_function(self.code, function_name)


class PythonFunctionTool(Function, Tool):
    """Python function"""

    name: str = "Custom Tool"
    description: str
    code: str

    def ___init__(self, name: str, description: str, code: str):
        self.name = name
        self.description = description
        self.code = code
        self.func = get_function(self.code)
        super().__init__(name=name, description=description, func=self.func)


class PythonFunction(Function):
    """Python function"""

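PythonFunction only validates and stores the code string; turning it into a callable goes through the same get_function helper shown earlier. A minimal sketch (the function body below is a made-up example, not part of the commit):

# Sketch only: the code string is illustrative.
from langflow.interface.importing.utils import get_function
from langflow.interface.tools.custom import PythonFunction

fn = PythonFunction(code="def greet(name: str) -> str:\n    return f'Hello, {name}!'")
greet = get_function(fn.code)
print(greet("Langflow"))  # Hello, Langflow!
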
@@ -2,6 +2,7 @@ import base64
import json
import os
from io import BytesIO
import re

import yaml
from langchain.base_language import BaseLanguageModel

@@ -52,3 +53,8 @@ def try_setting_streaming_options(langchain_object, websocket):
            llm.stream = True

    return langchain_object


def extract_input_variables_from_prompt(prompt: str) -> list[str]:
    """Extract input variables from prompt."""
    return re.findall(r"{(.*?)}", prompt)

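extract_input_variables_from_prompt is a plain regex scan for {placeholders}. Its behaviour on a typical prompt template:

from langflow.interface.utils import extract_input_variables_from_prompt

extract_input_variables_from_prompt("Tell me a {adjective} joke about {topic}")
# -> ['adjective', 'topic']
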
@@ -1,9 +1,7 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from langflow.api.chat import router as chat_router
from langflow.api.endpoints import router as endpoints_router
from langflow.api.validate import router as validate_router
from langflow.api import router


def create_app():

@@ -14,6 +12,10 @@ def create_app():
        "*",
    ]

    @app.get("/health")
    def get_health():
        return {"status": "OK"}

    app.add_middleware(
        CORSMiddleware,
        allow_origins=origins,

@@ -22,9 +24,7 @@ def create_app():
        allow_headers=["*"],
    )

    app.include_router(endpoints_router)
    app.include_router(validate_router)
    app.include_router(chat_router)
    app.include_router(router)
    return app

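The new /health route gives deployments a simple liveness probe. A minimal check, sketched with FastAPI's TestClient against the create_app factory above:

# Sketch only: exercises the /health route added in this hunk.
from fastapi.testclient import TestClient
from langflow.main import create_app

client = TestClient(create_app())
response = client.get("/health")
assert response.status_code == 200
assert response.json() == {"status": "OK"}
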
0 src/backend/langflow/processing/__init__.py Normal file
55 src/backend/langflow/processing/base.py Normal file
|
|
@ -0,0 +1,55 @@
|
|||
from langflow.api.v1.callback import (
|
||||
AsyncStreamingLLMCallbackHandler,
|
||||
StreamingLLMCallbackHandler,
|
||||
)
|
||||
from langflow.processing.process import fix_memory_inputs, format_actions
|
||||
from langflow.utils.logger import logger
|
||||
|
||||
|
||||
async def get_result_and_steps(langchain_object, message: str, **kwargs):
|
||||
"""Get result and thought from extracted json"""
|
||||
|
||||
try:
|
||||
if hasattr(langchain_object, "verbose"):
|
||||
langchain_object.verbose = True
|
||||
chat_input = None
|
||||
memory_key = ""
|
||||
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
|
||||
memory_key = langchain_object.memory.memory_key
|
||||
|
||||
if hasattr(langchain_object, "input_keys"):
|
||||
for key in langchain_object.input_keys:
|
||||
if key not in [memory_key, "chat_history"]:
|
||||
chat_input = {key: message}
|
||||
else:
|
||||
chat_input = message # type: ignore
|
||||
|
||||
if hasattr(langchain_object, "return_intermediate_steps"):
|
||||
# https://github.com/hwchase17/langchain/issues/2068
|
||||
# Deactivating until we have a frontend solution
|
||||
# to display intermediate steps
|
||||
langchain_object.return_intermediate_steps = True
|
||||
|
||||
fix_memory_inputs(langchain_object)
|
||||
try:
|
||||
async_callbacks = [AsyncStreamingLLMCallbackHandler(**kwargs)]
|
||||
output = await langchain_object.acall(chat_input, callbacks=async_callbacks)
|
||||
except Exception as exc:
|
||||
# make the error message more informative
|
||||
logger.debug(f"Error: {str(exc)}")
|
||||
sync_callbacks = [StreamingLLMCallbackHandler(**kwargs)]
|
||||
output = langchain_object(chat_input, callbacks=sync_callbacks)
|
||||
|
||||
intermediate_steps = (
|
||||
output.get("intermediate_steps", []) if isinstance(output, dict) else []
|
||||
)
|
||||
|
||||
result = (
|
||||
output.get(langchain_object.output_keys[0])
|
||||
if isinstance(output, dict)
|
||||
else output
|
||||
)
|
||||
thought = format_actions(intermediate_steps) if intermediate_steps else ""
|
||||
except Exception as exc:
|
||||
raise ValueError(f"Error: {str(exc)}") from exc
|
||||
return result, thought
|
||||
172
src/backend/langflow/processing/process.py
Normal file
172
src/backend/langflow/processing/process.py
Normal file
|
|
@ -0,0 +1,172 @@
|
|||
import contextlib
|
||||
import io
|
||||
from langchain.schema import AgentAction
|
||||
import json
|
||||
from langflow.interface.run import (
|
||||
build_langchain_object_with_caching,
|
||||
get_memory_key,
|
||||
update_memory_keys,
|
||||
)
|
||||
from langflow.utils.logger import logger
|
||||
from langflow.graph import Graph
|
||||
|
||||
|
||||
from typing import Any, Dict, List, Tuple
|
||||
|
||||
|
||||
def fix_memory_inputs(langchain_object):
|
||||
"""
|
||||
Given a LangChain object, this function checks if it has a memory attribute and if that memory key exists in the
|
||||
object's input variables. If so, it does nothing. Otherwise, it gets a possible new memory key using the
|
||||
get_memory_key function and updates the memory keys using the update_memory_keys function.
|
||||
"""
|
||||
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
|
||||
try:
|
||||
if langchain_object.memory.memory_key in langchain_object.input_variables:
|
||||
return
|
||||
except AttributeError:
|
||||
input_variables = (
|
||||
langchain_object.prompt.input_variables
|
||||
if hasattr(langchain_object, "prompt")
|
||||
else langchain_object.input_keys
|
||||
)
|
||||
if langchain_object.memory.memory_key in input_variables:
|
||||
return
|
||||
|
||||
possible_new_mem_key = get_memory_key(langchain_object)
|
||||
if possible_new_mem_key is not None:
|
||||
update_memory_keys(langchain_object, possible_new_mem_key)
|
||||
|
||||
|
||||
def format_actions(actions: List[Tuple[AgentAction, str]]) -> str:
|
||||
"""Format a list of (AgentAction, answer) tuples into a string."""
|
||||
output = []
|
||||
for action, answer in actions:
|
||||
log = action.log
|
||||
tool = action.tool
|
||||
tool_input = action.tool_input
|
||||
output.append(f"Log: {log}")
|
||||
if "Action" not in log and "Action Input" not in log:
|
||||
output.append(f"Tool: {tool}")
|
||||
output.append(f"Tool Input: {tool_input}")
|
||||
output.append(f"Answer: {answer}")
|
||||
output.append("") # Add a blank line
|
||||
return "\n".join(output)
|
||||
|
||||
|
||||
def get_result_and_thought(langchain_object, message: str):
|
||||
"""Get result and thought from extracted json"""
|
||||
try:
|
||||
if hasattr(langchain_object, "verbose"):
|
||||
langchain_object.verbose = True
|
||||
chat_input = None
|
||||
memory_key = ""
|
||||
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
|
||||
memory_key = langchain_object.memory.memory_key
|
||||
|
||||
if hasattr(langchain_object, "input_keys"):
|
||||
for key in langchain_object.input_keys:
|
||||
if key not in [memory_key, "chat_history"]:
|
||||
chat_input = {key: message}
|
||||
else:
|
||||
chat_input = message # type: ignore
|
||||
|
||||
if hasattr(langchain_object, "return_intermediate_steps"):
|
||||
# https://github.com/hwchase17/langchain/issues/2068
|
||||
# Deactivating until we have a frontend solution
|
||||
# to display intermediate steps
|
||||
langchain_object.return_intermediate_steps = False
|
||||
|
||||
fix_memory_inputs(langchain_object)
|
||||
|
||||
with io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer):
|
||||
try:
|
||||
# if hasattr(langchain_object, "acall"):
|
||||
# output = await langchain_object.acall(chat_input)
|
||||
# else:
|
||||
output = langchain_object(chat_input)
|
||||
except ValueError as exc:
|
||||
# make the error message more informative
|
||||
logger.debug(f"Error: {str(exc)}")
|
||||
output = langchain_object.run(chat_input)
|
||||
|
||||
intermediate_steps = (
|
||||
output.get("intermediate_steps", []) if isinstance(output, dict) else []
|
||||
)
|
||||
|
||||
result = (
|
||||
output.get(langchain_object.output_keys[0])
|
||||
if isinstance(output, dict)
|
||||
else output
|
||||
)
|
||||
if intermediate_steps:
|
||||
thought = format_actions(intermediate_steps)
|
||||
else:
|
||||
thought = output_buffer.getvalue()
|
||||
|
||||
except Exception as exc:
|
||||
raise ValueError(f"Error: {str(exc)}") from exc
|
||||
return result, thought
|
||||
|
||||
|
||||
def load_or_build_langchain_object(data_graph, is_first_message=False):
|
||||
"""
|
||||
Load langchain object from cache if it exists, otherwise build it.
|
||||
"""
|
||||
if is_first_message:
|
||||
build_langchain_object_with_caching.clear_cache()
|
||||
return build_langchain_object_with_caching(data_graph)
|
||||
|
||||
|
||||
def process_graph_cached(data_graph: Dict[str, Any], message: str):
|
||||
"""
|
||||
Process graph by extracting input variables and replacing ZeroShotPrompt
|
||||
with PromptTemplate,then run the graph and return the result and thought.
|
||||
"""
|
||||
# Load langchain object
|
||||
is_first_message = len(data_graph.get("chatHistory", [])) == 0
|
||||
langchain_object = load_or_build_langchain_object(data_graph, is_first_message)
|
||||
logger.debug("Loaded langchain object")
|
||||
|
||||
if langchain_object is None:
|
||||
# Raise user facing error
|
||||
raise ValueError(
|
||||
"There was an error loading the langchain_object. Please, check all the nodes and try again."
|
||||
)
|
||||
|
||||
# Generate result and thought
|
||||
logger.debug("Generating result and thought")
|
||||
result, thought = get_result_and_thought(langchain_object, message)
|
||||
logger.debug("Generated result and thought")
|
||||
return {"result": str(result), "thought": thought.strip()}
|
||||
|
||||
|
||||
def load_flow_from_json(path: str, build=True):
|
||||
"""Load flow from json file"""
|
||||
# This is done to avoid circular imports
|
||||
|
||||
with open(path, "r", encoding="utf-8") as f:
|
||||
flow_graph = json.load(f)
|
||||
data_graph = flow_graph["data"]
|
||||
nodes = data_graph["nodes"]
|
||||
# Substitute ZeroShotPrompt with PromptTemplate
|
||||
# nodes = replace_zero_shot_prompt_with_prompt_template(nodes)
|
||||
# Add input variables
|
||||
# nodes = payload.extract_input_variables(nodes)
|
||||
|
||||
# Nodes, edges and root node
|
||||
edges = data_graph["edges"]
|
||||
graph = Graph(nodes, edges)
|
||||
if build:
|
||||
langchain_object = graph.build()
|
||||
if hasattr(langchain_object, "verbose"):
|
||||
langchain_object.verbose = True
|
||||
|
||||
if hasattr(langchain_object, "return_intermediate_steps"):
|
||||
# https://github.com/hwchase17/langchain/issues/2068
|
||||
# Deactivating until we have a frontend solution
|
||||
# to display intermediate steps
|
||||
langchain_object.return_intermediate_steps = False
|
||||
fix_memory_inputs(langchain_object)
|
||||
return langchain_object
|
||||
return graph
|
||||
|
|
@@ -146,7 +146,7 @@ class CSVAgentNode(FrontendNode):
            ),
        ],
    )
    description: str = """Construct a json agent from a CSV and tools."""
    description: str = """Construct a CSV agent from a CSV and tools."""
    base_classes: list[str] = ["AgentExecutor"]

    def to_dict(self):

@@ -194,7 +194,7 @@ class InitializeAgentNode(FrontendNode):
            ),
        ],
    )
    description: str = """Construct a json agent from an LLM and tools."""
    description: str = """Construct a zero shot agent from an LLM and tools."""
    base_classes: list[str] = ["AgentExecutor", "function"]

    def to_dict(self):

@@ -117,14 +117,30 @@ class FrontendNode(BaseModel):
    ) -> None:
        """Handles specific field values for certain fields."""
        if key == "headers":
            field.value = """{'Authorization':
            'Bearer <token>'}"""
        if name == "OpenAI" and key == "model_name":
            field.options = constants.OPENAI_MODELS
            field.is_list = True
        elif name == "ChatOpenAI" and key == "model_name":
            field.options = constants.CHAT_OPENAI_MODELS
            field.value = """{'Authorization': 'Bearer <token>'}"""
        FrontendNode._handle_model_specific_field_values(field, key, name)
        FrontendNode._handle_api_key_specific_field_values(field, key, name)

    @staticmethod
    def _handle_model_specific_field_values(
        field: TemplateField, key: str, name: Optional[str] = None
    ) -> None:
        """Handles specific field values related to models."""
        model_dict = {
            "OpenAI": constants.OPENAI_MODELS,
            "ChatOpenAI": constants.CHAT_OPENAI_MODELS,
            "Anthropic": constants.ANTHROPIC_MODELS,
            "ChatAnthropic": constants.ANTHROPIC_MODELS,
        }
        if name in model_dict and key == "model_name":
            field.options = model_dict[name]
            field.is_list = True

    @staticmethod
    def _handle_api_key_specific_field_values(
        field: TemplateField, key: str, name: Optional[str] = None
    ) -> None:
        """Handles specific field values related to API keys."""
        if "api_key" in key and "OpenAI" in str(name):
            field.display_name = "OpenAI API Key"
            field.required = False

@@ -59,6 +59,52 @@ class ToolNode(FrontendNode):
        return super().to_dict()


class PythonFunctionToolNode(FrontendNode):
    name: str = "PythonFunctionTool"
    template: Template = Template(
        type_name="PythonFunctionTool",
        fields=[
            TemplateField(
                field_type="str",
                required=True,
                placeholder="",
                is_list=False,
                show=True,
                multiline=False,
                value="",
                name="name",
                advanced=False,
            ),
            TemplateField(
                field_type="str",
                required=True,
                placeholder="",
                is_list=False,
                show=True,
                multiline=False,
                value="",
                name="description",
                advanced=False,
            ),
            TemplateField(
                field_type="code",
                required=True,
                placeholder="",
                is_list=False,
                show=True,
                value=DEFAULT_PYTHON_FUNCTION,
                name="code",
                advanced=False,
            ),
        ],
    )
    description: str = "Python function to be executed."
    base_classes: list[str] = ["Tool"]

    def to_dict(self):
        return super().to_dict()


class PythonFunctionNode(FrontendNode):
    name: str = "PythonFunction"
    template: Template = Template(

@@ -7,6 +7,20 @@ OPENAI_MODELS = [
]
CHAT_OPENAI_MODELS = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]

ANTHROPIC_MODELS = [
    "claude-v1",  # largest model, ideal for a wide range of more complex tasks.
    "claude-v1-100k",  # An enhanced version of claude-v1 with a 100,000 token (roughly 75,000 word) context window.
    "claude-instant-v1",  # A smaller model with far lower latency, sampling at roughly 40 words/sec!
    "claude-instant-v1-100k",  # Like claude-instant-v1 with a 100,000 token context window but retains its performance.
    # Specific sub-versions of the above models:
    "claude-v1.3",  # Vs claude-v1.2: better instruction-following, code, and non-English dialogue and writing.
    "claude-v1.3-100k",  # An enhanced version of claude-v1.3 with a 100,000 token (roughly 75,000 word) context window.
    "claude-v1.2",  # Vs claude-v1.1: small adv in general helpfulness, instruction following, coding, and other tasks.
    "claude-v1.0",  # An earlier version of claude-v1.
    "claude-instant-v1.1",  # Latest version of claude-instant-v1. Better than claude-instant-v1.0 at most tasks.
    "claude-instant-v1.1-100k",  # Version of claude-instant-v1.1 with a 100K token context window.
    "claude-instant-v1.0",  # An earlier version of claude-instant-v1.
]

DEFAULT_PYTHON_FUNCTION = """
def python_function(text: str) -> str:

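These constants populate the model_name dropdowns for the Anthropic components. A minimal sketch of what a frontend selection maps to on the backend (model choice and API key are illustrative):

# Sketch only: model name taken from ANTHROPIC_MODELS above, key is a placeholder.
from langchain.chat_models import ChatAnthropic

llm = ChatAnthropic(model="claude-instant-v1", anthropic_api_key="sk-ant-...")
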
@@ -302,7 +302,9 @@ def format_dict(d, name: Optional[str] = None):
        elif name == "ChatOpenAI" and key == "model_name":
            value["options"] = constants.CHAT_OPENAI_MODELS
            value["list"] = True

        elif (name == "Anthropic" or name == "ChatAnthropic") and key == "model_name":
            value["options"] = constants.ANTHROPIC_MODELS
            value["list"] = True
    return d

467 src/frontend/package-lock.json generated
[generated lockfile churn: new entries for @radix-ui/react-tooltip and its @radix-ui/* and @floating-ui/react-dom dependencies, class-variance-authority, lucide-react, tailwind-merge, and tailwindcss-animate; @types/react-dom and typescript move from "dev" to "devOptional"]
@@ -8,6 +8,7 @@
    "@headlessui/react": "^1.7.10",
    "@heroicons/react": "^2.0.15",
    "@mui/material": "^5.11.9",
    "@radix-ui/react-tooltip": "^1.0.6",
    "@tabler/icons-react": "^2.18.0",
    "@tailwindcss/forms": "^0.5.3",
    "@tailwindcss/line-clamp": "^0.4.4",

@@ -15,7 +16,10 @@
    "ansi-to-html": "^0.7.2",
    "axios": "^1.3.2",
    "base64-js": "^1.5.1",
    "class-variance-authority": "^0.6.0",
    "clsx": "^1.2.1",
    "lodash": "^4.17.21",
    "lucide-react": "^0.233.0",
    "react": "^18.2.0",
    "react-ace": "^10.1.0",
    "react-cookie": "^4.1.1",

@@ -32,6 +36,8 @@
    "rehype-mathjax": "^4.0.2",
    "remark-gfm": "^3.0.1",
    "remark-math": "^5.1.1",
    "tailwind-merge": "^1.13.0",
    "tailwindcss-animate": "^1.0.5",
    "uuid": "^9.0.0",
    "vite-plugin-svgr": "^3.2.0",
    "web-vitals": "^2.1.4"

@@ -170,7 +170,7 @@ export default function App() {
          className="absolute left-7 bottom-2 flex h-6 cursor-pointer flex-col items-center justify-start overflow-hidden rounded-lg bg-gray-800 px-2 text-center font-sans text-xs tracking-wide text-gray-300 transition-all duration-500 ease-in-out hover:h-12 dark:bg-gray-100 dark:text-gray-800"
        >
          {version && <div className="mt-1">⛓️ LangFlow v{version}</div>}
          <div className="mt-2">Created by Logspace</div>
          <div className={version ? "mt-2" : "mt-1"}>Created by Logspace</div>
        </a>
      </div>
  );

@ -1,6 +1,11 @@
|
|||
import { Handle, Position, useUpdateNodeInternals } from "reactflow";
|
||||
import Tooltip from "../../../../components/TooltipComponent";
|
||||
import { classNames, isValidConnection } from "../../../../utils";
|
||||
import {
|
||||
classNames,
|
||||
groupByFamily,
|
||||
isValidConnection,
|
||||
toFirstUpperCase,
|
||||
} from "../../../../utils";
|
||||
import { useContext, useEffect, useRef, useState } from "react";
|
||||
import InputComponent from "../../../../components/inputComponent";
|
||||
import ToggleComponent from "../../../../components/toggleComponent";
|
||||
|
|
@ -15,6 +20,10 @@ import InputFileComponent from "../../../../components/inputFileComponent";
|
|||
import { TabsContext } from "../../../../contexts/tabsContext";
|
||||
import IntComponent from "../../../../components/intComponent";
|
||||
import PromptAreaComponent from "../../../../components/promptComponent";
|
||||
import { nodeNames, nodeIcons } from "../../../../utils";
|
||||
import React from "react";
|
||||
import { nodeColors } from "../../../../utils";
|
||||
import ShadTooltip from "../../../../components/ShadTooltipComponent";
|
||||
|
||||
export default function ParameterComponent({
|
||||
left,
|
||||
|
|
@ -28,6 +37,7 @@ export default function ParameterComponent({
|
|||
required = false,
|
||||
}: ParameterComponentType) {
|
||||
const ref = useRef(null);
|
||||
const refHtml = useRef(null);
|
||||
const updateNodeInternals = useUpdateNodeInternals();
|
||||
const [position, setPosition] = useState(0);
|
||||
useEffect(() => {
|
||||
|
|
@ -48,6 +58,48 @@ export default function ParameterComponent({
|
|||
let disabled =
|
||||
reactFlowInstance?.getEdges().some((e) => e.targetHandle === id) ?? false;
|
||||
const { save } = useContext(TabsContext);
|
||||
const [myData, setMyData] = useState(useContext(typesContext).data);
|
||||
|
||||
useEffect(() => {
|
||||
const groupedObj = groupByFamily(myData, tooltipTitle);
|
||||
|
||||
refHtml.current = groupedObj.map((item, i) => (
|
||||
<span
|
||||
key={item}
|
||||
className={classNames(
|
||||
i > 0 ? "items-center flex mt-3" : "items-center flex"
|
||||
)}
|
||||
>
|
||||
<div
|
||||
className="h-5 w-5"
|
||||
style={{
|
||||
color: nodeColors[item.family],
|
||||
}}
|
||||
>
|
||||
{React.createElement(nodeIcons[item.family])}
|
||||
</div>
|
||||
<span className="ps-2 text-gray-950">
|
||||
{nodeNames[item.family] ?? ""}{" "}
|
||||
<span className={classNames(left ? "hidden" : "")}>
|
||||
{" "}
|
||||
-
|
||||
{item.type.split(", ").length > 2
|
||||
? item.type.split(", ").map((el, i) => (
|
||||
<>
|
||||
<span key={el}>
|
||||
{i == item.type.split(", ").length - 1
|
||||
? el
|
||||
: (el += `, `)}
|
||||
</span>
|
||||
{i % 2 == 0 && i > 0 && <br></br>}
|
||||
</>
|
||||
))
|
||||
: item.type}
|
||||
</span>
|
||||
</span>
|
||||
</span>
|
||||
));
|
||||
}, [tooltipTitle]);
|
||||
|
||||
return (
|
||||
<div
|
||||
|
|
@ -69,7 +121,11 @@ export default function ParameterComponent({
|
|||
type === "int") ? (
|
||||
<></>
|
||||
) : (
|
||||
<Tooltip title={tooltipTitle + (required ? " (required)" : "")}>
|
||||
<ShadTooltip
|
||||
delayDuration={0}
|
||||
content={refHtml.current}
|
||||
side={left ? "left" : "right"}
|
||||
>
|
||||
<Handle
|
||||
type={left ? "target" : "source"}
|
||||
position={left ? Position.Left : Position.Right}
|
||||
|
|
@ -86,7 +142,7 @@ export default function ParameterComponent({
|
|||
top: position,
|
||||
}}
|
||||
></Handle>
|
||||
</Tooltip>
|
||||
</ShadTooltip>
|
||||
)}
|
||||
|
||||
{left === true &&
|
||||
|
|
|
|||
|
|
@ -28,8 +28,8 @@ import NodeModal from "../../modals/NodeModal";
|
|||
import { useCallback } from "react";
|
||||
import { TabsContext } from "../../contexts/tabsContext";
|
||||
import { debounce } from "../../utils";
|
||||
import TooltipReact from "../../components/ReactTooltipComponent";
|
||||
import Tooltip from "../../components/TooltipComponent";
|
||||
import ShadTooltip from "../../components/ShadTooltipComponent";
|
||||
export default function GenericNode({
|
||||
data,
|
||||
selected,
|
||||
|
|
@ -115,14 +115,9 @@ export default function GenericNode({
|
|||
}}
|
||||
/>
|
||||
<div className="ml-2 truncate">
|
||||
<TooltipReact
|
||||
delayShow={1000}
|
||||
selector={`node-selector-${data.type}`}
|
||||
htmlContent={data.type}
|
||||
position="top"
|
||||
>
|
||||
<ShadTooltip delayDuration={1500} content={data.type}>
|
||||
<div className="ml-2 truncate">{data.type}</div>
|
||||
</TooltipReact>
|
||||
</ShadTooltip>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex gap-3">
|
||||
|
|
@ -253,11 +248,7 @@ export default function GenericNode({
|
|||
: toTitleCase(t)
|
||||
}
|
||||
name={t}
|
||||
tooltipTitle={
|
||||
"Type: " +
|
||||
data.node.template[t].type +
|
||||
(data.node.template[t].list ? " list" : "")
|
||||
}
|
||||
tooltipTitle={data.node.template[t].type}
|
||||
required={data.node.template[t].required}
|
||||
id={data.node.template[t].type + "|" + t + "|" + data.id}
|
||||
left={true}
|
||||
|
|
@ -283,7 +274,7 @@ export default function GenericNode({
|
|||
data={data}
|
||||
color={nodeColors[types[data.type]] ?? nodeColors.unknown}
|
||||
title={data.type}
|
||||
tooltipTitle={`Type: ${data.node.base_classes.join(" | ")}`}
|
||||
tooltipTitle={`${data.node.base_classes.join("\n")}`}
|
||||
id={[data.type, data.id, ...data.node.base_classes].join("|")}
|
||||
type={data.node.base_classes.join("|")}
|
||||
left={false}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
import { Disclosure } from "@headlessui/react";
|
||||
import { ChevronLeftIcon } from "@heroicons/react/24/outline";
|
||||
import { useContext } from "react";
|
||||
import { useContext, useState } from "react";
|
||||
import { Link } from "react-router-dom";
|
||||
import { classNames } from "../../utils";
|
||||
import { locationContext } from "../../contexts/locationContext";
|
||||
|
|
@ -13,6 +13,7 @@ export default function ExtraSidebar() {
|
|||
extraNavigation,
|
||||
extraComponent,
|
||||
} = useContext(locationContext);
|
||||
|
||||
return (
|
||||
<>
|
||||
<aside
|
||||
|
|
@ -21,10 +22,8 @@ export default function ExtraSidebar() {
|
|||
} flex-shrink-0 flex overflow-hidden flex-col border-r dark:border-r-gray-700 transition-all duration-500`}
|
||||
>
|
||||
<div className="w-52 dark:bg-gray-800 border dark:border-gray-700 overflow-y-auto scrollbar-hide h-full flex flex-col items-start">
|
||||
<div className="flex pt-1 px-4 justify-between align-middle w-full">
|
||||
<span className="text-gray-900 dark:text-white py-[2px] font-medium ">
|
||||
{extraNavigation.title}
|
||||
</span>
|
||||
<div className="flex px-4 justify-between align-middle w-full">
|
||||
<span className="text-gray-900 dark:text-white py-[2px] font-medium "></span>
|
||||
</div>
|
||||
<div className="flex flex-grow flex-col w-full">
|
||||
{extraNavigation.options ? (
|
||||
|
|
|
|||
|
|
@ -37,13 +37,15 @@ const TooltipReact: FC<TooltipProps> = ({
|
|||
id={selector}
|
||||
content={content}
|
||||
className={classNames(
|
||||
"!bg-white !text-xs !font-normal !text-gray-700 !shadow-md !opacity-100 z-20",
|
||||
"!bg-white !text-xs !font-normal !text-gray-700 !shadow-md !opacity-100 z-[9999]",
|
||||
className
|
||||
)}
|
||||
place={position}
|
||||
clickable={clickable}
|
||||
isOpen={disabled ? false : undefined}
|
||||
delayShow={delayShow}
|
||||
positionStrategy="absolute"
|
||||
float={true}
|
||||
>
|
||||
{htmlContent && htmlContent}
|
||||
</ReactTooltip>
|
||||
|
|
|
|||
25
src/frontend/src/components/ShadTooltipComponent/index.tsx
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipProvider,
|
||||
TooltipTrigger,
|
||||
} from "../ui/tooltip";
|
||||
|
||||
const ShadTooltip = (props) => {
|
||||
return (
|
||||
<TooltipProvider>
|
||||
<Tooltip delayDuration={props.delayDuration}>
|
||||
<TooltipTrigger asChild>{props.children}</TooltipTrigger>
|
||||
<TooltipContent
|
||||
side={props.side}
|
||||
avoidCollisions={false}
|
||||
sticky="always"
|
||||
>
|
||||
{props.content}
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
</TooltipProvider>
|
||||
);
|
||||
};
|
||||
|
||||
export default ShadTooltip;
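
For reference, a minimal usage sketch of this new wrapper; the SaveButton component and its import path are illustrative only, while delayDuration, content and side are the props defined above:

import ShadTooltip from "../../components/ShadTooltipComponent";

export function SaveButton({ onSave }: { onSave: () => void }) {
  return (
    // the wrapped element becomes the trigger via TooltipTrigger asChild
    <ShadTooltip delayDuration={500} content="Save the current flow" side="bottom">
      <button onClick={onSave}>Save</button>
    </ShadTooltip>
  );
}
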
|
||||
|
|
@ -25,7 +25,9 @@ export default function Dropdown({
|
|||
<>
|
||||
<div className="relative mt-1 w-full">
|
||||
<Listbox.Button className="relative w-full cursor-default rounded-md border border-gray-300 bg-white dark:bg-gray-900 py-2 pl-3 pr-10 text-left shadow-sm focus:border-indigo-500 focus:outline-none focus:ring-1 focus:ring-indigo-500 sm:text-sm">
|
||||
<span className="block truncate w-full">{internalValue}</span>
|
||||
<span className="block truncate w-full dark:text-gray-300">
|
||||
{internalValue}
|
||||
</span>
|
||||
<span className="pointer-events-none absolute inset-y-0 right-0 flex items-center pr-2">
|
||||
<ChevronUpDownIcon
|
||||
className="h-5 w-5 text-gray-400"
|
||||
|
|
|
|||
29
src/frontend/src/components/ui/tooltip.tsx
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
"use client";
|
||||
|
||||
import * as React from "react";
|
||||
import * as TooltipPrimitive from "@radix-ui/react-tooltip";
|
||||
import { cn } from "../../utils";
|
||||
|
||||
const TooltipProvider = TooltipPrimitive.Provider;
|
||||
|
||||
const Tooltip = TooltipPrimitive.Root;
|
||||
|
||||
const TooltipTrigger = TooltipPrimitive.Trigger;
|
||||
|
||||
const TooltipContent = React.forwardRef<
|
||||
React.ElementRef<typeof TooltipPrimitive.Content>,
|
||||
React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
|
||||
>(({ className, sideOffset = 4, ...props }, ref) => (
|
||||
<TooltipPrimitive.Content
|
||||
ref={ref}
|
||||
sideOffset={sideOffset}
|
||||
className={cn(
|
||||
"z-50 overflow-hidden rounded-md border bg-popover px-3 py-1.5 text-sm text-popover-foreground shadow-md animate-in fade-in-50 data-[side=bottom]:slide-in-from-top-1 data-[side=left]:slide-in-from-right-1 data-[side=right]:slide-in-from-left-1 data-[side=top]:slide-in-from-bottom-1",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
));
|
||||
TooltipContent.displayName = TooltipPrimitive.Content.displayName;
|
||||
|
||||
export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider };
|
||||
|
|
@ -13,13 +13,16 @@ const initialValue = {
|
|||
export const darkContext = createContext<darkContextType>(initialValue);
|
||||
|
||||
export function DarkProvider({ children }) {
|
||||
const [dark, setDark] = useState(false);
|
||||
const [dark, setDark] = useState(
|
||||
JSON.parse(window.localStorage.getItem("isDark")) ?? false
|
||||
);
|
||||
useEffect(() => {
|
||||
if (dark) {
|
||||
document.getElementById("body").classList.add("dark");
|
||||
} else {
|
||||
document.getElementById("body").classList.remove("dark");
|
||||
}
|
||||
window.localStorage.setItem("isDark", dark.toString());
|
||||
}, [dark]);
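
Reading the flag with JSON.parse works here because the effect above only ever stores "true" or "false", and a missing key yields null, so the ?? false fallback applies on first load. A slightly more defensive reader, in case the stored value is ever hand-edited, might look like this sketch (not part of the change):

function getStoredDark(): boolean {
  try {
    // "true" / "false" are what the setItem call above writes
    return JSON.parse(window.localStorage.getItem("isDark") ?? "false") === true;
  } catch {
    return false; // fall back to light mode on malformed values
  }
}
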
|
||||
return (
|
||||
<darkContext.Provider
|
||||
|
|
|
|||
|
|
@ -47,7 +47,7 @@ export const TabsContext = createContext<TabsContextType>(
|
|||
);
|
||||
|
||||
export function TabsProvider({ children }: { children: ReactNode }) {
|
||||
const { setNoticeData } = useContext(alertContext);
|
||||
const { setErrorData, setNoticeData } = useContext(alertContext);
|
||||
const [tabIndex, setTabIndex] = useState(0);
|
||||
const [flows, setFlows] = useState<Array<FlowType>>([]);
|
||||
const [id, setId] = useState(uuidv4());
|
||||
|
|
@ -98,25 +98,25 @@ export function TabsProvider({ children }: { children: ReactNode }) {
|
|||
edge.style = { stroke: "#555555" };
|
||||
});
|
||||
flow.data.nodes.forEach((node) => {
|
||||
if (Object.keys(templates[node.data.type]["template"]).length > 0) {
|
||||
node.data.node.base_classes =
|
||||
templates[node.data.type]["base_classes"];
|
||||
const template = templates[node.data.type];
|
||||
if (!template) {
|
||||
setErrorData({ title: `Unknown node type: ${node.data.type}` });
|
||||
return;
|
||||
}
|
||||
if (Object.keys(template["template"]).length > 0) {
|
||||
node.data.node.base_classes = template["base_classes"];
|
||||
flow.data.edges.forEach((edge) => {
|
||||
if (edge.source === node.id) {
|
||||
edge.sourceHandle = edge.sourceHandle
|
||||
.split("|")
|
||||
.slice(0, 2)
|
||||
.concat(templates[node.data.type]["base_classes"])
|
||||
.concat(template["base_classes"])
|
||||
.join("|");
|
||||
}
|
||||
});
|
||||
node.data.node.description =
|
||||
templates[node.data.type]["description"];
|
||||
node.data.node.description = template["description"];
|
||||
node.data.node.template = updateTemplate(
|
||||
templates[node.data.type][
|
||||
"template"
|
||||
] as unknown as APITemplateType,
|
||||
|
||||
template["template"] as unknown as APITemplateType,
|
||||
node.data.node.template as APITemplateType
|
||||
);
|
||||
}
|
||||
|
|
@ -319,21 +319,25 @@ export function TabsProvider({ children }: { children: ReactNode }) {
|
|||
edge.animated = edge.targetHandle.split("|")[0] === "Text";
|
||||
});
|
||||
data.nodes.forEach((node) => {
|
||||
if (Object.keys(templates[node.data.type]["template"]).length > 0) {
|
||||
node.data.node.base_classes =
|
||||
templates[node.data.type]["base_classes"];
|
||||
const template = templates[node.data.type];
|
||||
if (!template) {
|
||||
setErrorData({ title: `Unknown node type: ${node.data.type}` });
|
||||
return;
|
||||
}
|
||||
if (Object.keys(template["template"]).length > 0) {
|
||||
node.data.node.base_classes = template["base_classes"];
|
||||
flow.data.edges.forEach((edge) => {
|
||||
if (edge.source === node.id) {
|
||||
edge.sourceHandle = edge.sourceHandle
|
||||
.split("|")
|
||||
.slice(0, 2)
|
||||
.concat(templates[node.data.type]["base_classes"])
|
||||
.concat(template["base_classes"])
|
||||
.join("|");
|
||||
}
|
||||
});
|
||||
node.data.node.description = templates[node.data.type]["description"];
|
||||
node.data.node.description = template["description"];
|
||||
node.data.node.template = updateTemplate(
|
||||
templates[node.data.type]["template"] as unknown as APITemplateType,
|
||||
template["template"] as unknown as APITemplateType,
|
||||
node.data.node.template as APITemplateType
|
||||
);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,13 +14,13 @@ export async function sendAll(data: sendAllProps) {
|
|||
export async function checkCode(
|
||||
code: string
|
||||
): Promise<AxiosResponse<errorsTypeAPI>> {
|
||||
return await axios.post("/validate/code", { code });
|
||||
return await axios.post("api/v1/validate/code", { code });
|
||||
}
|
||||
|
||||
export async function checkPrompt(
|
||||
template: string
|
||||
): Promise<AxiosResponse<PromptTypeAPI>> {
|
||||
return await axios.post("/validate/prompt", { template });
|
||||
return await axios.post("api/v1/validate/prompt", { template });
|
||||
}
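
Both validators now live under the versioned api/v1 prefix. An illustrative caller, with the response shape taken from the backend validation tests further down in this changeset (the validateSnippet name is an assumption):

async function validateSnippet(code: string): Promise<boolean> {
  const response = await checkCode(code);
  // backend returns { imports: { errors: [] }, function: { errors: [] } }
  const { imports, function: fn } = response.data;
  return imports.errors.length === 0 && fn.errors.length === 0;
}
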
|
||||
|
||||
export async function getExamples(): Promise<FlowType[]> {
|
||||
|
|
|
|||
9
src/frontend/src/icons/Anthropic/anthropic.svg
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" viewBox="-170.333 113.047 600 67.4" width="600" height="67.4">
|
||||
<defs>
|
||||
<style type="text/css">
|
||||
.st0{fill:#1F1F1E;}
|
||||
</style>
|
||||
</defs>
|
||||
<path class="st0" d="M -23.533 126.747 L -1.633 126.747 L -1.633 179.347 L 12.367 179.347 L 12.367 126.747 L 34.267 126.747 L 34.267 114.147 L -23.533 114.147 L -23.533 126.747 Z M -48.133 159.747 L -77.433 114.147 L -93.233 114.147 L -93.233 179.247 L -79.733 179.247 L -79.733 133.647 L -50.433 179.347 L -34.633 179.347 L -34.633 114.247 L -48.133 114.247 L -48.133 159.747 Z M 90.067 140.147 L 59.367 140.147 L 59.367 114.147 L 45.367 114.147 L 45.367 179.247 L 59.367 179.247 L 59.367 152.647 L 90.067 152.647 L 90.067 179.247 L 104.067 179.247 L 104.067 114.147 L 90.067 114.147 L 90.067 140.147 Z M -144.333 114.147 L -170.333 179.247 L -155.833 179.247 L -150.533 165.547 L -123.333 165.547 L -118.033 179.247 L -103.533 179.247 L -129.533 114.147 L -144.333 114.147 Z M -145.833 153.547 L -136.933 130.647 L -128.033 153.547 L -145.833 153.547 Z M 219.667 113.047 C 200.867 113.047 187.567 127.047 187.567 146.847 C 187.567 166.447 200.867 180.447 219.667 180.447 C 238.367 180.447 251.567 166.447 251.567 146.847 C 251.567 127.047 238.367 113.047 219.667 113.047 Z M 219.667 167.447 C 208.667 167.447 201.967 159.647 201.967 146.847 C 201.967 133.947 208.667 126.047 219.667 126.047 C 230.567 126.047 237.167 133.847 237.167 146.847 C 237.167 159.547 230.567 167.447 219.667 167.447 Z M 414.767 157.447 C 412.367 163.747 407.467 167.447 400.867 167.447 C 389.867 167.447 383.167 159.647 383.167 146.847 C 383.167 133.947 389.867 126.047 400.867 126.047 C 407.467 126.047 412.267 129.647 414.767 136.047 L 429.567 136.047 C 425.967 122.047 415.067 113.047 400.867 113.047 C 382.067 113.047 368.767 127.047 368.767 146.847 C 368.767 166.447 382.067 180.447 400.867 180.447 C 415.067 180.447 425.967 171.347 429.667 157.447 L 414.767 157.447 Z M 325.867 114.147 L 351.867 179.247 L 366.067 179.247 L 340.067 114.147 L 325.867 114.147 Z M 296.367 114.147 L 264.567 114.147 L 264.567 179.247 L 278.567 179.247 L 278.567 155.647 L 296.467 155.647 C 311.267 155.647 320.267 147.847 320.267 134.847 C 320.267 121.947 311.167 114.147 296.367 114.147 Z M 295.767 143.147 L 278.567 143.147 L 278.567 126.747 L 295.767 126.747 C 302.667 126.747 306.267 129.547 306.267 134.947 C 306.267 140.347 302.667 143.147 295.767 143.147 Z M 176.867 134.047 C 176.867 121.747 167.867 114.247 153.067 114.247 L 121.267 114.247 L 121.267 179.347 L 135.267 179.347 L 135.267 153.847 L 150.767 153.847 L 164.767 179.347 L 180.167 179.347 L 164.667 151.947 C 172.367 148.847 176.867 142.647 176.867 134.047 Z M 135.167 126.747 L 152.367 126.747 C 159.267 126.747 162.867 129.247 162.867 134.047 C 162.867 138.747 159.267 141.347 152.367 141.347 L 135.167 141.347 L 135.167 126.747 Z"></path>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 2.9 KiB |
11
src/frontend/src/icons/Anthropic/anthropic_box.svg
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" viewBox="-126.9 247.9 207.161 212.728" width="207.161" height="212.728">
|
||||
<defs>
|
||||
<style type="text/css">
|
||||
.st0{fill:#1F1F1E;}
|
||||
</style>
|
||||
</defs>
|
||||
<path class="st0" d="M 19.9 260.5 L 41.8 260.5 L 41.8 313.1 L 55.8 313.1 L 55.8 260.5 L 77.7 260.5 L 77.7 247.9 L 19.9 247.9 L 19.9 260.5 Z M -4.7 293.5 L -34 247.9 L -49.8 247.9 L -49.8 313 L -36.3 313 L -36.3 267.4 L -7 313.1 L 8.8 313.1 L 8.8 248 L -4.7 248 L -4.7 293.5 Z M -100.9 247.9 L -126.9 313 L -112.4 313 L -107.1 299.3 L -79.9 299.3 L -74.6 313 L -60.1 313 L -86.1 247.9 L -100.9 247.9 Z M -102.4 287.3 L -93.5 264.4 L -84.6 287.3 L -102.4 287.3 Z"></path>
|
||||
<path class="st0" d="M 38.246 437.628 C 35.846 443.928 30.946 447.628 24.346 447.628 C 13.346 447.628 6.646 439.828 6.646 427.028 C 6.646 414.128 13.346 406.228 24.346 406.228 C 30.946 406.228 35.746 409.828 38.246 416.228 L 53.046 416.228 C 49.446 402.228 38.546 393.228 24.346 393.228 C 5.546 393.228 -7.754 407.228 -7.754 427.028 C -7.754 446.628 5.546 460.628 24.346 460.628 C 38.546 460.628 49.446 451.528 53.146 437.628 L 38.246 437.628 Z M -50.654 394.328 L -24.654 459.428 L -10.454 459.428 L -36.454 394.328 L -50.654 394.328 Z M -80.154 394.328 L -111.954 394.328 L -111.954 459.428 L -97.954 459.428 L -97.954 435.828 L -80.054 435.828 C -65.254 435.828 -56.254 428.028 -56.254 415.028 C -56.254 402.128 -65.354 394.328 -80.154 394.328 Z M -80.754 423.328 L -97.954 423.328 L -97.954 406.928 L -80.754 406.928 C -73.854 406.928 -70.254 409.728 -70.254 415.128 C -70.254 420.528 -73.854 423.328 -80.754 423.328 Z"></path>
|
||||
<path class="st0" d="M -81.239 347.704 L -111.939 347.704 L -111.939 321.704 L -125.939 321.704 L -125.939 386.804 L -111.939 386.804 L -111.939 360.204 L -81.239 360.204 L -81.239 386.804 L -67.239 386.804 L -67.239 321.704 L -81.239 321.704 L -81.239 347.704 Z M 48.361 320.604 C 29.561 320.604 16.261 334.604 16.261 354.404 C 16.261 374.004 29.561 388.004 48.361 388.004 C 67.061 388.004 80.261 374.004 80.261 354.404 C 80.261 334.604 67.061 320.604 48.361 320.604 Z M 48.361 375.004 C 37.361 375.004 30.661 367.204 30.661 354.404 C 30.661 341.504 37.361 333.604 48.361 333.604 C 59.261 333.604 65.861 341.404 65.861 354.404 C 65.861 367.104 59.261 375.004 48.361 375.004 Z M 5.561 341.604 C 5.561 329.304 -3.439 321.804 -18.239 321.804 L -50.039 321.804 L -50.039 386.904 L -36.039 386.904 L -36.039 361.404 L -20.539 361.404 L -6.539 386.904 L 8.861 386.904 L -6.639 359.504 C 1.061 356.404 5.561 350.204 5.561 341.604 Z M -36.139 334.304 L -18.939 334.304 C -12.039 334.304 -8.439 336.804 -8.439 341.604 C -8.439 346.304 -12.039 348.904 -18.939 348.904 L -36.139 348.904 L -36.139 334.304 Z"></path>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 2.7 KiB |
9
src/frontend/src/icons/Anthropic/index.tsx
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
import React, { forwardRef } from "react";
|
||||
import { ReactComponent as AnthropicSVG } from "./anthropic_box.svg";
|
||||
|
||||
export const AnthropicIcon = forwardRef<
|
||||
SVGSVGElement,
|
||||
React.PropsWithChildren<{}>
|
||||
>((props, ref) => {
|
||||
return <AnthropicSVG ref={ref} {...props} />;
|
||||
});
|
||||
|
|
@ -2,16 +2,88 @@
|
|||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
|
||||
@layer base {
|
||||
:root {
|
||||
--background: 0 0% 100%;
|
||||
--foreground: 222.2 47.4% 11.2%;
|
||||
--muted: 210 40% 96.1%;
|
||||
--muted-foreground: 215.4 16.3% 46.9%;
|
||||
--popover: 0 0% 100%;
|
||||
--popover-foreground: 222.2 47.4% 11.2%;
|
||||
--card: 0 0% 100%;
|
||||
--card-foreground: 222.2 47.4% 11.2%;
|
||||
--border: 214.3 31.8% 91.4%;
|
||||
--input: 214.3 31.8% 91.4%;
|
||||
--primary: 222.2 47.4% 11.2%;
|
||||
--primary-foreground: 210 40% 98%;
|
||||
--secondary: 210 40% 96.1%;
|
||||
--secondary-foreground: 222.2 47.4% 11.2%;
|
||||
--accent: 210 40% 96.1%;
|
||||
--accent-foreground: 222.2 47.4% 11.2%;
|
||||
--destructive: 0 100% 50%;
|
||||
--destructive-foreground: 210 40% 98%;
|
||||
--ring: 215 20.2% 65.1%;
|
||||
--radius: 0.5rem;
|
||||
}
|
||||
|
||||
.dark {
|
||||
--background: 224 71% 4%;
|
||||
--foreground: 213 31% 91%;
|
||||
--muted: 223 47% 11%;
|
||||
--muted-foreground: 215.4 16.3% 56.9%;
|
||||
--popover: 224 71% 4%;
|
||||
--popover-foreground: 215 20.2% 65.1%;
|
||||
--card: 224 71% 4%;
|
||||
--card-foreground: 213 31% 91%;
|
||||
--border: 216 34% 17%;
|
||||
--input: 216 34% 17%;
|
||||
--primary: 210 40% 98%;
|
||||
--primary-foreground: 222.2 47.4% 1.2%;
|
||||
--secondary: 222.2 47.4% 11.2%;
|
||||
--secondary-foreground: 210 40% 98%;
|
||||
--accent: 216 34% 17%;
|
||||
--accent-foreground: 210 40% 98%;
|
||||
--destructive: 0 63% 31%;
|
||||
--destructive-foreground: 210 40% 98%;
|
||||
--ring: 216 34% 17%;
|
||||
--radius: 0.5rem;
|
||||
}
|
||||
}
|
||||
|
||||
@layer base {
|
||||
* {
|
||||
@apply border-border;
|
||||
}
|
||||
body {
|
||||
@apply bg-background text-foreground;
|
||||
font-feature-settings: "rlig" 1, "calt" 1;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
body {
|
||||
margin: 0;
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen",
|
||||
"Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue",
|
||||
sans-serif;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
-moz-osx-font-smoothing: grayscale;
|
||||
}
|
||||
|
||||
code {
|
||||
font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New",
|
||||
monospace;
|
||||
}
|
||||
|
||||
.react-flow__pane {
|
||||
cursor: default;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,9 +16,11 @@ import Convert from "ansi-to-html";
|
|||
export default function ChatMessage({
|
||||
chat,
|
||||
lockChat,
|
||||
lastMessage,
|
||||
}: {
|
||||
chat: ChatMessageType;
|
||||
lockChat: boolean;
|
||||
lastMessage: boolean;
|
||||
}) {
|
||||
const convert = new Convert({ newline: true });
|
||||
const [message, setMessage] = useState("");
|
||||
|
|
@ -48,7 +50,7 @@ export default function ChatMessage({
|
|||
"absolute transition-opacity duration-500 scale-150 " +
|
||||
(lockChat ? "opacity-100" : "opacity-0")
|
||||
}
|
||||
src={AiIcon}
|
||||
src={lastMessage ? AiIcon : AiIconStill}
|
||||
/>
|
||||
<img
|
||||
className={
|
||||
|
|
|
|||
|
|
@ -182,10 +182,10 @@ export default function ChatModal({
|
|||
try {
|
||||
const urlWs =
|
||||
process.env.NODE_ENV === "development"
|
||||
? `ws://localhost:7860/chat/${id.current}`
|
||||
? `ws://localhost:7860/api/v1/chat/${id.current}`
|
||||
: `${window.location.protocol === "https:" ? "wss" : "ws"}://${
|
||||
window.location.host
|
||||
}/chat/${id.current}`;
|
||||
}api/v1/chat/${id.current}`;
|
||||
const newWs = new WebSocket(urlWs);
|
||||
newWs.onopen = () => {
|
||||
console.log("WebSocket connection established!");
|
||||
|
|
@ -409,7 +409,12 @@ export default function ChatModal({
|
|||
>
|
||||
{chatHistory.length > 0 ? (
|
||||
chatHistory.map((c, i) => (
|
||||
<ChatMessage lockChat={lockChat} chat={c} key={i} />
|
||||
<ChatMessage
|
||||
lockChat={lockChat}
|
||||
chat={c}
|
||||
lastMessage={chatHistory.length - 1 == i ? true : false}
|
||||
key={i}
|
||||
/>
|
||||
))
|
||||
) : (
|
||||
<div className="flex flex-col h-full text-center justify-center w-full items-center align-middle">
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import { DisclosureComponentType } from "../../../../types/components";
|
|||
export default function DisclosureComponent({
|
||||
button: { title, Icon, buttons = [] },
|
||||
children,
|
||||
openDisc,
|
||||
}: DisclosureComponentType) {
|
||||
return (
|
||||
<Disclosure as="div" key={title}>
|
||||
|
|
@ -27,14 +28,14 @@ export default function DisclosureComponent({
|
|||
<div>
|
||||
<ChevronRightIcon
|
||||
className={`${
|
||||
open ? "rotate-90 transform" : ""
|
||||
open || openDisc ? "rotate-90 transform" : ""
|
||||
} h-4 w-4 text-gray-800 dark:text-white`}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</Disclosure.Button>
|
||||
</div>
|
||||
<Disclosure.Panel as="div" className="-mt-px">
|
||||
<Disclosure.Panel as="div" className="-mt-px" static={openDisc}>
|
||||
{children}
|
||||
</Disclosure.Panel>
|
||||
</>
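
The new openDisc prop forces a panel open (through Headless UI's static rendering and the chevron rotation above) regardless of click state. A hypothetical caller that keeps every category expanded while a search term is active; the imports and the "Agents" title are placeholders:

import { Bars2Icon } from "@heroicons/react/24/outline";
import DisclosureComponent from "../DisclosureComponent";

export function CategoryPanel({ search }: { search: string }) {
  return (
    <DisclosureComponent
      openDisc={search.length > 0} // stays open whenever something is typed
      button={{ title: "Agents", Icon: Bars2Icon }}
    >
      <div className="p-2 flex flex-col gap-2">{/* filtered entries */}</div>
    </DisclosureComponent>
  );
}
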
|
||||
|
|
|
|||
|
|
@ -1,13 +1,21 @@
|
|||
import { Bars2Icon } from "@heroicons/react/24/outline";
|
||||
import DisclosureComponent from "../DisclosureComponent";
|
||||
import { nodeColors, nodeIcons, nodeNames } from "../../../../utils";
|
||||
import { useContext, useEffect, useState } from "react";
|
||||
import {
|
||||
classNames,
|
||||
nodeColors,
|
||||
nodeIcons,
|
||||
nodeNames,
|
||||
} from "../../../../utils";
|
||||
import { useContext, useEffect, useState, useRef } from "react";
|
||||
import { typesContext } from "../../../../contexts/typesContext";
|
||||
import { APIClassType, APIObjectType } from "../../../../types/api";
|
||||
import TooltipReact from "../../../../components/ReactTooltipComponent";
|
||||
import { MagnifyingGlassIcon } from "@heroicons/react/24/outline";
|
||||
import ShadTooltip from "../../../../components/ShadTooltipComponent";
|
||||
|
||||
export default function ExtraSidebar() {
|
||||
const { data } = useContext(typesContext);
|
||||
const [dataFilter, setFilterData] = useState(data);
|
||||
const [search, setSearch] = useState("");
|
||||
|
||||
function onDragStart(
|
||||
event: React.DragEvent<any>,
|
||||
|
|
@ -24,66 +32,102 @@ export default function ExtraSidebar() {
|
|||
event.dataTransfer.setData("json", JSON.stringify(data));
|
||||
}
|
||||
|
||||
function handleSearchInput(e: string) {
|
||||
setFilterData((_) => {
|
||||
let ret = {};
|
||||
Object.keys(data).forEach((d: keyof APIObjectType, i) => {
|
||||
ret[d] = {};
|
||||
let keys = Object.keys(data[d]).filter((nd) =>
|
||||
nd.toLowerCase().includes(e.toLowerCase())
|
||||
);
|
||||
keys.forEach((element) => {
|
||||
ret[d][element] = data[d][element];
|
||||
});
|
||||
});
|
||||
return ret;
|
||||
});
|
||||
}
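
The handler above rebuilds the category-to-node map keeping only node names that contain the search term; categories left empty are then hidden by the Object.keys(dataFilter[d]).length > 0 check in the JSX below. A standalone sketch of the same filtering, with a worked example using illustrative node names:

function filterByName(
  data: Record<string, Record<string, unknown>>,
  term: string
): Record<string, Record<string, unknown>> {
  const ret: Record<string, Record<string, unknown>> = {};
  Object.keys(data).forEach((family) => {
    ret[family] = {};
    Object.keys(data[family])
      .filter((name) => name.toLowerCase().includes(term.toLowerCase()))
      .forEach((name) => (ret[family][name] = data[family][name]));
  });
  return ret;
}

// filterByName({ llms: { OpenAI: {}, Cohere: {} }, chains: { LLMChain: {} } }, "open")
// -> { llms: { OpenAI: {} }, chains: {} }
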
|
||||
|
||||
return (
|
||||
<div className="mt-1 w-full">
|
||||
{Object.keys(data)
|
||||
.sort()
|
||||
.map((d: keyof APIObjectType, i) => (
|
||||
<DisclosureComponent
|
||||
key={i}
|
||||
button={{
|
||||
title: nodeNames[d] ?? nodeNames.unknown,
|
||||
Icon: nodeIcons[d] ?? nodeIcons.unknown,
|
||||
}}
|
||||
>
|
||||
<div className="p-2 flex flex-col gap-2">
|
||||
{Object.keys(data[d])
|
||||
.sort()
|
||||
.map((t: string, k) => (
|
||||
<TooltipReact
|
||||
selector={t}
|
||||
htmlContent={t}
|
||||
position="right"
|
||||
delayShow={1500}
|
||||
key={k}
|
||||
>
|
||||
<div key={k} data-tooltip-id={t}>
|
||||
<div
|
||||
draggable
|
||||
className={" cursor-grab border-l-8 rounded-l-md"}
|
||||
style={{
|
||||
borderLeftColor: nodeColors[d] ?? nodeColors.unknown,
|
||||
}}
|
||||
onDragStart={(event) =>
|
||||
onDragStart(event, {
|
||||
type: t,
|
||||
node: data[d][t],
|
||||
})
|
||||
}
|
||||
onDragEnd={() => {
|
||||
document.body.removeChild(
|
||||
document.getElementsByClassName(
|
||||
"cursor-grabbing"
|
||||
)[0]
|
||||
);
|
||||
}}
|
||||
<>
|
||||
<div className="relative mt-2 flex items-center mb-2 mx-2">
|
||||
<input
|
||||
type="text"
|
||||
name="search"
|
||||
id="search"
|
||||
placeholder="Search nodes"
|
||||
className="dark:text-white focus:outline-none block w-full rounded-md py-1.5 ps-3 pr-9 text-gray-900 shadow-sm ring-1 ring-inset ring-gray-300 placeholder:text-gray-400 sm:text-sm sm:leading-6 dark:ring-0 dark:bg-[#2d3747] dark:focus:outline-none"
|
||||
onChange={(e) => {
|
||||
handleSearchInput(e.target.value);
|
||||
setSearch(e.target.value);
|
||||
}}
|
||||
/>
|
||||
<div className="absolute inset-y-0 right-0 flex py-1.5 pr-3 items-center">
|
||||
<MagnifyingGlassIcon className="h-5 w-5 dark:text-white"></MagnifyingGlassIcon>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="mt-1 w-full">
|
||||
{Object.keys(dataFilter)
|
||||
.sort()
|
||||
.map((d: keyof APIObjectType, i) =>
|
||||
Object.keys(dataFilter[d]).length > 0 ? (
|
||||
<DisclosureComponent
|
||||
openDisc={search.length == 0 ? false : true}
|
||||
key={i}
|
||||
button={{
|
||||
title: nodeNames[d] ?? nodeNames.unknown,
|
||||
Icon: nodeIcons[d] ?? nodeIcons.unknown,
|
||||
}}
|
||||
>
|
||||
<div className="p-2 flex flex-col gap-2">
|
||||
{Object.keys(dataFilter[d])
|
||||
.sort()
|
||||
.map((t: string, k) => (
|
||||
<ShadTooltip
|
||||
content={t}
|
||||
delayDuration={1500}
|
||||
side="right"
|
||||
>
|
||||
<div className="flex w-full justify-between text-sm px-3 py-1 bg-white dark:bg-gray-800 items-center border-dashed border-gray-400 dark:border-gray-600 border-l-0 rounded-md rounded-l-none border">
|
||||
<span className="text-black dark:text-white w-36 pr-1 truncate text-xs">
|
||||
{t}
|
||||
</span>
|
||||
<Bars2Icon className="w-4 h-6 text-gray-400 dark:text-gray-600" />
|
||||
<div key={k} data-tooltip-id={t}>
|
||||
<div
|
||||
draggable
|
||||
className={" cursor-grab border-l-8 rounded-l-md"}
|
||||
style={{
|
||||
borderLeftColor:
|
||||
nodeColors[d] ?? nodeColors.unknown,
|
||||
}}
|
||||
onDragStart={(event) =>
|
||||
onDragStart(event, {
|
||||
type: t,
|
||||
node: data[d][t],
|
||||
})
|
||||
}
|
||||
onDragEnd={() => {
|
||||
document.body.removeChild(
|
||||
document.getElementsByClassName(
|
||||
"cursor-grabbing"
|
||||
)[0]
|
||||
);
|
||||
}}
|
||||
>
|
||||
<div className="flex w-full justify-between text-sm px-3 py-1 bg-white dark:bg-gray-800 items-center border-dashed border-gray-400 dark:border-gray-600 border-l-0 rounded-md rounded-l-none border">
|
||||
<span className="text-black dark:text-white w-36 pr-1 truncate text-xs">
|
||||
{t}
|
||||
</span>
|
||||
<Bars2Icon className="w-4 h-6 text-gray-400 dark:text-gray-600" />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</TooltipReact>
|
||||
))}
|
||||
{Object.keys(data[d]).length === 0 && (
|
||||
<div className="text-gray-400 text-center">Coming soon</div>
|
||||
)}
|
||||
</div>
|
||||
</DisclosureComponent>
|
||||
))}
|
||||
</div>
|
||||
</ShadTooltip>
|
||||
))}
|
||||
</div>
|
||||
</DisclosureComponent>
|
||||
) : (
|
||||
<div key={i}></div>
|
||||
)
|
||||
)}
|
||||
</div>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -116,7 +116,7 @@ export default function TabsManagerComponent() {
|
|||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div className="w-full h-full">
|
||||
<div className="w-full h-full dark:bg-gray-800">
|
||||
<ReactFlowProvider>
|
||||
{flows[tabIndex] ? (
|
||||
<FlowPage flow={flows[tabIndex]}></FlowPage>
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ import {
|
|||
ReactElement,
|
||||
ReactFragment,
|
||||
ReactNode,
|
||||
SVGProps,
|
||||
} from "react";
|
||||
import { NodeDataType } from "../flow/index";
|
||||
export type InputComponentType = {
|
||||
|
|
@ -56,6 +57,7 @@ export type FileComponentType = {
|
|||
|
||||
export type DisclosureComponentType = {
|
||||
children: ReactNode;
|
||||
openDisc: boolean;
|
||||
button: {
|
||||
title: string;
|
||||
Icon: ForwardRefExoticComponent<React.SVGProps<SVGSVGElement>>;
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ import { FlowType, NodeType } from "./types/flow";
|
|||
import { APITemplateType, TemplateVariableType } from "./types/api";
|
||||
import _ from "lodash";
|
||||
import { ChromaIcon } from "./icons/ChromaIcon";
|
||||
import { AnthropicIcon } from "./icons/Anthropic";
|
||||
import { AirbyteIcon } from "./icons/Airbyte";
|
||||
import { AzIcon } from "./icons/AzLogo";
|
||||
import { BingIcon } from "./icons/Bing";
|
||||
|
|
@ -47,6 +48,12 @@ import { WolframIcon } from "./icons/Wolfram";
|
|||
import { WordIcon } from "./icons/Word";
|
||||
import { SerperIcon } from "./icons/Serper";
|
||||
import { v4 as uuidv4 } from "uuid";
|
||||
import { clsx, type ClassValue } from "clsx";
|
||||
import { twMerge } from "tailwind-merge";
|
||||
|
||||
export function cn(...inputs: ClassValue[]) {
|
||||
return twMerge(clsx(inputs));
|
||||
}
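
cn is the usual clsx plus tailwind-merge combination consumed by the new ui/tooltip component: falsy entries are dropped and conflicting Tailwind utilities resolve in favour of the later class. A small illustration (the exact merged string follows tailwind-merge's rules):

const isActive = true;
cn("px-2 py-1", isActive && "bg-primary", "px-4");
// -> "py-1 bg-primary px-4": px-4 wins over px-2, false/undefined entries vanish
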
|
||||
|
||||
export function classNames(...classes: Array<string>) {
|
||||
return classes.filter(Boolean).join(" ");
|
||||
|
|
@ -153,6 +160,8 @@ export const nodeIcons: {
|
|||
AirbyteJSONLoader: AirbyteIcon,
|
||||
// SerpAPIWrapper: SerperIcon,
|
||||
// AZLyricsLoader: AzIcon,
|
||||
Anthropic: AnthropicIcon,
|
||||
ChatAnthropic: AnthropicIcon,
|
||||
BingSearchAPIWrapper: BingIcon,
|
||||
BingSearchRun: BingIcon,
|
||||
Cohere: CohereIcon,
|
||||
|
|
@ -632,3 +641,58 @@ export function updateIds(newFlow, getNodeId) {
|
|||
e.targetHandle;
|
||||
});
|
||||
}
|
||||
|
||||
export function groupByFamily(data, baseClasses) {
|
||||
let arrOfParent: string[] = [];
|
||||
let arrOfType: { family: string; type: string }[] = [];
|
||||
|
||||
Object.keys(data).map((d) => {
|
||||
Object.keys(data[d]).map((n) => {
|
||||
if (
|
||||
data[d][n].base_classes.some((r) => baseClasses.split("\n").includes(r))
|
||||
) {
|
||||
arrOfParent.push(d);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
let uniq = arrOfParent.filter(
|
||||
(item, index) => arrOfParent.indexOf(item) === index
|
||||
);
|
||||
|
||||
Object.keys(data).map((d) => {
|
||||
Object.keys(data[d]).map((n) => {
|
||||
baseClasses.split("\n").forEach((tol) => {
|
||||
data[d][n].base_classes.forEach((data) => {
|
||||
if (tol == data) {
|
||||
arrOfType.push({
|
||||
family: d,
|
||||
type: data,
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
let groupedBy = arrOfType.filter((object, index, self) => {
|
||||
const foundIndex = self.findIndex(
|
||||
(o) => o.family === object.family && o.type === object.type
|
||||
);
|
||||
return foundIndex === index;
|
||||
});
|
||||
|
||||
let groupedObj = groupedBy.reduce((result, item) => {
|
||||
const existingGroup = result.find((group) => group.family === item.family);
|
||||
|
||||
if (existingGroup) {
|
||||
existingGroup.type += `, ${item.type}`;
|
||||
} else {
|
||||
result.push({ family: item.family, type: item.type });
|
||||
}
|
||||
|
||||
return result;
|
||||
}, []);
|
||||
|
||||
return groupedObj;
|
||||
}
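
groupByFamily pairs each node family with the subset of a handle's base classes (one per line, as produced by base_classes.join("\n") in GenericNode) that at least one of its nodes exposes. A worked example against an abbreviated, illustrative catalogue:

const catalogue = {
  llms: {
    OpenAI: { base_classes: ["OpenAI", "BaseLLM", "BaseLanguageModel"] },
    ChatOpenAI: { base_classes: ["ChatOpenAI", "BaseLanguageModel"] },
  },
  chains: {
    LLMChain: { base_classes: ["LLMChain", "Chain"] },
  },
};

groupByFamily(catalogue, "BaseLanguageModel\nChain");
// -> [
//      { family: "llms", type: "BaseLanguageModel" },
//      { family: "chains", type: "Chain" },
//    ]
// ParameterComponent then renders one row per family; when a family matches
// more than one class, the types are joined with ", " by the reduce above.
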
|
||||
|
|
|
|||
|
|
@ -1,11 +1,83 @@
|
|||
/** @type {import('tailwindcss').Config} */
|
||||
const { fontFamily } = require("tailwindcss/defaultTheme")
|
||||
|
||||
import plugin from "tailwindcss/plugin";
|
||||
module.exports = {
|
||||
content: ["./index.html", "./src/**/*.{js,ts,tsx,jsx}"],
|
||||
darkMode: "class",
|
||||
important: true,
|
||||
theme: {
|
||||
container: {
|
||||
center: true,
|
||||
padding: "2rem",
|
||||
screens: {
|
||||
"2xl": "1400px",
|
||||
},
|
||||
},
|
||||
extend: {
|
||||
colors: {
|
||||
border: "hsl(var(--border))",
|
||||
input: "hsl(var(--input))",
|
||||
ring: "hsl(var(--ring))",
|
||||
background: "hsl(var(--background))",
|
||||
foreground: "hsl(var(--foreground))",
|
||||
primary: {
|
||||
DEFAULT: "hsl(var(--primary))",
|
||||
foreground: "hsl(var(--primary-foreground))",
|
||||
},
|
||||
secondary: {
|
||||
DEFAULT: "hsl(var(--secondary))",
|
||||
foreground: "hsl(var(--secondary-foreground))",
|
||||
},
|
||||
destructive: {
|
||||
DEFAULT: "hsl(var(--destructive))",
|
||||
foreground: "hsl(var(--destructive-foreground))",
|
||||
},
|
||||
muted: {
|
||||
DEFAULT: "hsl(var(--muted))",
|
||||
foreground: "hsl(var(--muted-foreground))",
|
||||
},
|
||||
accent: {
|
||||
DEFAULT: "hsl(var(--accent))",
|
||||
foreground: "hsl(var(--accent-foreground))",
|
||||
},
|
||||
popover: {
|
||||
DEFAULT: "hsl(var(--popover))",
|
||||
foreground: "hsl(var(--popover-foreground))",
|
||||
},
|
||||
card: {
|
||||
DEFAULT: "hsl(var(--card))",
|
||||
foreground: "hsl(var(--card-foreground))",
|
||||
},
|
||||
},
|
||||
borderRadius: {
|
||||
lg: `var(--radius)`,
|
||||
md: `calc(var(--radius) - 2px)`,
|
||||
sm: "calc(var(--radius) - 4px)",
|
||||
},
|
||||
fontFamily: {
|
||||
sans: ["var(--font-sans)", ...fontFamily.sans],
|
||||
},
|
||||
keyframes: {
|
||||
"accordion-down": {
|
||||
from: { height: 0 },
|
||||
to: { height: "var(--radix-accordion-content-height)" },
|
||||
},
|
||||
"accordion-up": {
|
||||
from: { height: "var(--radix-accordion-content-height)" },
|
||||
to: { height: 0 },
|
||||
},
|
||||
pulseGreen: {
|
||||
"0%": { boxShadow: "0 0 0 0 rgba(72, 187, 120, 0.7)" },
|
||||
"100%": { boxShadow: "0 0 0 10px rgba(72, 187, 120, 0)" },
|
||||
},
|
||||
},
|
||||
animation: {
|
||||
"accordion-down": "accordion-down 0.2s ease-out",
|
||||
"accordion-up": "accordion-up 0.2s ease-out",
|
||||
"pulse-green": "pulseGreen 1s linear",
|
||||
'spin-once': 'spin 1s linear 0.7'
|
||||
},
|
||||
borderColor: {
|
||||
"red-outline": "rgba(255, 0, 0, 0.8)",
|
||||
"green-outline": "rgba(72, 187, 120, 0.7)",
|
||||
|
|
@ -14,17 +86,6 @@ module.exports = {
|
|||
"red-outline": "0 0 5px rgba(255, 0, 0, 0.5)",
|
||||
"green-outline": "0 0 5px rgba(72, 187, 120, 0.7)",
|
||||
},
|
||||
|
||||
animation: {
|
||||
"pulse-green": "pulseGreen 1s linear",
|
||||
'spin-once': 'spin 1s linear 0.7'
|
||||
},
|
||||
keyframes: {
|
||||
pulseGreen: {
|
||||
"0%": { boxShadow: "0 0 0 0 rgba(72, 187, 120, 0.7)" },
|
||||
"100%": { boxShadow: "0 0 0 10px rgba(72, 187, 120, 0)" },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
plugins: [
|
||||
|
|
@ -96,4 +157,4 @@ module.exports = {
|
|||
}),
|
||||
require("@tailwindcss/typography"),
|
||||
],
|
||||
};
|
||||
};
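
Each hsl(var(--…)) entry above reads the HSL triplets added to index.css earlier in this diff, so the same utility class follows the light/dark palette automatically. A minimal sketch of how that surfaces in a component (the Panel component is illustrative):

export function Panel() {
  // bg-background resolves to hsl(var(--background)): 0 0% 100% in light
  // mode, 224 71% 4% under the .dark class; rounded-lg follows --radius.
  return <div className="bg-background text-foreground rounded-lg border p-4" />;
}
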
|
||||
|
|
@ -19,7 +19,8 @@
|
|||
"isolatedModules": true,
|
||||
"noEmit": true,
|
||||
"jsx": "react-jsx",
|
||||
"noImplicitAny": false
|
||||
"noImplicitAny": false,
|
||||
"baseUrl": "."
|
||||
},
|
||||
"include": [
|
||||
"src"
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ const apiRoutes = [
|
|||
];
|
||||
|
||||
// Use environment variable to determine the target.
|
||||
const target = process.env.VITE_PROXY_TARGET || "http://127.0.0.1:7860";
|
||||
const target = process.env.VITE_PROXY_TARGET || "http://127.0.0.1:7860/api/v1";
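
With /api/v1 appended to the default target, every route in apiRoutes is forwarded to the versioned backend path. An illustrative entry of the kind the reduce below produces, assuming "/all" is one of the listed routes:

const exampleProxy = {
  "/all": { target: "http://127.0.0.1:7860/api/v1" },
};
// the proxy appends the request path, so GET /all reaches /api/v1/all on port 7860
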
|
||||
|
||||
const proxyTargets = apiRoutes.reduce((proxyObj, route) => {
|
||||
proxyObj[route] = {
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ import json
|
|||
from pathlib import Path
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from langflow.graph.graph.base import Graph
|
||||
import pytest
|
||||
from fastapi.testclient import TestClient
|
||||
from httpx import AsyncClient
|
||||
|
|
@ -46,7 +47,6 @@ def client():
|
|||
|
||||
def get_graph(_type="basic"):
|
||||
"""Get a graph from a json file"""
|
||||
from langflow.graph.graph import Graph
|
||||
|
||||
if _type == "basic":
|
||||
path = pytest.BASIC_EXAMPLE_PATH
|
||||
|
|
|
|||
|
|
@ -197,7 +197,7 @@
|
|||
"y": 136.29836646158452
|
||||
},
|
||||
"data": {
|
||||
"type": "PythonFunction",
|
||||
"type": "PythonFunctionTool",
|
||||
"node": {
|
||||
"template": {
|
||||
"code": {
|
||||
|
|
@ -210,6 +210,26 @@
|
|||
"type": "str",
|
||||
"list": false
|
||||
},
|
||||
"description": {
|
||||
"required": true,
|
||||
"placeholder": "",
|
||||
"show": true,
|
||||
"multiline": true,
|
||||
"value": "My description",
|
||||
"name": "description",
|
||||
"type": "str",
|
||||
"list": false
|
||||
},
|
||||
"name": {
|
||||
"required": true,
|
||||
"placeholder": "",
|
||||
"show": true,
|
||||
"multiline": true,
|
||||
"value": "My Tool",
|
||||
"name": "name",
|
||||
"type": "str",
|
||||
"list": false
|
||||
},
|
||||
"_type": "python_function"
|
||||
},
|
||||
"description": "Python function to be executed.",
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ from langflow.settings import settings
|
|||
# check that all agents are in settings.agents
|
||||
# are in json_response["agents"]
|
||||
def test_agents_settings(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
agents = json_response["agents"]
|
||||
|
|
@ -13,7 +13,7 @@ def test_agents_settings(client: TestClient):
|
|||
|
||||
|
||||
def test_zero_shot_agent(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
agents = json_response["agents"]
|
||||
|
|
@ -52,7 +52,7 @@ def test_zero_shot_agent(client: TestClient):
|
|||
|
||||
|
||||
def test_json_agent(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
agents = json_response["agents"]
|
||||
|
|
@ -87,7 +87,7 @@ def test_json_agent(client: TestClient):
|
|||
|
||||
|
||||
def test_csv_agent(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
agents = json_response["agents"]
|
||||
|
|
@ -126,7 +126,7 @@ def test_csv_agent(client: TestClient):
|
|||
|
||||
|
||||
def test_initialize_agent(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
agents = json_response["agents"]
|
||||
|
|
|
|||
|
|
@ -1,10 +1,10 @@
|
|||
import json
|
||||
from langflow.graph import Graph
|
||||
from langflow.processing.process import load_or_build_langchain_object
|
||||
|
||||
import pytest
|
||||
from langflow.interface.run import (
|
||||
build_graph,
|
||||
build_langchain_object_with_caching,
|
||||
load_or_build_langchain_object,
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -62,7 +62,7 @@ def test_build_langchain_object_with_caching(basic_data_graph):
|
|||
|
||||
# Test build_graph
|
||||
def test_build_graph(basic_data_graph):
|
||||
graph = build_graph(basic_data_graph)
|
||||
graph = Graph.from_payload(basic_data_graph)
|
||||
assert graph is not None
|
||||
assert len(graph.nodes) == len(basic_data_graph["nodes"])
|
||||
assert len(graph.edges) == len(basic_data_graph["edges"])
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ from langflow.settings import settings
|
|||
|
||||
|
||||
def test_chains_settings(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
chains = json_response["chains"]
|
||||
|
|
@ -12,7 +12,7 @@ def test_chains_settings(client: TestClient):
|
|||
|
||||
# Test the ConversationChain object
|
||||
def test_conversation_chain(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
chains = json_response["chains"]
|
||||
|
|
@ -94,7 +94,7 @@ def test_conversation_chain(client: TestClient):
|
|||
|
||||
|
||||
def test_llm_chain(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
chains = json_response["chains"]
|
||||
|
|
@ -152,7 +152,7 @@ def test_llm_chain(client: TestClient):
|
|||
|
||||
|
||||
def test_llm_checker_chain(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
chains = json_response["chains"]
|
||||
|
|
@ -228,7 +228,7 @@ def test_llm_checker_chain(client: TestClient):
|
|||
|
||||
|
||||
def test_llm_math_chain(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
chains = json_response["chains"]
|
||||
|
|
@ -306,7 +306,7 @@ def test_llm_math_chain(client: TestClient):
|
|||
|
||||
|
||||
def test_series_character_chain(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
chains = json_response["chains"]
|
||||
|
|
@ -368,7 +368,7 @@ def test_series_character_chain(client: TestClient):
|
|||
|
||||
|
||||
def test_mid_journey_prompt_chain(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
chains = json_response["chains"]
|
||||
|
|
@ -407,7 +407,7 @@ def test_mid_journey_prompt_chain(client: TestClient):
|
|||
|
||||
|
||||
def test_time_travel_guide_chain(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
chains = json_response["chains"]
|
||||
|
|
|
|||
|
|
@ -1,13 +1,32 @@
|
|||
# Test this:
|
||||
from langflow.interface.importing.utils import get_function
|
||||
import pytest
|
||||
from langflow.interface.tools.custom import PythonFunction
|
||||
from langflow.interface.tools.custom import PythonFunctionTool, PythonFunction
|
||||
from langflow.utils import constants
|
||||
|
||||
|
||||
def test_python_function_tool():
|
||||
"""Test Python function"""
|
||||
code = constants.DEFAULT_PYTHON_FUNCTION
|
||||
func = get_function(code)
|
||||
func = PythonFunctionTool(name="Test", description="Testing", code=code, func=func)
|
||||
assert func("text") == "text"
|
||||
# the tool decorator should raise an error if
|
||||
# the function is not str -> str
|
||||
|
||||
# This raises ValidationError
|
||||
with pytest.raises(SyntaxError):
|
||||
code = pytest.CODE_WITH_SYNTAX_ERROR
|
||||
func = get_function(code)
|
||||
func = PythonFunctionTool(
|
||||
name="Test", description="Testing", code=code, func=func
|
||||
)
|
||||
|
||||
|
||||
def test_python_function():
|
||||
"""Test Python function"""
|
||||
func = PythonFunction(code=constants.DEFAULT_PYTHON_FUNCTION)
|
||||
assert func.get_function()("text") == "text"
|
||||
assert get_function(func.code)("text") == "text"
|
||||
# the tool decorator should raise an error if
|
||||
# the function is not str -> str
|
||||
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ from langflow.interface.tools.constants import CUSTOM_TOOLS
|
|||
|
||||
|
||||
def test_get_all(client: TestClient):
|
||||
response = client.get("/all")
|
||||
response = client.get("api/v1/all")
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
# We need to test the custom nodes
|
||||
|
|
@ -21,7 +21,7 @@ import math
|
|||
def square(x):
|
||||
return x ** 2
|
||||
"""
|
||||
response1 = client.post("/validate/code", json={"code": code1})
|
||||
response1 = client.post("api/v1/validate/code", json={"code": code1})
|
||||
assert response1.status_code == 200
|
||||
assert response1.json() == {"imports": {"errors": []}, "function": {"errors": []}}
|
||||
|
||||
|
|
@ -32,7 +32,7 @@ import non_existent_module
|
|||
def square(x):
|
||||
return x ** 2
|
||||
"""
|
||||
response2 = client.post("/validate/code", json={"code": code2})
|
||||
response2 = client.post("api/v1/validate/code", json={"code": code2})
|
||||
assert response2.status_code == 200
|
||||
assert response2.json() == {
|
||||
"imports": {"errors": ["No module named 'non_existent_module'"]},
|
||||
|
|
@ -46,7 +46,7 @@ import math
|
|||
def square(x)
|
||||
return x ** 2
|
||||
"""
|
||||
response3 = client.post("/validate/code", json={"code": code3})
|
||||
response3 = client.post("api/v1/validate/code", json={"code": code3})
|
||||
assert response3.status_code == 200
|
||||
assert response3.json() == {
|
||||
"imports": {"errors": []},
|
||||
|
|
@ -54,11 +54,11 @@ def square(x)
|
|||
}
|
||||
|
||||
# Test case with invalid JSON payload
|
||||
response4 = client.post("/validate/code", json={"invalid_key": code1})
|
||||
response4 = client.post("api/v1/validate/code", json={"invalid_key": code1})
|
||||
assert response4.status_code == 422
|
||||
|
||||
# Test case with an empty code string
|
||||
response5 = client.post("/validate/code", json={"code": ""})
|
||||
response5 = client.post("api/v1/validate/code", json={"code": ""})
|
||||
assert response5.status_code == 200
|
||||
assert response5.json() == {"imports": {"errors": []}, "function": {"errors": []}}
|
||||
|
||||
|
|
@ -69,7 +69,7 @@ import math
|
|||
def square(x)
|
||||
return x ** 2
|
||||
"""
|
||||
response6 = client.post("/validate/code", json={"code": code6})
|
||||
response6 = client.post("api/v1/validate/code", json={"code": code6})
|
||||
assert response6.status_code == 200
|
||||
assert response6.json() == {
|
||||
"imports": {"errors": []},
|
||||
|
|
@ -95,13 +95,13 @@ INVALID_PROMPT = "This is an invalid prompt without any input variable."
|
|||
|
||||
|
||||
def test_valid_prompt(client: TestClient):
|
||||
response = client.post("/validate/prompt", json={"template": VALID_PROMPT})
|
||||
response = client.post("api/v1/validate/prompt", json={"template": VALID_PROMPT})
|
||||
assert response.status_code == 200
|
||||
assert response.json() == {"input_variables": ["product"]}
|
||||
|
||||
|
||||
def test_invalid_prompt(client: TestClient):
|
||||
response = client.post("/validate/prompt", json={"template": INVALID_PROMPT})
|
||||
response = client.post("api/v1/validate/prompt", json={"template": INVALID_PROMPT})
|
||||
assert response.status_code == 200
|
||||
assert response.json() == {"input_variables": []}
|
||||
|
||||
|
|
@ -116,7 +116,7 @@ def test_invalid_prompt(client: TestClient):
|
|||
],
|
||||
)
|
||||
def test_various_prompts(client, prompt, expected_input_variables):
|
||||
response = client.post("/validate/prompt", json={"template": prompt})
|
||||
response = client.post("api/v1/validate/prompt", json={"template": prompt})
|
||||
assert response.status_code == 200
|
||||
assert response.json() == {
|
||||
"input_variables": expected_input_variables,
|
||||
|
|
|
|||
|
|
@ -1,20 +1,22 @@
|
|||
from typing import Type, Union
|
||||
from langflow.graph.edge.base import Edge
|
||||
from langflow.graph.vertex.base import Vertex
|
||||
|
||||
import pytest
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.llms.fake import FakeListLLM
|
||||
from langflow.graph import Edge, Graph, Node
|
||||
from langflow.graph.nodes import (
|
||||
AgentNode,
|
||||
ChainNode,
|
||||
FileToolNode,
|
||||
LLMNode,
|
||||
PromptNode,
|
||||
ToolkitNode,
|
||||
ToolNode,
|
||||
WrapperNode,
|
||||
from langflow.graph import Graph
|
||||
from langflow.graph.vertex.types import (
|
||||
AgentVertex,
|
||||
ChainVertex,
|
||||
FileToolVertex,
|
||||
LLMVertex,
|
||||
PromptVertex,
|
||||
ToolkitVertex,
|
||||
ToolVertex,
|
||||
WrapperVertex,
|
||||
)
|
||||
from langflow.interface.run import get_result_and_thought
|
||||
from langflow.processing.process import get_result_and_thought
|
||||
from langflow.utils.payload import get_root_node
|
||||
|
||||
# Test cases for the graph module
|
||||
|
|
@ -23,7 +25,7 @@ from langflow.utils.payload import get_root_node
|
|||
# BASIC_EXAMPLE_PATH, COMPLEX_EXAMPLE_PATH, OPENAPI_EXAMPLE_PATH
|
||||
|
||||
|
||||
def get_node_by_type(graph, node_type: Type[Node]) -> Union[Node, None]:
|
||||
def get_node_by_type(graph, node_type: Type[Vertex]) -> Union[Vertex, None]:
|
||||
"""Get a node by type"""
|
||||
return next((node for node in graph.nodes if isinstance(node, node_type)), None)
|
||||
|
||||
|
|
@ -33,7 +35,7 @@ def test_graph_structure(basic_graph):
|
|||
assert len(basic_graph.nodes) > 0
|
||||
assert len(basic_graph.edges) > 0
|
||||
for node in basic_graph.nodes:
|
||||
assert isinstance(node, Node)
|
||||
assert isinstance(node, Vertex)
|
||||
for edge in basic_graph.edges:
|
||||
assert isinstance(edge, Edge)
|
||||
assert edge.source in basic_graph.nodes
|
||||
|
|
@ -156,14 +158,16 @@ def test_get_node_neighbors_complex(complex_graph):
|
|||
tool_neighbors = complex_graph.get_nodes_with_target(tool)
|
||||
assert tool_neighbors is not None
|
||||
# Check if there is a PythonFunction in the tool's neighbors
|
||||
assert any("PythonFunction" in neighbor.data["type"] for neighbor in tool_neighbors)
|
||||
assert any(
|
||||
"PythonFunctionTool" in neighbor.data["type"] for neighbor in tool_neighbors
|
||||
)
|
||||
|
||||
|
||||
def test_get_node(basic_graph):
|
||||
"""Test getting a single node"""
|
||||
node_id = basic_graph.nodes[0].id
|
||||
node = basic_graph.get_node(node_id)
|
||||
assert isinstance(node, Node)
|
||||
assert isinstance(node, Vertex)
|
||||
assert node.id == node_id
|
||||
|
||||
|
||||
|
|
@ -172,7 +176,7 @@ def test_build_nodes(basic_graph):
|
|||
|
||||
assert len(basic_graph.nodes) == len(basic_graph._nodes)
|
||||
for node in basic_graph.nodes:
|
||||
assert isinstance(node, Node)
|
||||
assert isinstance(node, Vertex)
|
||||
|
||||
|
||||
def test_build_edges(basic_graph):
|
||||
|
|
@ -180,8 +184,8 @@ def test_build_edges(basic_graph):
|
|||
assert len(basic_graph.edges) == len(basic_graph._edges)
|
||||
for edge in basic_graph.edges:
|
||||
assert isinstance(edge, Edge)
|
||||
assert isinstance(edge.source, Node)
|
||||
assert isinstance(edge.target, Node)
|
||||
assert isinstance(edge.source, Vertex)
|
||||
assert isinstance(edge.target, Vertex)
|
||||
|
||||
|
||||
def test_get_root_node(basic_graph, complex_graph):
|
||||
|
|
@ -189,13 +193,13 @@ def test_get_root_node(basic_graph, complex_graph):
|
|||
assert isinstance(basic_graph, Graph)
|
||||
root = get_root_node(basic_graph)
|
||||
assert root is not None
|
||||
assert isinstance(root, Node)
|
||||
assert isinstance(root, Vertex)
|
||||
assert root.data["type"] == "TimeTravelGuideChain"
|
||||
# For complex example, the root node is a ZeroShotAgent too
|
||||
assert isinstance(complex_graph, Graph)
|
||||
root = get_root_node(complex_graph)
|
||||
assert root is not None
|
||||
assert isinstance(root, Node)
|
||||
assert isinstance(root, Vertex)
|
||||
assert root.data["type"] == "ZeroShotAgent"
|
||||
|
||||
|
||||
|
|
@ -237,11 +241,10 @@ def test_build_params(basic_graph):
|
|||
assert "memory" in root.params
|
||||
|
||||
|
||||
def test_build(basic_graph, complex_graph, openapi_graph):
|
||||
def test_build(basic_graph, complex_graph):
|
||||
"""Test Node's build method"""
|
||||
assert_agent_was_built(basic_graph)
|
||||
assert_agent_was_built(complex_graph)
|
||||
assert_agent_was_built(openapi_graph)
|
||||
|
||||
|
||||
def assert_agent_was_built(graph):
|
||||
|
|
@ -255,14 +258,14 @@ def assert_agent_was_built(graph):
|
|||
|
||||
|
||||
def test_agent_node_build(complex_graph):
|
||||
agent_node = get_node_by_type(complex_graph, AgentNode)
|
||||
agent_node = get_node_by_type(complex_graph, AgentVertex)
|
||||
assert agent_node is not None
|
||||
built_object = agent_node.build()
|
||||
assert built_object is not None
|
||||
|
||||
|
||||
def test_tool_node_build(complex_graph):
|
||||
tool_node = get_node_by_type(complex_graph, ToolNode)
|
||||
tool_node = get_node_by_type(complex_graph, ToolVertex)
|
||||
assert tool_node is not None
|
||||
built_object = tool_node.build()
|
||||
assert built_object is not None
|
||||
|
|
@ -270,7 +273,7 @@ def test_tool_node_build(complex_graph):
|
|||
|
||||
|
||||
def test_chain_node_build(complex_graph):
|
||||
chain_node = get_node_by_type(complex_graph, ChainNode)
|
||||
chain_node = get_node_by_type(complex_graph, ChainVertex)
|
||||
assert chain_node is not None
|
||||
built_object = chain_node.build()
|
||||
assert built_object is not None
|
||||
|
|
@ -278,7 +281,7 @@ def test_chain_node_build(complex_graph):
|
|||
|
||||
|
||||
def test_prompt_node_build(complex_graph):
|
||||
prompt_node = get_node_by_type(complex_graph, PromptNode)
|
||||
prompt_node = get_node_by_type(complex_graph, PromptVertex)
|
||||
assert prompt_node is not None
|
||||
built_object = prompt_node.build()
|
||||
assert built_object is not None
|
||||
|
|
@ -286,7 +289,7 @@ def test_prompt_node_build(complex_graph):
|
|||
|
||||
|
||||
def test_llm_node_build(basic_graph):
|
||||
llm_node = get_node_by_type(basic_graph, LLMNode)
|
||||
llm_node = get_node_by_type(basic_graph, LLMVertex)
|
||||
assert llm_node is not None
|
||||
built_object = llm_node.build()
|
||||
assert built_object is not None
|
||||
|
|
@ -294,7 +297,7 @@ def test_llm_node_build(basic_graph):
|
|||
|
||||
|
||||
def test_toolkit_node_build(openapi_graph):
|
||||
toolkit_node = get_node_by_type(openapi_graph, ToolkitNode)
|
||||
toolkit_node = get_node_by_type(openapi_graph, ToolkitVertex)
|
||||
assert toolkit_node is not None
|
||||
built_object = toolkit_node.build()
|
||||
assert built_object is not None
|
||||
|
|
@ -302,7 +305,7 @@ def test_toolkit_node_build(openapi_graph):
|
|||
|
||||
|
||||
def test_file_tool_node_build(openapi_graph):
|
||||
file_tool_node = get_node_by_type(openapi_graph, FileToolNode)
|
||||
file_tool_node = get_node_by_type(openapi_graph, FileToolVertex)
|
||||
assert file_tool_node is not None
|
||||
built_object = file_tool_node.build()
|
||||
assert built_object is not None
|
||||
|
|
@ -310,7 +313,7 @@ def test_file_tool_node_build(openapi_graph):
|
|||
|
||||
|
||||
def test_wrapper_node_build(openapi_graph):
|
||||
wrapper_node = get_node_by_type(openapi_graph, WrapperNode)
|
||||
wrapper_node = get_node_by_type(openapi_graph, WrapperVertex)
|
||||
assert wrapper_node is not None
|
||||
built_object = wrapper_node.build()
|
||||
assert built_object is not None
|
||||
|
|
@@ -325,7 +328,7 @@ def test_get_result_and_thought(basic_graph):
     message = "Hello"
     # Find the node that is an LLMNode and change the
     # _built_object to a FakeListLLM
-    llm_node = get_node_by_type(basic_graph, LLMNode)
+    llm_node = get_node_by_type(basic_graph, LLMVertex)
     assert llm_node is not None
     llm_node._built_object = FakeListLLM(responses=responses)
     llm_node._built = True
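
Note: the hunks above all make the same mechanical change — the graph node classes are renamed from *Node to *Vertex (AgentVertex, ToolVertex, ChainVertex, PromptVertex, LLMVertex, ToolkitVertex, FileToolVertex, WrapperVertex) while the build flow in each test stays identical. A minimal sketch of that shared pattern follows; the import path langflow.graph and the graph.nodes attribute are assumptions, and get_node_by_type mirrors the helper already defined in this test module.

import pytest

from langflow.graph import AgentVertex, ChainVertex, LLMVertex, ToolVertex  # assumed import path


def get_node_by_type(graph, node_type):
    # First vertex of the given type, or None (mirrors the test helper).
    return next((node for node in graph.nodes if isinstance(node, node_type)), None)


@pytest.mark.parametrize("vertex_type", [AgentVertex, ChainVertex, LLMVertex, ToolVertex])
def test_vertex_build(complex_graph, vertex_type):
    vertex = get_node_by_type(complex_graph, vertex_type)
    assert vertex is not None
    assert vertex.build() is not None
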
@@ -3,7 +3,7 @@ from langflow.settings import settings
 
 
 def test_llms_settings(client: TestClient):
-    response = client.get("/all")
+    response = client.get("api/v1/all")
     assert response.status_code == 200
     json_response = response.json()
     llms = json_response["llms"]
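
Note: this and the following hunks reflect the API routes moving under an api/v1 prefix, so the settings tests now request api/v1/all instead of /all. A minimal sketch of the new call pattern, assuming the FastAPI application object is importable as langflow.main.app (the client fixture in these tests presumably wraps the same app):

from fastapi.testclient import TestClient

from langflow.main import app  # assumed location of the FastAPI app

client = TestClient(app)


def test_all_endpoint_lists_llms():
    # The old "/all" route no longer exists; everything is served under api/v1.
    response = client.get("api/v1/all")
    assert response.status_code == 200
    assert "llms" in response.json()
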
@@ -11,7 +11,7 @@ def test_llms_settings(client: TestClient):
 
 
 # def test_hugging_face_hub(client: TestClient):
-#     response = client.get("/all")
+#     response = client.get("api/v1/all")
 #     assert response.status_code == 200
 #     json_response = response.json()
 #     language_models = json_response["llms"]
@@ -103,7 +103,7 @@ def test_llms_settings(client: TestClient):
 
 
 def test_openai(client: TestClient):
-    response = client.get("/all")
+    response = client.get("api/v1/all")
     assert response.status_code == 200
     json_response = response.json()
     language_models = json_response["llms"]
@@ -333,7 +333,7 @@ def test_openai(client: TestClient):
 
 
 def test_chat_open_ai(client: TestClient):
-    response = client.get("/all")
+    response = client.get("api/v1/all")
     assert response.status_code == 200
     json_response = response.json()
     language_models = json_response["llms"]
@@ -482,3 +482,78 @@ def test_chat_open_ai(client: TestClient):
         "ChatOpenAI",
         "BaseLanguageModel",
     }
+
+
+# Commenting this out for now, as it requires to activate the nodes
+# def test_azure_open_ai(client: TestClient):
+#     response = client.get("/all")
+#     assert response.status_code == 200
+#     json_response = response.json()
+#     language_models = json_response["llms"]
+
+#     model = language_models["AzureOpenAI"]
+#     template = model["template"]
+
+#     assert template["model_name"]["show"] is False
+#     assert template["deployment_name"] == {
+#         "required": False,
+#         "placeholder": "",
+#         "show": True,
+#         "multiline": False,
+#         "value": "",
+#         "password": False,
+#         "name": "deployment_name",
+#         "advanced": False,
+#         "type": "str",
+#         "list": False,
+#     }
+
+
+# def test_azure_chat_open_ai(client: TestClient):
+#     response = client.get("/all")
+#     assert response.status_code == 200
+#     json_response = response.json()
+#     language_models = json_response["llms"]
+
+#     model = language_models["AzureChatOpenAI"]
+#     template = model["template"]
+
+#     assert template["model_name"]["show"] is False
+#     assert template["deployment_name"] == {
+#         "required": False,
+#         "placeholder": "",
+#         "show": True,
+#         "multiline": False,
+#         "value": "",
+#         "password": False,
+#         "name": "deployment_name",
+#         "advanced": False,
+#         "type": "str",
+#         "list": False,
+#     }
+#     assert template["openai_api_type"] == {
+#         "required": False,
+#         "placeholder": "",
+#         "show": False,
+#         "multiline": False,
+#         "value": "azure",
+#         "password": False,
+#         "name": "openai_api_type",
+#         "display_name": "OpenAI API Type",
+#         "advanced": False,
+#         "type": "str",
+#         "list": False,
+#     }
+#     assert template["openai_api_version"] == {
+#         "required": False,
+#         "placeholder": "",
+#         "show": True,
+#         "multiline": False,
+#         "value": "2023-03-15-preview",
+#         "password": False,
+#         "name": "openai_api_version",
+#         "display_name": "OpenAI API Version",
+#         "advanced": False,
+#         "type": "str",
+#         "list": False,
+#     }
@@ -2,7 +2,7 @@ import json
 import pytest
 from langchain.chains.base import Chain
-from langflow import load_flow_from_json
+from langflow.processing.process import load_flow_from_json
 from langflow.graph import Graph
 from langflow.utils.payload import get_root_node
 
 
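
Note: load_flow_from_json is no longer imported from the langflow package root in this test; it comes from langflow.processing.process. A minimal usage sketch under the assumption that the function still takes a path to an exported flow JSON and returns a built LangChain object; the flow.json path is a placeholder.

from langflow.processing.process import load_flow_from_json

flow = load_flow_from_json("flow.json")  # placeholder path to an exported flow
result = flow("Hello")  # assuming the built object is callable like a LangChain chain
print(result)
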
@@ -3,7 +3,7 @@ from langflow.settings import settings
 
 
 def test_prompts_settings(client: TestClient):
-    response = client.get("/all")
+    response = client.get("api/v1/all")
     assert response.status_code == 200
     json_response = response.json()
     prompts = json_response["prompts"]
@@ -11,7 +11,7 @@ def test_prompts_settings(client: TestClient):
 
 
 def test_prompt_template(client: TestClient):
-    response = client.get("/all")
+    response = client.get("api/v1/all")
     assert response.status_code == 200
     json_response = response.json()
     prompts = json_response["prompts"]
@@ -89,7 +89,7 @@ def test_prompt_template(client: TestClient):
 
 
 def test_few_shot_prompt_template(client: TestClient):
-    response = client.get("/all")
+    response = client.get("api/v1/all")
     assert response.status_code == 200
     json_response = response.json()
     prompts = json_response["prompts"]
@@ -168,7 +168,7 @@ def test_few_shot_prompt_template(client: TestClient):
 
 
 def test_zero_shot_prompt(client: TestClient):
-    response = client.get("/all")
+    response = client.get("api/v1/all")
     assert response.status_code == 200
     json_response = response.json()
     prompts = json_response["prompts"]
@@ -5,7 +5,7 @@ from langflow.settings import settings
 # check that all agents are in settings.agents
 # are in json_response["agents"]
 def test_vectorstores_settings(client: TestClient):
-    response = client.get("/all")
+    response = client.get("api/v1/all")
     assert response.status_code == 200
     json_response = response.json()
     vectorstores = json_response["vectorstores"]
@@ -5,17 +5,17 @@ from fastapi.testclient import TestClient
 
 
 def test_websocket_connection(client: TestClient):
-    with client.websocket_connect("/chat/test_client") as websocket:
+    with client.websocket_connect("api/v1/chat/test_client") as websocket:
         assert websocket.scope["client"] == ["testclient", 50000]
-        assert websocket.scope["path"] == "/chat/test_client"
+        assert websocket.scope["path"] == "/api/v1/chat/test_client"
 
 
 def test_chat_history(client: TestClient):
     # Mock the process_graph function to return a specific value
-    with patch("langflow.api.chat_manager.process_graph") as mock_process_graph:
+    with patch("langflow.chat.manager.process_graph") as mock_process_graph:
         mock_process_graph.return_value = ("Hello, I'm a mock response!", "")
 
-        with client.websocket_connect("/chat/test_client") as websocket:
+        with client.websocket_connect("api/v1/chat/test_client") as websocket:
             # First message should be the history
             history = websocket.receive_json()
             assert history == []  # Empty history
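
Note: both the websocket route and the patch target move in this hunk — the chat endpoint now lives under api/v1, and process_graph is looked up in langflow.chat.manager rather than langflow.api.chat_manager. A minimal sketch of the updated mock-and-connect pattern, assuming the client fixture wraps the application in FastAPI's TestClient:

from unittest.mock import patch


def test_chat_returns_history_first(client):
    # Patch process_graph where the chat manager looks it up, not where it is defined.
    with patch("langflow.chat.manager.process_graph") as mock_process_graph:
        mock_process_graph.return_value = ("Hello, I'm a mock response!", "")
        with client.websocket_connect("api/v1/chat/test_client") as websocket:
            history = websocket.receive_json()  # the first frame is the stored chat history
            assert history == []
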