Merge branch 'zustand/io/migration' into cz/fixTestsIo

anovazzi1 2024-02-28 16:22:22 -03:00
commit 6b2409643d
117 changed files with 2999 additions and 2224 deletions


@ -3,7 +3,15 @@ name: lint
on:
push:
branches: [main]
paths:
- "poetry.lock"
- "pyproject.toml"
- "src/backend/**"
pull_request:
paths:
- "poetry.lock"
- "pyproject.toml"
- "src/backend/**"
env:
POETRY_VERSION: "1.7.0"


@ -3,8 +3,16 @@ name: test
on:
push:
branches: [main]
paths:
- "poetry.lock"
- "pyproject.toml"
- "src/backend/**"
pull_request:
branches: [dev]
paths:
- "poetry.lock"
- "pyproject.toml"
- "src/backend/**"
env:
POETRY_VERSION: "1.5.0"

.vscode/launch.json

@ -17,6 +17,9 @@
],
"jinja": true,
"justMyCode": true,
"env": {
"LANGFLOW_LOG_LEVEL": "debug"
},
"envFile": "${workspaceFolder}/.env"
},
{

poetry.lock

@ -418,17 +418,17 @@ files = [
[[package]]
name = "boto3"
version = "1.34.49"
version = "1.34.50"
description = "The AWS SDK for Python"
optional = false
python-versions = ">= 3.8"
files = [
{file = "boto3-1.34.49-py3-none-any.whl", hash = "sha256:ce8d1de03024f52a1810e8d71ad4dba3a5b9bb48b35567191500e3432a9130b4"},
{file = "boto3-1.34.49.tar.gz", hash = "sha256:96b9dc85ce8d52619b56ca7b1ac1423eaf0af5ce132904bcc8aa81396eec2abf"},
{file = "boto3-1.34.50-py3-none-any.whl", hash = "sha256:8d709365231234bc4f0ca98fdf33a25eeebf78072853c6aa3d259f0f5cf09877"},
{file = "boto3-1.34.50.tar.gz", hash = "sha256:290952be7899560039cb0042e8a2354f61a7dead0d0ca8bea6ba901930df0468"},
]
[package.dependencies]
botocore = ">=1.34.49,<1.35.0"
botocore = ">=1.34.50,<1.35.0"
jmespath = ">=0.7.1,<2.0.0"
s3transfer = ">=0.10.0,<0.11.0"
@ -437,13 +437,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
[[package]]
name = "botocore"
version = "1.34.49"
version = "1.34.50"
description = "Low-level, data-driven core of boto 3."
optional = false
python-versions = ">= 3.8"
files = [
{file = "botocore-1.34.49-py3-none-any.whl", hash = "sha256:4ed9d7603a04b5bb5bd5de63b513bc2c8a7e8b1cd0088229c5ceb461161f43b6"},
{file = "botocore-1.34.49.tar.gz", hash = "sha256:d89410bc60673eaff1699f3f1fdcb0e3a5e1f7a6a048c0d88c3ce5c3549433ec"},
{file = "botocore-1.34.50-py3-none-any.whl", hash = "sha256:fda510559dbe796eefdb59561cc81be1b99afba3dee53fd23db9a3d587adc0ab"},
{file = "botocore-1.34.50.tar.gz", hash = "sha256:33ab82cb96c4bb684f0dbafb071808e4817d83debc88b223e7d988256370c6d7"},
]
[package.dependencies]
@ -549,6 +549,20 @@ files = [
{file = "Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724"},
]
[[package]]
name = "bs4"
version = "0.0.2"
description = "Dummy package for Beautiful Soup (beautifulsoup4)"
optional = false
python-versions = "*"
files = [
{file = "bs4-0.0.2-py2.py3-none-any.whl", hash = "sha256:abf8742c0805ef7f662dce4b51cca104cffe52b835238afc169142ab9b3fbccc"},
{file = "bs4-0.0.2.tar.gz", hash = "sha256:a48685c58f50fe127722417bae83fe6badf500d54b55f7e39ffe43b798653925"},
]
[package.dependencies]
beautifulsoup4 = "*"
[[package]]
name = "build"
version = "1.0.3"
@ -575,13 +589,13 @@ virtualenv = ["virtualenv (>=20.0.35)"]
[[package]]
name = "cachetools"
version = "5.3.2"
version = "5.3.3"
description = "Extensible memoizing collections and decorators"
optional = false
python-versions = ">=3.7"
files = [
{file = "cachetools-5.3.2-py3-none-any.whl", hash = "sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1"},
{file = "cachetools-5.3.2.tar.gz", hash = "sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2"},
{file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"},
{file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"},
]
[[package]]
@ -2893,13 +2907,13 @@ files = [
[[package]]
name = "ipykernel"
version = "6.29.2"
version = "6.29.3"
description = "IPython Kernel for Jupyter"
optional = false
python-versions = ">=3.8"
files = [
{file = "ipykernel-6.29.2-py3-none-any.whl", hash = "sha256:50384f5c577a260a1d53f1f59a828c7266d321c9b7d00d345693783f66616055"},
{file = "ipykernel-6.29.2.tar.gz", hash = "sha256:3bade28004e3ff624ed57974948116670604ac5f676d12339693f3142176d3f0"},
{file = "ipykernel-6.29.3-py3-none-any.whl", hash = "sha256:5aa086a4175b0229d4eca211e181fb473ea78ffd9869af36ba7694c947302a21"},
{file = "ipykernel-6.29.3.tar.gz", hash = "sha256:e14c250d1f9ea3989490225cc1a542781b095a18a19447fcf2b5eaf7d0ac5bd2"},
]
[package.dependencies]
@ -2922,7 +2936,7 @@ cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"]
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"]
pyqt5 = ["pyqt5"]
pyside6 = ["pyside6"]
test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (==0.23.4)", "pytest-cov", "pytest-timeout"]
test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"]
[[package]]
name = "ipython"
@ -3433,13 +3447,13 @@ llama-index = ["llama-index (>=0.10.6,<0.11.0)"]
[[package]]
name = "langsmith"
version = "0.1.8"
version = "0.1.9"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "langsmith-0.1.8-py3-none-any.whl", hash = "sha256:f4320fd80ec9d311a648e7d4c44e0814e6e5454772c5026f40db0307bc07e287"},
{file = "langsmith-0.1.8.tar.gz", hash = "sha256:ab5f1cdfb7d418109ea506d41928fb8708547db2f6c7f7da7cfe997f3c55767b"},
{file = "langsmith-0.1.9-py3-none-any.whl", hash = "sha256:f821b3cb07a87eac5cb2181ff0b61051811e4eef09ae4b46e700981f7ae5dfb9"},
{file = "langsmith-0.1.9.tar.gz", hash = "sha256:9bd3e80607722c3d2db84cf3440005491a859b80b5e499bc988032d5c2da91f0"},
]
[package.dependencies]
@ -3488,13 +3502,142 @@ test = ["httpx (>=0.24.1)", "pytest (>=7.4.0)", "scipy (>=1.10)"]
[[package]]
name = "llama-index"
version = "0.10.13.post1"
description = "Interface between LLMs and your data"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index-0.10.13.post1-py3-none-any.whl", hash = "sha256:3a1281eb5b5505d3c4b5d8da036561e267c5b9311bd3ddbeeab3e1eeb92df86a"},
{file = "llama_index-0.10.13.post1.tar.gz", hash = "sha256:55a8bb34b4f538fb33f6db914d89ad2dbc7ae5e0ec24d8bc4238ed05ff502ee2"},
]
[package.dependencies]
llama-index-agent-openai = ">=0.1.4,<0.2.0"
llama-index-cli = ">=0.1.2,<0.2.0"
llama-index-core = ">=0.10.13,<0.11.0"
llama-index-embeddings-openai = ">=0.1.5,<0.2.0"
llama-index-indices-managed-llama-cloud = ">=0.1.2,<0.2.0"
llama-index-legacy = ">=0.9.48,<0.10.0"
llama-index-llms-openai = ">=0.1.5,<0.2.0"
llama-index-multi-modal-llms-openai = ">=0.1.3,<0.2.0"
llama-index-program-openai = ">=0.1.3,<0.2.0"
llama-index-question-gen-openai = ">=0.1.2,<0.2.0"
llama-index-readers-file = ">=0.1.4,<0.2.0"
llama-index-readers-llama-parse = ">=0.1.2,<0.2.0"
[[package]]
name = "llama-index-agent-openai"
version = "0.1.5"
description = "llama-index agent openai integration"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_agent_openai-0.1.5-py3-none-any.whl", hash = "sha256:1ab06fe853d9d391ba724dcb0009b249ae88ca4de4b5842226373b0c55ee435a"},
{file = "llama_index_agent_openai-0.1.5.tar.gz", hash = "sha256:42099326d526af140493c5f744ef70bef0aed8a941b6c9aea4b3eff9c63f0ba6"},
]
[package.dependencies]
llama-index-core = ">=0.10.1,<0.11.0"
llama-index-llms-openai = ">=0.1.5,<0.2.0"
[[package]]
name = "llama-index-cli"
version = "0.1.5"
description = "llama-index cli"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_cli-0.1.5-py3-none-any.whl", hash = "sha256:a0fcfc3239d8b05158558423ca5c1a426d2a455eab44128b2b786cab566f74ad"},
{file = "llama_index_cli-0.1.5.tar.gz", hash = "sha256:e2493ff7ecfd1983fd15c28c6c0c7bfdba66662c1d8960f6aea229db3d7fafda"},
]
[package.dependencies]
llama-index-core = ">=0.10.11.post1,<0.11.0"
llama-index-embeddings-openai = ">=0.1.1,<0.2.0"
llama-index-llms-openai = ">=0.1.1,<0.2.0"
llama-index-vector-stores-chroma = ">=0.1.1,<0.2.0"
[[package]]
name = "llama-index-core"
version = "0.10.13"
description = "Interface between LLMs and your data"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_core-0.10.13-py3-none-any.whl", hash = "sha256:40c76fc02be7cd948a333ca541f2ff38cf02774e1c960674e2b68c61943bac90"},
{file = "llama_index_core-0.10.13.tar.gz", hash = "sha256:826fded00767923fba8aca94f46c32b259e8879f517016ab7a3801b1b37187a1"},
]
[package.dependencies]
aiohttp = ">=3.8.6,<4.0.0"
dataclasses-json = "*"
deprecated = ">=1.2.9.3"
dirtyjson = ">=1.0.8,<2.0.0"
fsspec = ">=2023.5.0"
httpx = "*"
llamaindex-py-client = ">=0.1.13,<0.2.0"
nest-asyncio = ">=1.5.8,<2.0.0"
networkx = ">=3.0"
nltk = ">=3.8.1,<4.0.0"
numpy = "*"
openai = ">=1.1.0"
pandas = "*"
pillow = ">=9.0.0"
PyYAML = ">=6.0.1"
requests = ">=2.31.0"
SQLAlchemy = {version = ">=1.4.49", extras = ["asyncio"]}
tenacity = ">=8.2.0,<9.0.0"
tiktoken = ">=0.3.3"
tqdm = ">=4.66.1,<5.0.0"
typing-extensions = ">=4.5.0"
typing-inspect = ">=0.8.0"
[package.extras]
gradientai = ["gradientai (>=1.4.0)"]
html = ["beautifulsoup4 (>=4.12.2,<5.0.0)"]
langchain = ["langchain (>=0.0.303)"]
local-models = ["optimum[onnxruntime] (>=1.13.2,<2.0.0)", "sentencepiece (>=0.1.99,<0.2.0)", "transformers[torch] (>=4.33.1,<5.0.0)"]
postgres = ["asyncpg (>=0.28.0,<0.29.0)", "pgvector (>=0.1.0,<0.2.0)", "psycopg2-binary (>=2.9.9,<3.0.0)"]
query-tools = ["guidance (>=0.0.64,<0.0.65)", "jsonpath-ng (>=1.6.0,<2.0.0)", "lm-format-enforcer (>=0.4.3,<0.5.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "scikit-learn", "spacy (>=3.7.1,<4.0.0)"]
[[package]]
name = "llama-index-embeddings-openai"
version = "0.1.6"
description = "llama-index embeddings openai integration"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_embeddings_openai-0.1.6-py3-none-any.whl", hash = "sha256:f8b2dded0718e9f57c08ce352d186941e6acf7de414c64219210b66f7a6d6d2d"},
{file = "llama_index_embeddings_openai-0.1.6.tar.gz", hash = "sha256:f12f0ef6f92211efe1a022a97bb68fc8731c93bd20df3b0567dba69c610033db"},
]
[package.dependencies]
llama-index-core = ">=0.10.1,<0.11.0"
[[package]]
name = "llama-index-indices-managed-llama-cloud"
version = "0.1.3"
description = "llama-index indices llama-cloud integration"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_indices_managed_llama_cloud-0.1.3-py3-none-any.whl", hash = "sha256:9fe2823855f00bf8b091be008ce953b9a9c5d4b2d976b54ab0d37877c83457f5"},
{file = "llama_index_indices_managed_llama_cloud-0.1.3.tar.gz", hash = "sha256:5db725cb7db675019dc65e38153890802e2ae89838c127c19d3184efc46ea28b"},
]
[package.dependencies]
llama-index-core = ">=0.10.0,<0.11.0"
llamaindex-py-client = ">=0.1.13,<0.2.0"
[[package]]
name = "llama-index-legacy"
version = "0.9.48"
description = "Interface between LLMs and your data"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index-0.9.48-py3-none-any.whl", hash = "sha256:56aa406d39e7ca53a5d990b55d69901fbb9eddc9af6a40950367dc5d734f6283"},
{file = "llama_index-0.9.48.tar.gz", hash = "sha256:c50d02ac8c7e4ff9fb41f0860391fe0020ad8a3d7c30048db52d17d8be654bf3"},
{file = "llama_index_legacy-0.9.48-py3-none-any.whl", hash = "sha256:714ada95beac179b4acefa4d2deff74bb7b2f22b0f699ac247d4cb67738d16d4"},
{file = "llama_index_legacy-0.9.48.tar.gz", hash = "sha256:82ddc4691edbf49533d65582c249ba22c03fe96fbd3e92f7758dccef28e43834"},
]
[package.dependencies]
@ -3525,6 +3668,146 @@ local-models = ["optimum[onnxruntime] (>=1.13.2,<2.0.0)", "sentencepiece (>=0.1.
postgres = ["asyncpg (>=0.28.0,<0.29.0)", "pgvector (>=0.1.0,<0.2.0)", "psycopg2-binary (>=2.9.9,<3.0.0)"]
query-tools = ["guidance (>=0.0.64,<0.0.65)", "jsonpath-ng (>=1.6.0,<2.0.0)", "lm-format-enforcer (>=0.4.3,<0.5.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "scikit-learn", "spacy (>=3.7.1,<4.0.0)"]
[[package]]
name = "llama-index-llms-openai"
version = "0.1.6"
description = "llama-index llms openai integration"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_llms_openai-0.1.6-py3-none-any.whl", hash = "sha256:4260ad31c3444e97ec8a8d061cb6dbf1074262b82341a2b69d2b27e8a23efe62"},
{file = "llama_index_llms_openai-0.1.6.tar.gz", hash = "sha256:15530dfa3893b15c5576ebc71e01b77acbf47abd689219436fdf7b6ca567a9fd"},
]
[package.dependencies]
llama-index-core = ">=0.10.1,<0.11.0"
[[package]]
name = "llama-index-multi-modal-llms-openai"
version = "0.1.4"
description = "llama-index multi-modal-llms openai integration"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_multi_modal_llms_openai-0.1.4-py3-none-any.whl", hash = "sha256:03b887d110551d5d5b99b9fd110824e6311f2e31f4d5e67dafd2ee66da32818d"},
{file = "llama_index_multi_modal_llms_openai-0.1.4.tar.gz", hash = "sha256:6a5d6584c33a9d1b06cf5c874c63af2603fc93b660bde481a8c547e876c6e2c3"},
]
[package.dependencies]
llama-index-core = ">=0.10.1,<0.11.0"
llama-index-llms-openai = ">=0.1.1,<0.2.0"
[[package]]
name = "llama-index-program-openai"
version = "0.1.4"
description = "llama-index program openai integration"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_program_openai-0.1.4-py3-none-any.whl", hash = "sha256:cfa8f00f3743d2fc70043e80f7c3925d23b1413a0cc7a72863ad60497a18307d"},
{file = "llama_index_program_openai-0.1.4.tar.gz", hash = "sha256:573e99a2dd16ad3caf382c8ab28d1ac10eb2571bc9481d84a6d89806ad6aa5d4"},
]
[package.dependencies]
llama-index-agent-openai = ">=0.1.1,<0.2.0"
llama-index-core = ">=0.10.1,<0.11.0"
llama-index-llms-openai = ">=0.1.1,<0.2.0"
[[package]]
name = "llama-index-question-gen-openai"
version = "0.1.3"
description = "llama-index question_gen openai integration"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_question_gen_openai-0.1.3-py3-none-any.whl", hash = "sha256:1f83b49e8b2e665030d1ec8c54687d6985d9fa8426147b64e46628a9e489b302"},
{file = "llama_index_question_gen_openai-0.1.3.tar.gz", hash = "sha256:4486198117a45457d2e036ae60b93af58052893cc7d78fa9b6f47dd47b81e2e1"},
]
[package.dependencies]
llama-index-core = ">=0.10.1,<0.11.0"
llama-index-llms-openai = ">=0.1.1,<0.2.0"
llama-index-program-openai = ">=0.1.1,<0.2.0"
[[package]]
name = "llama-index-readers-file"
version = "0.1.6"
description = "llama-index readers file integration"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_readers_file-0.1.6-py3-none-any.whl", hash = "sha256:f583bd90353a0c0985213af02c97aa2f2f22e702d4311fe719de91382c9ad8dd"},
{file = "llama_index_readers_file-0.1.6.tar.gz", hash = "sha256:d9fc0ca84926d04bd757c57fe87841cd9dbc2606aab5f2ce927deec14aaa1a74"},
]
[package.dependencies]
beautifulsoup4 = ">=4.12.3,<5.0.0"
bs4 = ">=0.0.2,<0.0.3"
llama-index-core = ">=0.10.1,<0.11.0"
pymupdf = ">=1.23.21,<2.0.0"
pypdf = ">=4.0.1,<5.0.0"
[[package]]
name = "llama-index-readers-llama-parse"
version = "0.1.3"
description = "llama-index readers llama-parse integration"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_readers_llama_parse-0.1.3-py3-none-any.whl", hash = "sha256:f52a06a2765a2ffe6c138cf1703ab1de6249ff069ba62d80b9147e849bbcbc27"},
{file = "llama_index_readers_llama_parse-0.1.3.tar.gz", hash = "sha256:e0ee0c393e10fc80eac644788338bbd2032050c8b8a474f3d0b5ebd08e9867fe"},
]
[package.dependencies]
llama-index-core = ">=0.10.7,<0.11.0"
llama-parse = ">=0.3.3,<0.4.0"
[[package]]
name = "llama-index-vector-stores-chroma"
version = "0.1.4"
description = "llama-index vector_stores chroma integration"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_vector_stores_chroma-0.1.4-py3-none-any.whl", hash = "sha256:f475a450431ee4d9b2915ba9da2112dfdfacaee1ea220b8603720be1c116786c"},
{file = "llama_index_vector_stores_chroma-0.1.4.tar.gz", hash = "sha256:7364f2a3f8a51b83d350da39da7e7046704cfa9c848ebe8fd1c6cb39ad4878f9"},
]
[package.dependencies]
chromadb = ">=0.4.22,<0.5.0"
llama-index-core = ">=0.10.1,<0.11.0"
onnxruntime = ">=1.17.0,<2.0.0"
tokenizers = ">=0.15.1,<0.16.0"
[[package]]
name = "llama-parse"
version = "0.3.4"
description = "Parse files into RAG-Optimized formats."
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_parse-0.3.4-py3-none-any.whl", hash = "sha256:b667c78d4c32fc5d0561e6e3ca6c53648a6701b436f21d0d252cd46774927660"},
{file = "llama_parse-0.3.4.tar.gz", hash = "sha256:5a30569c390ab9089dad66cf2a8c967f8c21d77641deec0a922672df4e16cfa3"},
]
[package.dependencies]
llama-index-core = ">=0.10.7"
[[package]]
name = "llamaindex-py-client"
version = "0.1.13"
description = ""
optional = false
python-versions = ">=3.8,<4.0"
files = [
{file = "llamaindex_py_client-0.1.13-py3-none-any.whl", hash = "sha256:02400c90655da80ae373e0455c829465208607d72462f1898fd383fdfe8dabce"},
{file = "llamaindex_py_client-0.1.13.tar.gz", hash = "sha256:3bd9b435ee0a78171eba412dea5674d813eb5bf36e577d3c7c7e90edc54900d9"},
]
[package.dependencies]
httpx = ">=0.20.0"
pydantic = ">=1.10"
[[package]]
name = "locust"
version = "2.23.1"
@ -3681,6 +3964,24 @@ babel = ["Babel"]
lingua = ["lingua"]
testing = ["pytest"]
[[package]]
name = "markdown"
version = "3.5.2"
description = "Python implementation of John Gruber's Markdown."
optional = false
python-versions = ">=3.8"
files = [
{file = "Markdown-3.5.2-py3-none-any.whl", hash = "sha256:d43323865d89fc0cb9b20c75fc8ad313af307cc087e84b657d9eec768eddeadd"},
{file = "Markdown-3.5.2.tar.gz", hash = "sha256:e1ac7b3dc550ee80e602e71c1d168002f062e49f1b11e26a36264dafd4df2ef8"},
]
[package.dependencies]
importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""}
[package.extras]
docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"]
testing = ["coverage", "pyyaml"]
[[package]]
name = "markdown-it-py"
version = "3.0.0"
@ -3776,22 +4077,21 @@ files = [
[[package]]
name = "marshmallow"
version = "3.20.2"
version = "3.21.0"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
optional = false
python-versions = ">=3.8"
files = [
{file = "marshmallow-3.20.2-py3-none-any.whl", hash = "sha256:c21d4b98fee747c130e6bc8f45c4b3199ea66bc00c12ee1f639f0aeca034d5e9"},
{file = "marshmallow-3.20.2.tar.gz", hash = "sha256:4c1daff273513dc5eb24b219a8035559dc573c8f322558ef85f5438ddd1236dd"},
{file = "marshmallow-3.21.0-py3-none-any.whl", hash = "sha256:e7997f83571c7fd476042c2c188e4ee8a78900ca5e74bd9c8097afa56624e9bd"},
{file = "marshmallow-3.21.0.tar.gz", hash = "sha256:20f53be28c6e374a711a16165fb22a8dc6003e3f7cda1285e3ca777b9193885b"},
]
[package.dependencies]
packaging = ">=17.0"
[package.extras]
dev = ["pre-commit (>=2.4,<4.0)", "pytest", "pytz", "simplejson", "tox"]
docs = ["alabaster (==0.7.15)", "autodocsumm (==0.2.12)", "sphinx (==7.2.6)", "sphinx-issues (==3.0.1)", "sphinx-version-warning (==1.1.2)"]
lint = ["pre-commit (>=2.4,<4.0)"]
dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"]
docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.2.6)", "sphinx-issues (==4.0.0)", "sphinx-version-warning (==1.1.2)"]
tests = ["pytest", "pytz", "simplejson"]
[[package]]
@ -5987,6 +6287,64 @@ snappy = ["python-snappy"]
test = ["pytest (>=7)"]
zstd = ["zstandard"]
[[package]]
name = "pymupdf"
version = "1.23.25"
description = "A high performance Python library for data extraction, analysis, conversion & manipulation of PDF (and other) documents."
optional = false
python-versions = ">=3.8"
files = [
{file = "PyMuPDF-1.23.25-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:6be2b20fbff40602f673fc8e60fde3e5911397f8ca9ed6aa2d15be94b12cc2c4"},
{file = "PyMuPDF-1.23.25-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:0f6923a44fbeaeefaabb2fa10955dcef3624e8826db661201951f3b3409fed32"},
{file = "PyMuPDF-1.23.25-cp310-none-manylinux2014_aarch64.whl", hash = "sha256:8eeb2e97347586ec293fddaf61e8dfc58d6b2763406e8f7a6e45b560bf9b15a3"},
{file = "PyMuPDF-1.23.25-cp310-none-manylinux2014_x86_64.whl", hash = "sha256:dca46799c152051697c5e88d66c17ba6d0244668d0c4dd8a2ba2d8d3cb745988"},
{file = "PyMuPDF-1.23.25-cp310-none-win32.whl", hash = "sha256:88bfed1bd13ec84869489fc7b97381016cb8b99956073f4c3e8ac8c840bbb15a"},
{file = "PyMuPDF-1.23.25-cp310-none-win_amd64.whl", hash = "sha256:98a78582c8a0c61b372e2bcd63dc61efc873e40b7d1f0b896a195e1a9ef9ffa7"},
{file = "PyMuPDF-1.23.25-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:d7792810634036a745ea3eb3c4ccf2b6adab55ca9644e3352747d2b5aa5327f9"},
{file = "PyMuPDF-1.23.25-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:03bd1985b0234c3d2b8e26bb3e9ab1d2641dbada1e199b838a6bf884f35224c8"},
{file = "PyMuPDF-1.23.25-cp311-none-manylinux2014_aarch64.whl", hash = "sha256:638fcb1f7551eb5ab582e412e204e8ded94acbbc37bc7f1e891a5dfc428881ee"},
{file = "PyMuPDF-1.23.25-cp311-none-manylinux2014_x86_64.whl", hash = "sha256:067c88b4e6609cb7e74d98d0b0a35c11eb8e29f4fc51dc7ed1dd448b81d347c7"},
{file = "PyMuPDF-1.23.25-cp311-none-win32.whl", hash = "sha256:a694f160d1701285cf3152951430740878d168511cd9ea0a3adcfaf3cac00322"},
{file = "PyMuPDF-1.23.25-cp311-none-win_amd64.whl", hash = "sha256:514bcb679926b33413637b0bd73b223c90fb0d19352caf3395d0f23b1d47e8af"},
{file = "PyMuPDF-1.23.25-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:bba342321e1b5574631894d7d34ec046605d953a23553b7d2f9c0e4d3c27254b"},
{file = "PyMuPDF-1.23.25-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:b2cb058c8229f9697deebe0574f7d95e4b9a5e295ceafd554346bbd464141e89"},
{file = "PyMuPDF-1.23.25-cp312-none-manylinux2014_aarch64.whl", hash = "sha256:2479473b533936593428ce78499a1e9901570110ac602f03f1f3174efa0fa6a8"},
{file = "PyMuPDF-1.23.25-cp312-none-manylinux2014_x86_64.whl", hash = "sha256:a247a4be1e43a6127ee305eae9f65767ee7519a2aa0cb1a2aa6acfd4e7fe7a9b"},
{file = "PyMuPDF-1.23.25-cp312-none-win32.whl", hash = "sha256:b062be400bbaff6e8b17c0a8da9481e01ec935f97967e0870e9aacd7ba60a52a"},
{file = "PyMuPDF-1.23.25-cp312-none-win_amd64.whl", hash = "sha256:b12e608761e1586a65f6e96a34417a91f814dbab29f2929b41d825ab32fab6ef"},
{file = "PyMuPDF-1.23.25-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:ac97691c0e0e23607626d394bd660a46ea33f64921dc9288cf24daee207f9fe3"},
{file = "PyMuPDF-1.23.25-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:c0a16cda5dc9b59d494ae23bdd9c4a3db53d04f2b6390265f5c0fe6269777975"},
{file = "PyMuPDF-1.23.25-cp38-none-manylinux2014_aarch64.whl", hash = "sha256:23d735db51722a889bb50636d161d2747f08fa0b82cc2e4a7eb8e228b25d1c4e"},
{file = "PyMuPDF-1.23.25-cp38-none-manylinux2014_x86_64.whl", hash = "sha256:cbc1407dcf01b2e3e547b2d7643b97cc44c0950d2bb4b12c74322664c5cb37d7"},
{file = "PyMuPDF-1.23.25-cp38-none-win32.whl", hash = "sha256:c29518701d6360beb01c25cf69a77b6426db90a9e7cd11179b3bd783c7fb4cb1"},
{file = "PyMuPDF-1.23.25-cp38-none-win_amd64.whl", hash = "sha256:c1bb6fa9e00c846e6829dec2bee8326754adaef5c80626b99233c01923f0342c"},
{file = "PyMuPDF-1.23.25-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:514b272bfcd897f9ae29384da04167dcdea3b13ce0f2b9099b645314355d037d"},
{file = "PyMuPDF-1.23.25-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:ef345a5b050d0869ef404845075edd5f4bd7fd99e235f4d32ce85f423779a120"},
{file = "PyMuPDF-1.23.25-cp39-none-manylinux2014_aarch64.whl", hash = "sha256:b3ade5b349c38ddffb24f8c266fbcd7161f488c43960ff0f03f977d40d4df967"},
{file = "PyMuPDF-1.23.25-cp39-none-manylinux2014_x86_64.whl", hash = "sha256:111d795a3e840aec2ad66beebd90a5327994ec85ed56fd68312f5463062dbbfa"},
{file = "PyMuPDF-1.23.25-cp39-none-win32.whl", hash = "sha256:2237ce9897771f4af686cc0c81517ffb020fc1a011b95ccf5ccf05383492bd6d"},
{file = "PyMuPDF-1.23.25-cp39-none-win_amd64.whl", hash = "sha256:251c9c321a2112716068d5ae11deedd1911d0387cbdd0ef19adb216a3adf882c"},
{file = "PyMuPDF-1.23.25.tar.gz", hash = "sha256:eb414e92f08107f43576a1fedea28aa837220b15ad58c8e32015435fe96cc03e"},
]
[package.dependencies]
PyMuPDFb = "1.23.22"
[[package]]
name = "pymupdfb"
version = "1.23.22"
description = "MuPDF shared libraries for PyMuPDF."
optional = false
python-versions = ">=3.8"
files = [
{file = "PyMuPDFb-1.23.22-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:9085a1e2fbf16f2820f9f7ad3d25e85f81d9b9eb0409110c1670d4cf5a27a678"},
{file = "PyMuPDFb-1.23.22-py3-none-macosx_11_0_arm64.whl", hash = "sha256:01016dd33220cef4ecaf929d09fd27a584dc3ec3e5c9f4112dfe63613ea35135"},
{file = "PyMuPDFb-1.23.22-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf50e814db91f2a2325219302fbac229a23682c372cf8232aabd51ea3f18210e"},
{file = "PyMuPDFb-1.23.22-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ffa713ad18e816e584c8a5f569995c32d22f8ac76ab6e4a61f2d2983c4b73d9"},
{file = "PyMuPDFb-1.23.22-py3-none-win32.whl", hash = "sha256:d00e372452845aea624659c302d25e935052269fd3aafe26948301576d6f2ee8"},
{file = "PyMuPDFb-1.23.22-py3-none-win_amd64.whl", hash = "sha256:7c9c157281fdee9f296e666a323307dbf74cb38f017921bb131fa7bfcd39c2bd"},
]
[[package]]
name = "pyparsing"
version = "2.4.7"
@ -8212,6 +8570,7 @@ emoji = "*"
filetype = "*"
langdetect = "*"
lxml = "*"
markdown = {version = "*", optional = true, markers = "extra == \"md\""}
nltk = "*"
numpy = "*"
python-iso639 = "*"
@ -9026,4 +9385,4 @@ local = ["ctransformers", "llama-cpp-python", "sentence-transformers"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<3.12"
content-hash = "1462954b3befc2989ae226f2214111be786eb05bade578c9c80b4ed80d5b59ff"
content-hash = "e34d70b4ca2e9bdab5478d4b0b31dc39379c4506d1cc6962e378090570ce757c"

pyproject.toml

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.6.7"
version = "0.7.0a0"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [
@ -103,9 +103,9 @@ langchain-google-genai = "^0.0.6"
elasticsearch = "^8.12.0"
pytube = "^15.0.0"
python-socketio = "^5.11.0"
llama-index = "0.9.48"
llama-index = "^0.10.13"
langchain-openai = "^0.0.6"
unstructured = "^0.12.4"
unstructured = { extras = ["md"], version = "^0.12.4" }
[tool.poetry.group.dev.dependencies]
pytest-asyncio = "^0.23.1"
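Note: switching `unstructured` to the `md` extra is what pulls the new `markdown` package into poetry.lock above. A minimal sketch of what the extra enables (illustration only, not part of the commit; "notes.md" is a hypothetical input file):

# Sketch: partitioning a Markdown file via the new "md" extra.
# Assumes `unstructured[md]` is installed.
from unstructured.partition.md import partition_md

elements = partition_md(filename="notes.md")
for element in elements:
    print(type(element).__name__, element.text[:60])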

langflow/api/utils.py

@ -3,9 +3,7 @@ from pathlib import Path
from typing import TYPE_CHECKING, List, Optional
from fastapi import HTTPException
from langchain_core.documents import Document
from platformdirs import user_cache_dir
from pydantic import BaseModel
from sqlmodel import Session
from langflow.graph.graph.base import Graph
@ -22,7 +20,9 @@ API_WORDS = ["api", "key", "token"]
def has_api_terms(word: str):
return "api" in word and ("key" in word or ("token" in word and "tokens" not in word))
return "api" in word and (
"key" in word or ("token" in word and "tokens" not in word)
)
def remove_api_keys(flow: dict):
@ -32,7 +32,11 @@ def remove_api_keys(flow: dict):
node_data = node.get("data").get("node")
template = node_data.get("template")
for value in template.values():
if isinstance(value, dict) and has_api_terms(value["name"]) and value.get("password"):
if (
isinstance(value, dict)
and has_api_terms(value["name"])
and value.get("password")
):
value["value"] = None
return flow
@ -53,7 +57,9 @@ def build_input_keys_response(langchain_object, artifacts):
input_keys_response["input_keys"][key] = value
# If the object has memory, that memory will have a memory_variables attribute
# memory variables should be removed from the input keys
if hasattr(langchain_object, "memory") and hasattr(langchain_object.memory, "memory_variables"):
if hasattr(langchain_object, "memory") and hasattr(
langchain_object.memory, "memory_variables"
):
# Remove memory variables from input keys
input_keys_response["input_keys"] = {
key: value
@ -63,7 +69,9 @@ def build_input_keys_response(langchain_object, artifacts):
# Add memory variables to memory_keys
input_keys_response["memory_keys"] = langchain_object.memory.memory_variables
if hasattr(langchain_object, "prompt") and hasattr(langchain_object.prompt, "template"):
if hasattr(langchain_object, "prompt") and hasattr(
langchain_object.prompt, "template"
):
input_keys_response["template"] = langchain_object.prompt.template
return input_keys_response
@ -98,7 +106,11 @@ def raw_frontend_data_is_valid(raw_frontend_data):
def is_valid_data(frontend_node, raw_frontend_data):
"""Check if the data is valid for processing."""
return frontend_node and "template" in frontend_node and raw_frontend_data_is_valid(raw_frontend_data)
return (
frontend_node
and "template" in frontend_node
and raw_frontend_data_is_valid(raw_frontend_data)
)
def update_template_values(frontend_template, raw_template):
@ -138,7 +150,9 @@ def get_file_path_value(file_path):
# If the path is not in the cache dir, return empty string
# This is to prevent access to files outside the cache dir
# If the path is not a file, return empty string
if not path.exists() or not str(path).startswith(user_cache_dir("langflow", "langflow")):
if not path.exists() or not str(path).startswith(
user_cache_dir("langflow", "langflow")
):
return ""
return file_path
@ -169,7 +183,9 @@ async def check_langflow_version(component: StoreComponentCreate):
langflow_version = get_lf_version_from_pypi()
if langflow_version is None:
raise HTTPException(status_code=500, detail="Unable to verify the latest version of Langflow")
raise HTTPException(
status_code=500, detail="Unable to verify the latest version of Langflow"
)
elif langflow_version != component.last_tested_version:
warnings.warn(
f"Your version of Langflow ({component.last_tested_version}) is outdated. "
@ -199,20 +215,6 @@ def format_elapsed_time(elapsed_time: float) -> str:
return f"{minutes} {minutes_unit}, {seconds} {seconds_unit}"
def serialize_field(value):
"""Unified serialization function for handling both BaseModel and Document types,
including handling lists of these types."""
if isinstance(value, (list, tuple)):
return [serialize_field(v) for v in value]
elif isinstance(value, Document):
return value.to_json()
elif isinstance(value, BaseModel):
return value.model_dump()
elif isinstance(value, str):
return {"result": value}
return value
def build_and_cache_graph(
flow_id: str,
session: Session,
@ -230,3 +232,25 @@ def build_and_cache_graph(
graph = graph.update(other_graph)
chat_service.set_cache(flow_id, graph)
return graph
def format_syntax_error_message(exc: SyntaxError) -> str:
"""Format a SyntaxError message for returning to the frontend."""
return f"Syntax error in code. Error on line {exc.lineno}: {exc.text.strip()}"
def get_causing_exception(exc: Exception) -> Exception:
"""Get the causing exception from an exception."""
if hasattr(exc, "__cause__") and exc.__cause__:
return get_causing_exception(exc.__cause__)
return exc
def format_exception_message(exc: Exception) -> str:
"""Format an exception message for returning to the frontend."""
# We need to check if the __cause__ is a SyntaxError
# If it is, we need to return the message of the SyntaxError
causing_exception = get_causing_exception(exc)
if isinstance(causing_exception, SyntaxError):
return format_syntax_error_message(causing_exception)
return str(exc)
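An illustration (not part of the diff) of how the three helpers above cooperate, assuming they are in scope: get_causing_exception walks __cause__ down to the root, and a SyntaxError gets the line-level formatting.

try:
    try:
        # Compiling malformed user code raises a SyntaxError.
        compile("def broken(:\n", "<user code>", "exec")
    except SyntaxError as syn:
        raise RuntimeError("component build failed") from syn
except Exception as exc:
    # The frontend sees the root SyntaxError, not the RuntimeError wrapper.
    print(format_exception_message(exc))
    # -> Syntax error in code. Error on line 1: def broken(: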


@ -1,5 +1,6 @@
import time
from typing import Optional
import uuid
from typing import TYPE_CHECKING, Optional
from fastapi import (
APIRouter,
@ -14,9 +15,13 @@ from fastapi.responses import StreamingResponse
from loguru import logger
from sqlmodel import Session
from langflow.api.utils import build_and_cache_graph, format_elapsed_time
from langflow.api.utils import (
build_and_cache_graph,
format_elapsed_time,
format_exception_message,
)
from langflow.api.v1.schemas import (
ResultData,
ResultDataResponse,
StreamData,
VertexBuildResponse,
VerticesOrderResponse,
@ -27,8 +32,12 @@ from langflow.services.auth.utils import (
get_current_user_for_websocket,
)
from langflow.services.chat.service import ChatService
from langflow.services.deps import get_chat_service, get_session
from langflow.services.deps import get_chat_service, get_session, get_session_service
from langflow.services.monitor.utils import log_vertex_build
from langflow.services.session.service import SessionService
if TYPE_CHECKING:
from langflow.graph.vertex.types import ChatVertex
router = APIRouter(tags=["Chat"])
@ -45,9 +54,13 @@ async def chat(
user = await get_current_user_for_websocket(websocket, db)
await websocket.accept()
if not user:
await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
await websocket.close(
code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized"
)
elif not user.is_active:
await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
await websocket.close(
code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized"
)
if client_id in chat_service.cache_service:
await chat_service.handle_websocket(client_id, websocket)
@ -63,7 +76,9 @@ async def chat(
logger.error(f"Error in chat websocket: {exc}")
message = exc.detail if isinstance(exc, HTTPException) else str(exc)
if "Could not validate credentials" in str(exc):
await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
await websocket.close(
code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized"
)
else:
await websocket.close(code=status.WS_1011_INTERNAL_ERROR, reason=message)
@ -110,7 +125,8 @@ async def get_vertices(
# Now vertices is a list of lists
# We need to get the id of each vertex
# and return the same structure but only with the ids
return VerticesOrderResponse(ids=vertices)
run_id = uuid.uuid4()
return VerticesOrderResponse(ids=vertices, run_id=run_id)
except Exception as exc:
logger.error(f"Error checking build status: {exc}")
@ -133,30 +149,23 @@ async def build_vertex(
cache = chat_service.get_cache(flow_id)
if not cache:
# If there's no cache
logger.warning(f"No cache found for {flow_id}. Building graph starting at {vertex_id}")
graph = build_and_cache_graph(flow_id=flow_id, session=next(get_session()), chat_service=chat_service)
logger.warning(
f"No cache found for {flow_id}. Building graph starting at {vertex_id}"
)
graph = build_and_cache_graph(
flow_id=flow_id, session=next(get_session()), chat_service=chat_service
)
else:
graph = cache.get("result")
result_dict = {}
result_data_response = {}
duration = ""
vertex = graph.get_vertex(vertex_id)
try:
if not vertex.pinned or not vertex._built:
await vertex.build(user_id=current_user.id)
params = vertex._built_object_repr()
valid = True
result_dict = vertex.get_built_result()
# We need to set the artifacts to pass information
# to the frontend
vertex.set_artifacts()
artifacts = vertex.artifacts
result_dict = ResultData(
results=result_dict,
artifacts=artifacts,
)
vertex.set_result(result_dict)
elif vertex.result is not None:
if vertex.result is not None:
params = vertex._built_object_repr()
valid = True
result_dict = vertex.result
@ -164,30 +173,34 @@ async def build_vertex(
else:
raise ValueError(f"No result found for vertex {vertex_id}")
result_data_response = ResultDataResponse(**result_dict.model_dump())
except Exception as exc:
params = str(exc)
logger.error(f"Error building vertex: {exc}")
params = format_exception_message(exc)
valid = False
result_dict = ResultData(results={})
result_data_response = ResultDataResponse(results={})
artifacts = {}
# If there's an error building the vertex
# we need to clear the cache
chat_service.clear_cache(flow_id)
# Log the vertex build
background_tasks.add_task(
log_vertex_build,
flow_id=flow_id,
vertex_id=vertex_id,
valid=valid,
params=params,
data=result_dict,
artifacts=artifacts,
)
if not vertex.will_stream:
background_tasks.add_task(
log_vertex_build,
flow_id=flow_id,
vertex_id=vertex_id,
valid=valid,
params=params,
data=result_data_response,
artifacts=artifacts,
)
timedelta = time.perf_counter() - start_time
duration = format_elapsed_time(timedelta)
result_dict.duration = duration
result_dict.timedelta = timedelta
result_data_response.duration = duration
result_data_response.timedelta = timedelta
vertex.add_build_time(timedelta)
inactive_vertices = None
if graph.inactive_vertices:
@ -200,7 +213,7 @@ async def build_vertex(
valid=valid,
params=params,
id=vertex.id,
data=result_dict,
data=result_data_response,
)
except Exception as exc:
logger.error(f"Error building vertex: {exc}")
@ -215,41 +228,71 @@ async def build_vertex(
async def build_vertex_stream(
flow_id: str,
vertex_id: str,
session_id: Optional[str] = None,
chat_service: "ChatService" = Depends(get_chat_service),
session_service: "SessionService" = Depends(get_session_service),
):
"""Build a vertex instead of the entire graph."""
try:
async def stream_vertex():
try:
cache = chat_service.get_cache(flow_id)
if not cache:
# If there's no cache
raise ValueError(f"No cache found for {flow_id}.")
if not session_id:
cache = chat_service.get_cache(flow_id)
if not cache:
# If there's no cache
raise ValueError(f"No cache found for {flow_id}.")
else:
graph = cache.get("result")
else:
graph = cache.get("result")
session_data = await session_service.load_session(session_id)
graph, artifacts = session_data if session_data else (None, None)
if not graph:
raise ValueError(f"No graph found for {flow_id}.")
vertex = graph.get_vertex(vertex_id)
if not vertex.pinned or not vertex._built:
vertex: "ChatVertex" = graph.get_vertex(vertex_id)
if not hasattr(vertex, "stream"):
raise ValueError(f"Vertex {vertex_id} does not support streaming")
if isinstance(vertex._built_result, str) and vertex._built_result:
stream_data = StreamData(
event="message",
data={"message": "Building vertex"},
data={"message": f"Streaming vertex {vertex_id}"},
)
yield str(stream_data)
stream_data = StreamData(
event="message",
data={"chunk": vertex._built_result},
)
yield str(stream_data)
elif not vertex.pinned or not vertex._built:
logger.debug(f"Streaming vertex {vertex_id}")
stream_data = StreamData(
event="message",
data={"message": f"Streaming vertex {vertex_id}"},
)
yield str(stream_data)
async for chunk in vertex.stream():
stream_data = StreamData(
event="message",
data={"chunk": chunk},
)
yield str(stream_data)
elif vertex.result is not None:
stream_data = StreamData(
event="message",
data={"chunk": vertex._built_result},
)
yield str(stream_data)
else:
raise ValueError(f"No result found for vertex {vertex_id}")
except Exception as exc:
logger.error(f"Error building vertex: {exc}")
yield str(StreamData(event="error", data={"error": str(exc)}))
yield str(StreamData(event="close", data={"message": "Stream closed"}))
finally:
logger.debug("Closing stream")
yield str(StreamData(event="close", data={"message": "Stream closed"}))
return StreamingResponse(stream_vertex(), media_type="text/event-stream")
except Exception as exc:


@ -3,11 +3,15 @@ from typing import Annotated, Any, List, Optional, Union
import sqlalchemy as sa
from fastapi import APIRouter, Body, Depends, HTTPException, UploadFile, status
from loguru import logger
from sqlmodel import select
from langflow.api.utils import update_frontend_node_with_template_values
from langflow.api.v1.schemas import (
CustomComponentCode,
PreloadResponse,
ProcessResponse,
RunResponse,
TaskResponse,
TaskStatusResponse,
UploadFileResponse,
@ -15,15 +19,23 @@ from langflow.api.v1.schemas import (
from langflow.interface.custom.custom_component import CustomComponent
from langflow.interface.custom.directory_reader import DirectoryReader
from langflow.interface.custom.utils import build_custom_component_template
from langflow.processing.process import build_graph_and_generate_result, process_graph_cached, process_tweaks
from langflow.processing.process import (
build_graph_and_generate_result,
process_graph_cached,
process_tweaks,
run_graph,
)
from langflow.services.auth.utils import api_key_security, get_current_active_user
from langflow.services.cache.utils import save_uploaded_file
from langflow.services.database.models.flow import Flow
from langflow.services.database.models.user.model import User
from langflow.services.deps import get_session, get_session_service, get_settings_service, get_task_service
from langflow.services.deps import (
get_session,
get_session_service,
get_settings_service,
get_task_service,
)
from langflow.services.session.service import SessionService
from loguru import logger
from sqlmodel import select
try:
from langflow.worker import process_graph_cached_task
@ -33,9 +45,10 @@ except ImportError:
raise NotImplementedError("Celery is not installed")
from langflow.services.task.service import TaskService
from sqlmodel import Session
from langflow.services.task.service import TaskService
# build router
router = APIRouter(tags=["Base"])
@ -80,9 +93,15 @@ async def process_graph_data(
)
if session_id is None:
# Generate a session ID
session_id = get_session_service().generate_key(session_id=session_id, data_graph=graph_data)
session_id = get_session_service().generate_key(
session_id=session_id, data_graph=graph_data
)
task_id, task = await task_service.launch_task(
process_graph_cached_task if task_service.use_celery else process_graph_cached,
(
process_graph_cached_task
if task_service.use_celery
else process_graph_cached
),
graph_data,
inputs,
clear_cache,
@ -176,7 +195,11 @@ async def preload_flow(
else:
if session_id is None:
session_id = flow_id
flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
flow = session.exec(
select(Flow)
.where(Flow.id == flow_id)
.where(Flow.user_id == api_key_user.id)
).first()
if flow is None:
raise ValueError(f"Flow {flow_id} not found")
@ -197,6 +220,80 @@ async def preload_flow(
raise HTTPException(status_code=500, detail=str(exc)) from exc
@router.post(
"/run/{flow_id}", response_model=RunResponse, response_model_exclude_none=True
)
async def run_flow_with_caching(
session: Annotated[Session, Depends(get_session)],
flow_id: str,
inputs: Optional[Union[List[dict], dict]] = None,
tweaks: Optional[dict] = None,
stream: Annotated[bool, Body(embed=True)] = False, # noqa: F821
session_id: Annotated[Union[None, str], Body(embed=True)] = None, # noqa: F821
api_key_user: User = Depends(api_key_security),
session_service: SessionService = Depends(get_session_service),
):
try:
if session_id:
session_data = await session_service.load_session(session_id)
graph, artifacts = session_data if session_data else (None, None)
task_result: Any = None
if not graph:
raise ValueError("Graph not found in the session")
task_result, session_id = await run_graph(
graph=graph,
flow_id=flow_id,
session_id=session_id,
inputs=inputs,
artifacts=artifacts,
session_service=session_service,
stream=stream,
)
else:
# Get the flow that matches the flow_id and belongs to the user
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
flow = session.exec(
select(Flow)
.where(Flow.id == flow_id)
.where(Flow.user_id == api_key_user.id)
).first()
if flow is None:
raise ValueError(f"Flow {flow_id} not found")
if flow.data is None:
raise ValueError(f"Flow {flow_id} has no data")
graph_data = flow.data
graph_data = process_tweaks(graph_data, tweaks)
task_result, session_id = await run_graph(
graph=graph_data,
flow_id=flow_id,
session_id=session_id,
inputs=inputs,
artifacts={},
session_service=session_service,
stream=stream,
)
return RunResponse(outputs=task_result, session_id=session_id)
except sa.exc.StatementError as exc:
# StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
if "badly formed hexadecimal UUID string" in str(exc):
# This means the Flow ID is not a valid UUID which means it can't find the flow
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)
) from exc
except ValueError as exc:
if f"Flow {flow_id} not found" in str(exc):
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)
) from exc
else:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(exc)
) from exc
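For context, a hedged client-side sketch of the new route (not from the commit; the /api/v1 prefix, port, header name, flow id, and payload shape are assumptions based on the signature above):

# Hypothetical client call against the new /run/{flow_id} endpoint.
import httpx

resp = httpx.post(
    "http://localhost:7860/api/v1/run/my-flow-id",  # prefix and port assumed
    headers={"x-api-key": "sk-..."},  # api_key_security header assumed
    json={"inputs": {"input_value": "Hello"}, "stream": False},
)
resp.raise_for_status()
print(resp.json())  # RunResponse: {"outputs": [...], "session_id": "..."}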
@router.post(
"/predict/{flow_id}",
response_model=ProcessResponse,
@ -269,7 +366,11 @@ async def process(
# Get the flow that matches the flow_id and belongs to the user
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
flow = session.exec(
select(Flow)
.where(Flow.id == flow_id)
.where(Flow.user_id == api_key_user.id)
).first()
if flow is None:
raise ValueError(f"Flow {flow_id} not found")
@ -289,12 +390,18 @@ async def process(
# StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
if "badly formed hexadecimal UUID string" in str(exc):
# This means the Flow ID is not a valid UUID which means it can't find the flow
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)
) from exc
except ValueError as exc:
if f"Flow {flow_id} not found" in str(exc):
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)
) from exc
else:
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(exc)) from exc
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(exc)
) from exc
except Exception as e:
# Log stack trace
logger.exception(e)
@ -364,12 +471,16 @@ async def custom_component(
built_frontend_node = build_custom_component_template(component, user_id=user.id)
built_frontend_node = update_frontend_node_with_template_values(built_frontend_node, raw_code.frontend_node)
built_frontend_node = update_frontend_node_with_template_values(
built_frontend_node, raw_code.frontend_node
)
return built_frontend_node
@router.post("/custom_component/reload", status_code=HTTPStatus.OK)
async def reload_custom_component(path: str, user: User = Depends(get_current_active_user)):
async def reload_custom_component(
path: str, user: User = Depends(get_current_active_user)
):
from langflow.interface.custom.utils import build_custom_component_template
try:
@ -391,6 +502,8 @@ async def custom_component_update(
):
component = CustomComponent(code=raw_code.code)
component_node = build_custom_component_template(component, user_id=user.id, update_field=raw_code.field)
component_node = build_custom_component_template(
component, user_id=user.id, update_field=raw_code.field
)
# Update the field
return component_node

langflow/api/v1/schemas.py

@ -4,12 +4,12 @@ from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from uuid import UUID
from langflow.api.utils import serialize_field
from pydantic import BaseModel, Field, field_validator, model_serializer
from langflow.services.database.models.api_key.model import ApiKeyRead
from langflow.services.database.models.base import orjson_dumps
from langflow.services.database.models.flow import FlowCreate, FlowRead
from langflow.services.database.models.user import UserRead
from pydantic import BaseModel, Field, field_serializer, field_validator
class BuildStatus(Enum):
@ -66,6 +66,26 @@ class ProcessResponse(BaseModel):
backend: Optional[str] = None
class RunResponse(BaseModel):
"""Run response schema."""
outputs: Optional[List[Any]] = None
session_id: Optional[str] = None
@model_serializer(mode="wrap")
def serialize(self, handler):
# Serialize all the outputs if they are base models
if self.outputs:
serialized_outputs = []
for output in self.outputs:
if isinstance(output, BaseModel):
serialized_outputs.append(output.model_dump(exclude_none=True))
else:
serialized_outputs.append(output)
self.outputs = serialized_outputs
return handler(self)
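An illustration of the wrap-mode serializer (not part of the diff; FakeOutput is a hypothetical model): BaseModel outputs are dumped with exclude_none=True, anything else passes through untouched.

# Assumes RunResponse from above is in scope.
from pydantic import BaseModel

class FakeOutput(BaseModel):
    text: str
    extra: None = None  # dropped by exclude_none=True

run = RunResponse(outputs=[FakeOutput(text="hi"), {"raw": 1}], session_id="abc")
print(run.model_dump())
# -> {'outputs': [{'text': 'hi'}, {'raw': 1}], 'session_id': 'abc'}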
class PreloadResponse(BaseModel):
"""Preload response schema."""
@ -73,9 +93,6 @@ class PreloadResponse(BaseModel):
is_clear: Optional[bool] = None
# TaskStatusResponse(
# status=task.status, result=task.result if task.ready() else None
# )
class TaskStatusResponse(BaseModel):
"""Task status response schema."""
@ -161,7 +178,9 @@ class StreamData(BaseModel):
data: dict
def __str__(self) -> str:
return f"event: {self.event}\ndata: {orjson_dumps(self.data, indent_2=False)}\n\n"
return (
f"event: {self.event}\ndata: {orjson_dumps(self.data, indent_2=False)}\n\n"
)
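The __str__ form is exactly one server-sent event, which is what the streaming endpoints in the chat router yield. Illustration (not part of the diff):

chunk = StreamData(event="message", data={"chunk": "Hello"})
print(str(chunk))
# event: message
# data: {"chunk":"Hello"}
# (a blank line terminates the SSE event)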
class CustomComponentCode(BaseModel):
@ -218,20 +237,15 @@ class ApiKeyCreateRequest(BaseModel):
class VerticesOrderResponse(BaseModel):
ids: List[List[str]]
run_id: UUID
class ResultData(BaseModel):
class ResultDataResponse(BaseModel):
results: Optional[Any] = Field(default_factory=dict)
artifacts: Optional[Any] = Field(default_factory=dict)
timedelta: Optional[float] = None
duration: Optional[str] = None
@field_serializer("results")
def serialize_results(self, value):
if isinstance(value, dict):
return {key: serialize_field(val) for key, val in value.items()}
return serialize_field(value)
class VertexBuildResponse(BaseModel):
id: Optional[str] = None
@ -239,7 +253,7 @@ class VertexBuildResponse(BaseModel):
valid: bool
params: Optional[str]
"""JSON string of the params."""
data: ResultData
data: ResultDataResponse
"""Mapping of vertex ids to result dict containing the param name and result value."""
timestamp: Optional[datetime] = Field(default_factory=datetime.utcnow)
"""Timestamp of the build."""


@ -1,9 +1,9 @@
from typing import Callable, Optional, Union
from typing import Optional
from langchain.chains import ConversationChain
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, BaseMemory, Chain, Text
from langflow.field_typing import BaseLanguageModel, BaseMemory, Text
class ConversationChainComponent(CustomComponent):
@ -23,15 +23,15 @@ class ConversationChainComponent(CustomComponent):
def build(
self,
inputs: str,
input_value: str,
llm: BaseLanguageModel,
memory: Optional[BaseMemory] = None,
) -> Union[Chain, Callable, Text]:
) -> Text:
if memory is None:
chain = ConversationChain(llm=llm)
else:
chain = ConversationChain(llm=llm, memory=memory)
result = chain.invoke(inputs)
result = chain.invoke(input_value)
# result is an AIMessage which is a subclass of BaseMessage
# We need to check if it is a string or a BaseMessage
if hasattr(result, "content") and isinstance(result.content, str):


@ -1,14 +1,15 @@
from typing import Callable, Union
from langchain.chains import LLMCheckerChain
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, Chain
from langflow.field_typing import BaseLanguageModel, Text
class LLMCheckerChainComponent(CustomComponent):
display_name = "LLMCheckerChain"
description = ""
documentation = "https://python.langchain.com/docs/modules/chains/additional/llm_checker"
documentation = (
"https://python.langchain.com/docs/modules/chains/additional/llm_checker"
)
def build_config(self):
return {
@ -17,6 +18,12 @@ class LLMCheckerChainComponent(CustomComponent):
def build(
self,
input_value: str,
llm: BaseLanguageModel,
) -> Union[Chain, Callable]:
return LLMCheckerChain.from_llm(llm=llm)
) -> Text:
chain = LLMCheckerChain.from_llm(llm=llm)
response = chain.invoke({chain.input_key: input_value})
result = response.get(chain.output_key)
self.status = result
return result


@ -1,15 +1,17 @@
from typing import Callable, Optional, Union
from typing import Optional
from langchain.chains import LLMChain, LLMMathChain
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, BaseMemory, Chain
from langflow.field_typing import BaseLanguageModel, BaseMemory, Text
class LLMMathChainComponent(CustomComponent):
display_name = "LLMMathChain"
description = "Chain that interprets a prompt and executes python code to do math."
documentation = "https://python.langchain.com/docs/modules/chains/additional/llm_math"
documentation = (
"https://python.langchain.com/docs/modules/chains/additional/llm_math"
)
def build_config(self):
return {
@ -22,10 +24,21 @@ class LLMMathChainComponent(CustomComponent):
def build(
self,
input_value: Text,
llm: BaseLanguageModel,
llm_chain: LLMChain,
input_key: str = "question",
output_key: str = "answer",
memory: Optional[BaseMemory] = None,
) -> Union[LLMMathChain, Callable, Chain]:
return LLMMathChain(llm=llm, llm_chain=llm_chain, input_key=input_key, output_key=output_key, memory=memory)
) -> Text:
chain = LLMMathChain(
llm=llm,
llm_chain=llm_chain,
input_key=input_key,
output_key=output_key,
memory=memory,
)
response = chain.invoke({input_key: input_value})
result = response.get(output_key)
self.status = result
return result


@ -20,14 +20,17 @@ class RetrievalQAComponent(CustomComponent):
"input_key": {"display_name": "Input Key", "advanced": True},
"output_key": {"display_name": "Output Key", "advanced": True},
"return_source_documents": {"display_name": "Return Source Documents"},
"inputs": {"display_name": "Input", "input_types": ["Text", "Document"]},
"input_value": {
"display_name": "Input",
"input_types": ["Text", "Document"],
},
}
def build(
self,
combine_documents_chain: BaseCombineDocumentsChain,
retriever: BaseRetriever,
inputs: str = "",
input_value: str = "",
memory: Optional[BaseMemory] = None,
input_key: str = "query",
output_key: str = "result",


@ -26,7 +26,7 @@ class RetrievalQAWithSourcesChainComponent(CustomComponent):
def build(
self,
inputs: str,
input_value: str,
retriever: BaseRetriever,
llm: BaseLanguageModel,
chain_type: str,


@ -28,25 +28,43 @@ class SQLGeneratorComponent(CustomComponent):
def build(
self,
inputs: Text,
input_value: Text,
db: SQLDatabase,
llm: BaseLanguageModel,
top_k: int = 5,
prompt: Optional[PromptTemplate] = None,
prompt: Optional[Text] = None,
) -> Text:
if prompt:
prompt_template = PromptTemplate.from_template(template=prompt)
else:
prompt_template = None
if top_k > 0:
kwargs = {
"k": top_k,
}
if not prompt:
if not prompt_template:
sql_query_chain = create_sql_query_chain(llm=llm, db=db, **kwargs)
else:
template = prompt.template if hasattr(prompt, "template") else prompt
template = (
prompt_template.template
if hasattr(prompt, "template")
else prompt_template
)
# Check if {question} is in the prompt
if "{question}" not in template or "question" not in template.input_variables:
raise ValueError("Prompt must contain `{question}` to be used with Natural Language to SQL.")
sql_query_chain = create_sql_query_chain(llm=llm, db=db, prompt=prompt, **kwargs)
query_writer = sql_query_chain | {"query": lambda x: x.replace("SQLQuery:", "").strip()}
if (
"{question}" not in template
or "question" not in template.input_variables
):
raise ValueError(
"Prompt must contain `{question}` to be used with Natural Language to SQL."
)
sql_query_chain = create_sql_query_chain(
llm=llm, db=db, prompt=prompt_template, **kwargs
)
query_writer = sql_query_chain | {
"query": lambda x: x.replace("SQLQuery:", "").strip()
}
response = query_writer.invoke({"question": inputs})
query = response.get("query")
self.status = query


@ -1,59 +1,26 @@
from typing import Optional
from typing import Optional, Union
from langflow import CustomComponent
from langflow.components.io.base.chat import ChatComponent
from langflow.field_typing import Text
from langflow.schema import Record
class ChatInput(CustomComponent):
class ChatInput(ChatComponent):
display_name = "Chat Input"
description = "Used to get user input from the chat."
def build_config(self):
return {
"message": {
"input_types": ["Text"],
"display_name": "Message",
"multiline": True,
},
"sender": {
"options": ["Machine", "User"],
"display_name": "Sender Type",
},
"sender_name": {"display_name": "Sender Name"},
"session_id": {
"display_name": "Session ID",
"info": "Session ID of the chat history.",
},
"return_record": {
"display_name": "Return Record",
"info": "Return the message as a record containing the sender, sender_name, and session_id.",
},
}
def build(
self,
sender: Optional[str] = "User",
sender_name: Optional[str] = "User",
message: Optional[str] = None,
input_value: Optional[str] = None,
session_id: Optional[str] = None,
return_record: Optional[bool] = False,
) -> Record:
if return_record:
if isinstance(message, Record):
# Update the data of the record
message.data["sender"] = sender
message.data["sender_name"] = sender_name
message.data["session_id"] = session_id
else:
message = Record(
text=message,
data={
"sender": sender,
"sender_name": sender_name,
"session_id": session_id,
},
)
if not message:
message = ""
self.status = message
return message
) -> Union[Text, Record]:
return super().build(
sender=sender,
sender_name=sender_name,
input_value=input_value,
session_id=session_id,
return_record=return_record,
)


@ -1,63 +1,26 @@
from typing import Optional, Union
from langflow import CustomComponent
from langflow.components.io.base.chat import ChatComponent
from langflow.field_typing import Text
from langflow.schema import Record
class ChatOutput(CustomComponent):
class ChatOutput(ChatComponent):
display_name = "Chat Output"
description = "Used to send a message to the chat."
field_config = {
"code": {
"show": True,
}
}
def build_config(self):
return {
"message": {"input_types": ["Text"], "display_name": "Message"},
"sender": {
"options": ["Machine", "User"],
"display_name": "Sender Type",
},
"sender_name": {"display_name": "Sender Name"},
"session_id": {
"display_name": "Session ID",
"info": "Session ID of the chat history.",
"input_types": ["Text"],
},
"return_record": {
"display_name": "Return Record",
"info": "Return the message as a record containing the sender, sender_name, and session_id.",
},
}
def build(
self,
sender: Optional[str] = "Machine",
sender_name: Optional[str] = "AI",
sender: Optional[str] = "User",
sender_name: Optional[str] = "User",
input_value: Optional[str] = None,
session_id: Optional[str] = None,
message: Optional[str] = None,
return_record: Optional[bool] = False,
) -> Union[Text, Record]:
if return_record:
if isinstance(message, Record):
# Update the data of the record
message.data["sender"] = sender
message.data["sender_name"] = sender_name
message.data["session_id"] = session_id
else:
message = Record(
text=message,
data={
"sender": sender,
"sender_name": sender_name,
"session_id": session_id,
},
)
if not message:
message = ""
self.status = message
return message
return super().build(
sender=sender,
sender_name=sender_name,
input_value=input_value,
session_id=session_id,
return_record=return_record,
)

View file

@ -9,11 +9,11 @@ class TextInput(CustomComponent):
description = "Used to pass text input to the next component."
field_config = {
"value": {"display_name": "Value"},
"input_value": {"display_name": "Value", "multiline": True},
}
def build(self, value: Optional[str] = "") -> Text:
self.status = value
if not value:
value = ""
return value
def build(self, input_value: Optional[str] = "") -> Text:
self.status = input_value
if not input_value:
input_value = ""
return input_value

View file

@ -0,0 +1,102 @@
import warnings
from typing import Optional, Union
from langflow import CustomComponent
from langflow.field_typing import Text
from langflow.memory import add_messages
from langflow.schema import Record
class ChatComponent(CustomComponent):
display_name = "Chat Component"
description = "Use as base for chat components."
def build_config(self):
return {
"input_value": {
"input_types": ["Text"],
"display_name": "Message",
"multiline": True,
},
"sender": {
"options": ["Machine", "User"],
"display_name": "Sender Type",
},
"sender_name": {"display_name": "Sender Name"},
"session_id": {
"display_name": "Session ID",
"info": "If provided, the message will be stored in the memory.",
},
"return_record": {
"display_name": "Return Record",
"info": "Return the message as a record containing the sender, sender_name, and session_id.",
},
}
def store_message(
self,
message: Union[Text, Record],
session_id: Optional[str] = None,
sender: Optional[str] = None,
sender_name: Optional[str] = None,
) -> list[Record]:
if not message:
warnings.warn("No message provided.")
return []
if not session_id or not sender or not sender_name:
raise ValueError(
"All of session_id, sender, and sender_name must be provided."
)
if isinstance(message, Record):
record = message
record.data.update(
{
"session_id": session_id,
"sender": sender,
"sender_name": sender_name,
}
)
else:
record = Record(
text=message,
data={
"session_id": session_id,
"sender": sender,
"sender_name": sender_name,
},
)
self.status = record
records = add_messages([record])
return records
def build(
self,
sender: Optional[str] = "User",
sender_name: Optional[str] = "User",
input_value: Optional[str] = None,
session_id: Optional[str] = None,
return_record: Optional[bool] = False,
) -> Union[Text, Record]:
if return_record:
if isinstance(input_value, Record):
# Update the data of the record
input_value.data["sender"] = sender
input_value.data["sender_name"] = sender_name
input_value.data["session_id"] = session_id
else:
input_value = Record(
text=input_value,
data={
"sender": sender,
"sender_name": sender_name,
"session_id": session_id,
},
)
if not input_value:
input_value = ""
self.status = input_value
if session_id:
self.store_message(input_value, session_id, sender, sender_name)
return input_value
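
The base class above centralizes what ChatInput and ChatOutput previously duplicated: wrap the text in a Record, stamp it with sender metadata, and optionally persist it through add_messages. A stand-alone sketch of the wrapping step, using a plain dataclass in place of langflow's Record (its text and data fields are taken from the code above; the rest is illustrative):

from dataclasses import dataclass, field
from typing import Optional, Union

@dataclass
class Record:
    text: Optional[str] = None
    data: dict = field(default_factory=dict)

def to_record(
    message: Union[str, Record],
    sender: str,
    sender_name: str,
    session_id: Optional[str],
) -> Record:
    # Reuse an incoming Record, otherwise wrap the raw text.
    record = message if isinstance(message, Record) else Record(text=message)
    record.data.update(
        {"sender": sender, "sender_name": sender_name, "session_id": session_id}
    )
    return record

record = to_record("hello", sender="User", sender_name="User", session_id="abc")
assert record.data["session_id"] == "abc"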

View file

@ -2,13 +2,14 @@ from typing import Optional
from langchain_community.chat_models.bedrock import BedrockChat
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class AmazonBedrockComponent(CustomComponent):
class AmazonBedrockComponent(LCModelComponent):
display_name: str = "Amazon Bedrock Model"
description: str = "Generate text using LLM model from Amazon Bedrock."
icon = "AmazonBedrock"
def build_config(self):
return {
@ -34,12 +35,16 @@ class AmazonBedrockComponent(CustomComponent):
"model_kwargs": {"display_name": "Model Kwargs"},
"cache": {"display_name": "Cache"},
"code": {"advanced": True},
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
self,
inputs: str,
input_value: str,
model_id: str = "anthropic.claude-instant-v1",
credentials_profile_name: Optional[str] = None,
region_name: Optional[str] = None,
@ -47,6 +52,7 @@ class AmazonBedrockComponent(CustomComponent):
endpoint_url: Optional[str] = None,
streaming: bool = False,
cache: Optional[bool] = None,
stream: bool = False,
) -> Text:
try:
output = BedrockChat(
@ -60,7 +66,5 @@ class AmazonBedrockComponent(CustomComponent):
) # type: ignore
except Exception as e:
raise ValueError("Could not connect to AmazonBedrock API.") from e
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -3,13 +3,16 @@ from typing import Optional
from langchain_community.chat_models.anthropic import ChatAnthropic
from pydantic.v1 import SecretStr
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class AnthropicLLM(CustomComponent):
class AnthropicLLM(LCModelComponent):
display_name: str = "AnthropicModel"
description: str = "Generate text using Anthropic Chat&Completion large language models."
description: str = (
"Generate text using Anthropic Chat&Completion large language models."
)
icon = "Anthropic"
def build_config(self):
return {
@ -47,17 +50,22 @@ class AnthropicLLM(CustomComponent):
"info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
},
"code": {"show": False},
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
self,
model: str,
inputs: str,
input_value: str,
anthropic_api_key: Optional[str] = None,
max_tokens: Optional[int] = None,
temperature: Optional[float] = None,
api_endpoint: Optional[str] = None,
stream: bool = False,
) -> Text:
# Set default API endpoint if not provided
if not api_endpoint:
@ -66,14 +74,14 @@ class AnthropicLLM(CustomComponent):
try:
output = ChatAnthropic(
model_name=model,
anthropic_api_key=(SecretStr(anthropic_api_key) if anthropic_api_key else None),
anthropic_api_key=(
SecretStr(anthropic_api_key) if anthropic_api_key else None
),
max_tokens_to_sample=max_tokens, # type: ignore
temperature=temperature,
anthropic_api_url=api_endpoint,
)
except Exception as e:
raise ValueError("Could not connect to Anthropic API.") from e
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -3,14 +3,17 @@ from typing import Optional
from langchain.llms.base import BaseLanguageModel
from langchain_openai import AzureChatOpenAI
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class AzureChatOpenAIComponent(CustomComponent):
class AzureChatOpenAIComponent(LCModelComponent):
display_name: str = "AzureOpenAI Model"
description: str = "Generate text using LLM model from Azure OpenAI."
documentation: str = "https://python.langchain.com/docs/integrations/llms/azure_openai"
documentation: str = (
"https://python.langchain.com/docs/integrations/llms/azure_openai"
)
beta = False
icon = "Azure"
AZURE_OPENAI_MODELS = [
"gpt-35-turbo",
@ -71,19 +74,24 @@ class AzureChatOpenAIComponent(CustomComponent):
"info": "Maximum number of tokens to generate.",
},
"code": {"show": False},
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
self,
model: str,
azure_endpoint: str,
inputs: str,
input_value: str,
azure_deployment: str,
api_key: str,
api_version: str,
temperature: float = 0.7,
max_tokens: Optional[int] = 1000,
stream: bool = False,
) -> Text:
try:
output = AzureChatOpenAI(
@ -97,7 +105,5 @@ class AzureChatOpenAIComponent(CustomComponent):
)
except Exception as e:
raise ValueError("Could not connect to AzureOpenAI API.") from e
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -3,16 +3,17 @@ from typing import Optional
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from pydantic.v1 import SecretStr
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class QianfanChatEndpointComponent(CustomComponent):
class QianfanChatEndpointComponent(LCModelComponent):
display_name: str = "QianfanChat Model"
description: str = (
"Generate text using Baidu Qianfan chat models. Get more detail from "
"https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint."
)
icon = "BaiduQianfan"
def build_config(self):
return {
@ -68,12 +69,16 @@ class QianfanChatEndpointComponent(CustomComponent):
"info": "Endpoint of the Qianfan LLM, required if custom model used.",
},
"code": {"show": False},
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
self,
inputs: str,
input_value: str,
model: str = "ERNIE-Bot-turbo",
qianfan_ak: Optional[str] = None,
qianfan_sk: Optional[str] = None,
@ -81,6 +86,7 @@ class QianfanChatEndpointComponent(CustomComponent):
temperature: Optional[float] = None,
penalty_score: Optional[float] = None,
endpoint: Optional[str] = None,
stream: bool = False,
) -> Text:
try:
output = QianfanChatEndpoint( # type: ignore
@ -94,7 +100,5 @@ class QianfanChatEndpointComponent(CustomComponent):
)
except Exception as e:
raise ValueError("Could not connect to Baidu Qianfan API.") from e
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -2,11 +2,11 @@ from typing import Dict, Optional
from langchain_community.llms.ctransformers import CTransformers
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class CTransformersComponent(CustomComponent):
class CTransformersComponent(LCModelComponent):
display_name = "CTransformersModel"
description = "Generate text using CTransformers LLM models"
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"
@ -28,19 +28,24 @@ class CTransformersComponent(CustomComponent):
"field_type": "dict",
"value": '{"top_k":40,"top_p":0.95,"temperature":0.8,"repetition_penalty":1.1,"last_n_tokens":64,"seed":-1,"max_new_tokens":256,"stop":"","stream":"False","reset":"True","batch_size":8,"threads":-1,"context_length":-1,"gpu_layers":0}',
},
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
self,
model: str,
model_file: str,
inputs: str,
input_value: str,
model_type: str,
config: Optional[Dict] = None,
stream: Optional[bool] = False,
) -> Text:
output = CTransformers(model=model, model_file=model_file, model_type=model_type, config=config)
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
output = CTransformers(
model=model, model_file=model_file, model_type=model_type, config=config
)
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -1,14 +1,16 @@
from langchain_community.chat_models.cohere import ChatCohere
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class CohereComponent(CustomComponent):
class CohereComponent(LCModelComponent):
display_name = "CohereModel"
description = "Generate text using Cohere large language models."
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"
icon = "Cohere"
def build_config(self):
return {
"cohere_api_key": {
@ -28,23 +30,24 @@ class CohereComponent(CustomComponent):
"type": "float",
"show": True,
},
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
self,
cohere_api_key: str,
inputs: str,
input_value: str,
max_tokens: int = 256,
temperature: float = 0.75,
stream: bool = False,
) -> Text:
output = ChatCohere(
cohere_api_key=cohere_api_key,
max_tokens=max_tokens,
temperature=temperature,
)
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -1,16 +1,16 @@
from typing import Optional
from langchain_google_genai import ChatGoogleGenerativeAI # type: ignore
from pydantic.v1.types import SecretStr
from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic.v1 import SecretStr
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import RangeSpec, Text
class GoogleGenerativeAIComponent(CustomComponent):
class GoogleGenerativeAIComponent(LCModelComponent):
display_name: str = "Google Generative AIModel"
description: str = "Generate text using Google Generative AI to generate text."
documentation: str = "http://docs.langflow.org/components/custom"
icon = "GoogleGenerativeAI"
def build_config(self):
return {
@ -50,19 +50,24 @@ class GoogleGenerativeAIComponent(CustomComponent):
"code": {
"advanced": True,
},
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input", "info": "The input to the model."},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
self,
google_api_key: str,
model: str,
inputs: str,
input_value: str,
max_output_tokens: Optional[int] = None,
temperature: float = 0.1,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
n: Optional[int] = 1,
stream: bool = False,
) -> Text:
output = ChatGoogleGenerativeAI(
model=model,
@ -73,7 +78,4 @@ class GoogleGenerativeAIComponent(CustomComponent):
n=n or 1,
google_api_key=SecretStr(google_api_key),
)
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -3,14 +3,14 @@ from typing import Optional
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class HuggingFaceEndpointsComponent(CustomComponent):
class HuggingFaceEndpointsComponent(LCModelComponent):
display_name: str = "Hugging Face Inference API models"
description: str = "Generate text using LLM model from Hugging Face Inference API."
icon = "HuggingFace"
def build_config(self):
return {
@ -25,16 +25,21 @@ class HuggingFaceEndpointsComponent(CustomComponent):
"field_type": "code",
},
"code": {"show": False},
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
self,
inputs: str,
input_value: str,
endpoint_url: str,
task: str = "text2text-generation",
huggingfacehub_api_token: Optional[str] = None,
model_kwargs: Optional[dict] = None,
stream: bool = False,
) -> Text:
try:
llm = HuggingFaceEndpoint(
@ -46,7 +51,4 @@ class HuggingFaceEndpointsComponent(CustomComponent):
except Exception as e:
raise ValueError("Could not connect to HuggingFace Endpoints API.") from e
output = ChatHuggingFace(llm=llm)
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -2,11 +2,11 @@ from typing import Any, Dict, List, Optional
from langchain_community.llms.llamacpp import LlamaCpp
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class LlamaCppComponent(CustomComponent):
class LlamaCppComponent(LCModelComponent):
display_name = "LlamaCppModel"
description = "Generate text using llama.cpp model."
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"
@ -56,13 +56,17 @@ class LlamaCppComponent(CustomComponent):
"use_mmap": {"display_name": "Use Mmap", "advanced": True},
"verbose": {"display_name": "Verbose", "advanced": True},
"vocab_only": {"display_name": "Vocab Only", "advanced": True},
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
self,
model_path: str,
inputs: str,
input_value: str,
grammar: Optional[str] = None,
cache: Optional[bool] = None,
client: Optional[Any] = None,
@ -97,6 +101,7 @@ class LlamaCppComponent(CustomComponent):
use_mmap: Optional[bool] = True,
verbose: bool = True,
vocab_only: bool = False,
stream: bool = False,
) -> Text:
output = LlamaCpp(
model_path=model_path,
@ -135,9 +140,5 @@ class LlamaCppComponent(CustomComponent):
verbose=verbose,
vocab_only=vocab_only,
)
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -3,17 +3,19 @@ from typing import Any, Dict, List, Optional
# from langchain_community.chat_models import ChatOllama
from langchain_community.chat_models import ChatOllama
from langflow.components.models.base.model import LCModelComponent
# from langchain.chat_models import ChatOllama
from langflow import CustomComponent
from langflow.field_typing import Text
# When a callback component is added to Langflow, the comment must be uncommented.
# from langchain.callbacks.manager import CallbackManager
class ChatOllamaComponent(CustomComponent):
class ChatOllamaComponent(LCModelComponent):
display_name = "ChatOllamaModel"
description = "Generate text using Local LLM for chat with Ollama."
icon = "Ollama"
def build_config(self) -> dict:
return {
@ -164,14 +166,18 @@ class ChatOllamaComponent(CustomComponent):
"info": "Template to use for generating text.",
"advanced": True,
},
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
self,
base_url: Optional[str],
model: str,
inputs: str,
input_value: str,
mirostat: Optional[str],
mirostat_eta: Optional[float] = None,
mirostat_tau: Optional[float] = None,
@ -197,6 +203,7 @@ class ChatOllamaComponent(CustomComponent):
timeout: Optional[int] = None,
top_k: Optional[int] = None,
top_p: Optional[int] = None,
stream: Optional[bool] = False,
) -> Text:
if not base_url:
base_url = "http://localhost:11434"
@ -250,7 +257,5 @@ class ChatOllamaComponent(CustomComponent):
output = ChatOllama(**llm_params) # type: ignore
except Exception as e:
raise ValueError("Could not initialize Ollama LLM.") from e
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -1,17 +1,19 @@
from typing import Optional
from langchain_openai import ChatOpenAI
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import NestedDict, Text
class OpenAIModelComponent(CustomComponent):
class OpenAIModelComponent(LCModelComponent):
display_name = "OpenAI Model"
description = "Generates text using OpenAI's models."
icon = "OpenAI"
def build_config(self):
return {
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input"},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": False,
@ -56,21 +58,26 @@ class OpenAIModelComponent(CustomComponent):
"required": False,
"value": 0.7,
},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
self,
inputs: Text,
input_value: Text,
max_tokens: Optional[int] = 256,
model_kwargs: NestedDict = {},
model_name: str = "gpt-4-1106-preview",
openai_api_base: Optional[str] = None,
openai_api_key: Optional[str] = None,
temperature: float = 0.7,
stream: Optional[bool] = False,
) -> Text:
if not openai_api_base:
openai_api_base = "https://api.openai.com/v1"
model = ChatOpenAI(
output = ChatOpenAI(
max_tokens=max_tokens,
model_kwargs=model_kwargs,
model=model_name,
@ -79,7 +86,4 @@ class OpenAIModelComponent(CustomComponent):
temperature=temperature,
)
message = model.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -2,11 +2,11 @@ from typing import List, Optional
from langchain_core.messages.base import BaseMessage
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class ChatVertexAIComponent(CustomComponent):
class ChatVertexAIComponent(LCModelComponent):
display_name = "ChatVertexAIModel"
description = "Generate text using Vertex AI Chat large language models API."
@ -57,12 +57,16 @@ class ChatVertexAIComponent(CustomComponent):
"value": False,
"advanced": True,
},
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
self,
inputs: str,
input_value: str,
credentials: Optional[str],
project: str,
examples: Optional[List[BaseMessage]] = [],
@ -73,6 +77,7 @@ class ChatVertexAIComponent(CustomComponent):
top_k: int = 40,
top_p: float = 0.95,
verbose: bool = False,
stream: bool = False,
) -> Text:
try:
from langchain_google_vertexai import ChatVertexAI
@ -92,7 +97,5 @@ class ChatVertexAIComponent(CustomComponent):
top_p=top_p,
verbose=verbose,
)
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -0,0 +1,28 @@
from langchain_core.runnables import Runnable
from langflow import CustomComponent
class LCModelComponent(CustomComponent):
display_name: str = "Model Name"
description: str = "Model Description"
def get_result(self, output: Runnable, stream: bool, input_value: str):
"""
Retrieves the result from the output of a Runnable object.
Args:
output (Runnable): The output object to retrieve the result from.
stream (bool): Indicates whether to use streaming or invocation mode.
input_value (str): The input value to pass to the output object.
Returns:
The result obtained from the output object.
"""
if stream:
result = output.stream(input_value)
else:
message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
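
get_result hinges on the langchain Runnable contract: invoke returns a single message that may carry a .content attribute, while stream yields chunks. The toy model below exercises both branches; FakeMessage and FakeModel are stand-ins, not real langchain classes:

class FakeMessage:
    def __init__(self, content: str):
        self.content = content

class FakeModel:
    def invoke(self, value: str) -> FakeMessage:
        return FakeMessage(value.upper())

    def stream(self, value: str):
        for token in value.split():
            yield FakeMessage(token)

def get_result(output, stream: bool, input_value: str):
    if stream:
        return output.stream(input_value)  # generator of chunks
    message = output.invoke(input_value)
    return message.content if hasattr(message, "content") else message

print(get_result(FakeModel(), stream=False, input_value="hello world"))  # HELLO WORLD
for chunk in get_result(FakeModel(), stream=True, input_value="hello world"):
    print(chunk.content)  # hello, then world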

View file

@ -15,7 +15,7 @@ class RunnableExecComponent(CustomComponent):
"display_name": "Input Key",
"info": "The key to use for the input.",
},
"inputs": {
"input_value": {
"display_name": "Inputs",
"info": "The inputs to pass to the runnable.",
},
@ -32,7 +32,7 @@ class RunnableExecComponent(CustomComponent):
def build(
self,
input_key: str,
inputs: str,
input_value: str,
runnable: Runnable,
output_key: str = "output",
) -> Text:

View file

@ -2,12 +2,13 @@ from typing import List, Optional
import chromadb # type: ignore
from langchain_community.vectorstores.chroma import Chroma
from langflow import CustomComponent
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.field_typing import Embeddings, Text
from langflow.schema import Record, docs_to_records
from langflow.schema import Record
class ChromaSearchComponent(CustomComponent):
class ChromaSearchComponent(LCVectorStoreComponent):
"""
A custom component for implementing a Vector Store using Chroma.
"""
@ -25,7 +26,7 @@ class ChromaSearchComponent(CustomComponent):
- dict: A dictionary containing the configuration options for the component.
"""
return {
"inputs": {"display_name": "Input"},
"input_value": {"display_name": "Input"},
"search_type": {
"display_name": "Search Type",
"options": ["Similarity", "MMR"],
@ -57,7 +58,7 @@ class ChromaSearchComponent(CustomComponent):
def build(
self,
inputs: Text,
input_value: Text,
search_type: str,
collection_name: str,
embedding: Embeddings,
@ -92,24 +93,19 @@ class ChromaSearchComponent(CustomComponent):
if chroma_server_host is not None:
chroma_settings = chromadb.config.Settings(
chroma_server_cors_allow_origins=chroma_server_cors_allow_origins or None,
chroma_server_cors_allow_origins=chroma_server_cors_allow_origins
or None,
chroma_server_host=chroma_server_host,
chroma_server_port=chroma_server_port or None,
chroma_server_grpc_port=chroma_server_grpc_port or None,
chroma_server_ssl_enabled=chroma_server_ssl_enabled,
)
index_directory = self.resolve_path(index_directory)
chroma = Chroma(
vector_store = Chroma(
embedding_function=embedding,
collection_name=collection_name,
persist_directory=index_directory,
client_settings=chroma_settings,
)
# Validate the inputs
docs = []
if inputs and isinstance(inputs, str):
docs = chroma.search(query=inputs, search_type=search_type.lower())
else:
raise ValueError("Invalid inputs provided.")
return docs_to_records(docs)
return self.search_with_vector_store(input_value, search_type, vector_store)

View file

@ -3,24 +3,36 @@ from typing import List, Union
from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.faiss import FAISS
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings
class FAISSComponent(CustomComponent):
display_name = "FAISS"
description = "Construct FAISS wrapper from raw documents."
description = "Ingest documents into FAISS Vector Store."
documentation = "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss"
def build_config(self):
return {
"documents": {"display_name": "Documents"},
"embedding": {"display_name": "Embedding"},
"folder_path": {
"display_name": "Folder Path",
"info": "Path to save the FAISS index. It will be relative to where Langflow is running.",
},
"index_name": {"display_name": "Index Name"},
}
def build(
self,
embedding: Embeddings,
documents: List[Document],
folder_path: str,
index_name: str = "langflow_index",
) -> Union[VectorStore, FAISS, BaseRetriever]:
return FAISS.from_documents(documents=documents, embedding=embedding)
vector_store = FAISS.from_documents(documents=documents, embedding=embedding)
if not folder_path:
raise ValueError("Folder path is required to save the FAISS index.")
path = self.resolve_path(folder_path)
vector_store.save_local(str(path), index_name)
return vector_store

View file

@ -0,0 +1,45 @@
from typing import List
from langchain_community.vectorstores.faiss import FAISS
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.field_typing import Embeddings
from langflow.schema import Record
class FAISSSearchComponent(LCVectorStoreComponent):
display_name = "FAISS Search"
description = "Search a FAISS Vector Store for similar documents."
documentation = "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss"
def build_config(self):
return {
"documents": {"display_name": "Documents"},
"embedding": {"display_name": "Embedding"},
"folder_path": {
"display_name": "Folder Path",
"info": "Path to save the FAISS index. It will be relative to where Langflow is running.",
},
"input_value": {"display_name": "Input"},
"index_name": {"display_name": "Index Name"},
}
def build(
self,
input_value: str,
embedding: Embeddings,
folder_path: str,
index_name: str = "langflow_index",
) -> List[Record]:
if not folder_path:
raise ValueError("Folder path is required to save the FAISS index.")
path = self.resolve_path(folder_path)
vector_store = FAISS.load_local(
folder_path=str(path), embeddings=embedding, index_name=index_name
)
if not vector_store:
raise ValueError("Failed to load the FAISS index.")
return self.search_with_vector_store(
vector_store=vector_store, input_value=input_value, search_type="similarity"
)
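
Taken together, the two FAISS components form a save/load round trip. A hedged sketch of that flow, assuming langchain-community and faiss-cpu are installed; FakeEmbeddings stands in for a real embedding model and the folder path is illustrative:

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores.faiss import FAISS
from langchain_core.documents import Document

embedding = FakeEmbeddings(size=64)
docs = [Document(page_content="hello world"), Document(page_content="goodbye")]

# What the FAISS component does: build the index and persist it.
store = FAISS.from_documents(documents=docs, embedding=embedding)
store.save_local("/tmp/langflow_index", "langflow_index")

# What the FAISS Search component does: load the index and query it.
loaded = FAISS.load_local(
    "/tmp/langflow_index", embeddings=embedding, index_name="langflow_index"
)
print(loaded.search("hello", search_type="similarity"))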

View file

@ -0,0 +1,56 @@
from typing import List, Optional
from langchain_community.vectorstores.mongodb_atlas import MongoDBAtlasVectorSearch
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings, NestedDict
class MongoDBAtlasComponent(CustomComponent):
display_name = "MongoDB Atlas"
description = (
"Construct a `MongoDB Atlas Vector Search` vector store from raw documents."
)
def build_config(self):
return {
"documents": {"display_name": "Documents"},
"embedding": {"display_name": "Embedding"},
"collection_name": {"display_name": "Collection Name"},
"db_name": {"display_name": "Database Name"},
"index_name": {"display_name": "Index Name"},
"mongodb_atlas_cluster_uri": {"display_name": "MongoDB Atlas Cluster URI"},
"search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
}
def build(
self,
embedding: Embeddings,
documents: Optional[List[Document]] = None,
collection_name: str = "",
db_name: str = "",
index_name: str = "",
mongodb_atlas_cluster_uri: str = "",
search_kwargs: Optional[NestedDict] = None,
) -> MongoDBAtlasVectorSearch:
search_kwargs = search_kwargs or {}
if documents:
vector_store = MongoDBAtlasVectorSearch.from_documents(
documents=documents,
embedding=embedding,
collection_name=collection_name,
db_name=db_name,
index_name=index_name,
mongodb_atlas_cluster_uri=mongodb_atlas_cluster_uri,
search_kwargs=search_kwargs,
)
else:
vector_store = MongoDBAtlasVectorSearch(
embedding=embedding,
collection_name=collection_name,
db_name=db_name,
index_name=index_name,
mongodb_atlas_cluster_uri=mongodb_atlas_cluster_uri,
search_kwargs=search_kwargs,
)
return vector_store

View file

@ -1,22 +1,22 @@
from typing import List, Optional
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langflow import CustomComponent
from langflow.field_typing import (
Document,
Embeddings,
NestedDict,
)
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.components.vectorstores.MongoDBAtlasVector import MongoDBAtlasComponent
from langflow.field_typing import Embeddings, NestedDict
from langflow.schema import Record
class MongoDBAtlasComponent(CustomComponent):
display_name = "MongoDB Atlas"
description = "Construct a `MongoDB Atlas Vector Search` vector store from raw documents."
class MongoDBAtlasSearchComponent(MongoDBAtlasComponent, LCVectorStoreComponent):
display_name = "MongoDB Atlas Search"
description = "Search a MongoDB Atlas Vector Store for similar documents."
def build_config(self):
return {
"documents": {"display_name": "Documents"},
"search_type": {
"display_name": "Search Type",
"options": ["Similarity", "MMR"],
},
"input_value": {"display_name": "Input"},
"embedding": {"display_name": "Embedding"},
"collection_name": {"display_name": "Collection Name"},
"db_name": {"display_name": "Database Name"},
@ -27,17 +27,16 @@ class MongoDBAtlasComponent(CustomComponent):
def build(
self,
documents: List[Document],
input_value: str,
search_type: str,
embedding: Embeddings,
collection_name: str = "",
db_name: str = "",
index_name: str = "",
mongodb_atlas_cluster_uri: str = "",
search_kwargs: Optional[NestedDict] = None,
) -> MongoDBAtlasVectorSearch:
search_kwargs = search_kwargs or {}
return MongoDBAtlasVectorSearch(
documents=documents,
) -> List[Record]:
vector_store = super().build(
embedding=embedding,
collection_name=collection_name,
db_name=db_name,
@ -45,3 +44,8 @@ class MongoDBAtlasComponent(CustomComponent):
mongodb_atlas_cluster_uri=mongodb_atlas_cluster_uri,
search_kwargs=search_kwargs,
)
if not vector_store:
raise ValueError("Failed to create MongoDB Atlas Vector Store")
return self.search_with_vector_store(
vector_store=vector_store, input_value=input_value, search_type=search_type
)

View file

@ -5,6 +5,7 @@ import pinecone # type: ignore
from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.pinecone import Pinecone
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings
@ -12,6 +13,7 @@ from langflow.field_typing import Document, Embeddings
class PineconeComponent(CustomComponent):
display_name = "Pinecone"
description = "Construct Pinecone wrapper from raw documents."
icon = "Pinecone"
def build_config(self):
return {
@ -19,10 +21,23 @@ class PineconeComponent(CustomComponent):
"embedding": {"display_name": "Embedding"},
"index_name": {"display_name": "Index Name"},
"namespace": {"display_name": "Namespace"},
"pinecone_api_key": {"display_name": "Pinecone API Key", "default": "", "password": True, "required": True},
"pinecone_env": {"display_name": "Pinecone Environment", "default": "", "required": True},
"pinecone_api_key": {
"display_name": "Pinecone API Key",
"default": "",
"password": True,
"required": True,
},
"pinecone_env": {
"display_name": "Pinecone Environment",
"default": "",
"required": True,
},
"search_kwargs": {"display_name": "Search Kwargs", "default": "{}"},
"pool_threads": {"display_name": "Pool Threads", "default": 1, "advanced": True},
"pool_threads": {
"display_name": "Pool Threads",
"default": 1,
"advanced": True,
},
}
def build(

View file

@ -0,0 +1,70 @@
from typing import List, Optional
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.components.vectorstores.Pinecone import PineconeComponent
from langflow.field_typing import Embeddings
from langflow.schema import Record
class PineconeSearchComponent(PineconeComponent, LCVectorStoreComponent):
display_name = "Pinecone Search"
description = "Search a Pinecone Vector Store for similar documents."
icon = "Pinecone"
def build_config(self):
return {
"search_type": {
"display_name": "Search Type",
"options": ["Similarity", "MMR"],
},
"input_value": {"display_name": "Input"},
"embedding": {"display_name": "Embedding"},
"index_name": {"display_name": "Index Name"},
"namespace": {"display_name": "Namespace"},
"pinecone_api_key": {
"display_name": "Pinecone API Key",
"default": "",
"password": True,
"required": True,
},
"pinecone_env": {
"display_name": "Pinecone Environment",
"default": "",
"required": True,
},
"search_kwargs": {"display_name": "Search Kwargs", "default": "{}"},
"pool_threads": {
"display_name": "Pool Threads",
"default": 1,
"advanced": True,
},
}
def build(
self,
input_value: str,
embedding: Embeddings,
pinecone_env: str,
text_key: str = "text",
pool_threads: int = 4,
index_name: Optional[str] = None,
pinecone_api_key: Optional[str] = None,
namespace: Optional[str] = "default",
search_type: str = "similarity",
) -> List[Record]:
vector_store = super().build(
embedding=embedding,
pinecone_env=pinecone_env,
documents=[],
text_key=text_key,
pool_threads=pool_threads,
index_name=index_name,
pinecone_api_key=pinecone_api_key,
namespace=namespace,
)
if not vector_store:
raise ValueError("Failed to load the Pinecone index.")
return self.search_with_vector_store(
vector_store=vector_store, input_value=input_value, search_type=search_type
)
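
PineconeSearchComponent follows the same mixin pattern as the other search components: the concrete component supplies build() for constructing the store, LCVectorStoreComponent supplies the search helper, and super().build() resolves through Python's MRO to the builder. A toy illustration with stand-in classes:

class VectorStoreBuilder:
    # Stands in for PineconeComponent and the other concrete components.
    def build(self, **kwargs):
        return {"store": "ready", **kwargs}

class SearchMixin:
    # Stands in for LCVectorStoreComponent.
    def search_with_vector_store(self, input_value, search_type, vector_store):
        print(f"searching {vector_store} for {input_value!r} ({search_type})")
        return []

class SearchComponent(VectorStoreBuilder, SearchMixin):
    def build(self, input_value, search_type="similarity", **kwargs):
        vector_store = super().build(**kwargs)  # MRO resolves to VectorStoreBuilder.build
        if not vector_store:
            raise ValueError("Failed to build the vector store.")
        return self.search_with_vector_store(input_value, search_type, vector_store)

SearchComponent().build("what is langflow?", index_name="demo")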

View file

@ -0,0 +1,91 @@
from typing import List, Optional
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.components.vectorstores.Qdrant import QdrantComponent
from langflow.field_typing import Embeddings, NestedDict
from langflow.schema import Record
class QdrantSearchComponent(QdrantComponent, LCVectorStoreComponent):
display_name = "Qdrant"
description = "Construct Qdrant wrapper from a list of texts."
def build_config(self):
return {
"search_type": {
"display_name": "Search Type",
"options": ["Similarity", "MMR"],
},
"input_value": {"display_name": "Input"},
"embedding": {"display_name": "Embedding"},
"api_key": {"display_name": "API Key", "password": True, "advanced": True},
"collection_name": {"display_name": "Collection Name"},
"content_payload_key": {
"display_name": "Content Payload Key",
"advanced": True,
},
"distance_func": {"display_name": "Distance Function", "advanced": True},
"grpc_port": {"display_name": "gRPC Port", "advanced": True},
"host": {"display_name": "Host", "advanced": True},
"https": {"display_name": "HTTPS", "advanced": True},
"location": {"display_name": "Location", "advanced": True},
"metadata_payload_key": {
"display_name": "Metadata Payload Key",
"advanced": True,
},
"path": {"display_name": "Path", "advanced": True},
"port": {"display_name": "Port", "advanced": True},
"prefer_grpc": {"display_name": "Prefer gRPC", "advanced": True},
"prefix": {"display_name": "Prefix", "advanced": True},
"search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
"timeout": {"display_name": "Timeout", "advanced": True},
"url": {"display_name": "URL", "advanced": True},
}
def build(
self,
input_value: str,
embedding: Embeddings,
collection_name: str,
search_type: str = "similarity",
api_key: Optional[str] = None,
content_payload_key: str = "page_content",
distance_func: str = "Cosine",
grpc_port: int = 6334,
https: bool = False,
host: Optional[str] = None,
location: Optional[str] = None,
metadata_payload_key: str = "metadata",
path: Optional[str] = None,
port: Optional[int] = 6333,
prefer_grpc: bool = False,
prefix: Optional[str] = None,
search_kwargs: Optional[NestedDict] = None,
timeout: Optional[int] = None,
url: Optional[str] = None,
) -> List[Record]:
vector_store = super().build(
embedding=embedding,
collection_name=collection_name,
api_key=api_key,
content_payload_key=content_payload_key,
distance_func=distance_func,
grpc_port=grpc_port,
https=https,
host=host,
location=location,
metadata_payload_key=metadata_payload_key,
path=path,
port=port,
prefer_grpc=prefer_grpc,
prefix=prefix,
search_kwargs=search_kwargs,
timeout=timeout,
url=url,
)
if not vector_store:
raise ValueError("Failed to load the Qdrant index.")
return self.search_with_vector_store(
vector_store=vector_store, input_value=input_value, search_type=search_type
)

View file

@ -0,0 +1,77 @@
from typing import List, Optional
from langchain.embeddings.base import Embeddings
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.components.vectorstores.Redis import RedisComponent
from langflow.schema import Record
class RedisSearchComponent(RedisComponent, LCVectorStoreComponent):
"""
A custom component for implementing a Vector Store using Redis.
"""
display_name: str = "Redis Search"
description: str = "Search a Redis Vector Store for similar documents."
documentation = "https://python.langchain.com/docs/integrations/vectorstores/redis"
beta = True
def build_config(self):
"""
Builds the configuration for the component.
Returns:
- dict: A dictionary containing the configuration options for the component.
"""
return {
"search_type": {
"display_name": "Search Type",
"options": ["Similarity", "MMR"],
},
"input_value": {"display_name": "Input"},
"index_name": {"display_name": "Index Name", "value": "your_index"},
"code": {"show": False, "display_name": "Code"},
"documents": {"display_name": "Documents", "is_list": True},
"embedding": {"display_name": "Embedding"},
"schema": {"display_name": "Schema", "file_types": [".yaml"]},
"redis_server_url": {
"display_name": "Redis Server Connection String",
"advanced": False,
},
"redis_index_name": {"display_name": "Redis Index", "advanced": False},
}
def build(
self,
input_value: str,
search_type: str,
embedding: Embeddings,
redis_server_url: str,
redis_index_name: str,
schema: Optional[str] = None,
) -> List[Record]:
"""
Builds the Vector Store or BaseRetriever object.
Args:
- embedding (Embeddings): The embeddings to use for the Vector Store.
- documents (Optional[Document]): The documents to use for the Vector Store.
- redis_index_name (str): The name of the Redis index.
- redis_server_url (str): The URL for the Redis server.
Returns:
- VectorStore: The Vector Store object.
"""
vector_store = super().build(
embedding=embedding,
redis_server_url=redis_server_url,
redis_index_name=redis_index_name,
schema=schema,
)
if not vector_store:
raise ValueError("Failed to load the Redis index.")
return self.search_with_vector_store(
input_value=input_value, search_type=search_type, vector_store=vector_store
)

View file

@ -0,0 +1,49 @@
from typing import List
from langchain_community.vectorstores.supabase import SupabaseVectorStore
from supabase.client import Client, create_client
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.field_typing import Embeddings
from langflow.schema import Record
class SupabaseSearchComponent(LCVectorStoreComponent):
display_name = "Supabase Search"
description = "Search a Supabase Vector Store for similar documents."
def build_config(self):
return {
"search_type": {
"display_name": "Search Type",
"options": ["Similarity", "MMR"],
},
"input_value": {"display_name": "Input"},
"embedding": {"display_name": "Embedding"},
"query_name": {"display_name": "Query Name"},
"search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
"supabase_service_key": {"display_name": "Supabase Service Key"},
"supabase_url": {"display_name": "Supabase URL"},
"table_name": {"display_name": "Table Name", "advanced": True},
}
def build(
self,
input_value: str,
search_type: str,
embedding: Embeddings,
query_name: str = "",
supabase_service_key: str = "",
supabase_url: str = "",
table_name: str = "",
) -> List[Record]:
supabase: Client = create_client(
supabase_url, supabase_key=supabase_service_key
)
vector_store = SupabaseVectorStore(
client=supabase,
embedding=embedding,
table_name=table_name,
query_name=query_name,
)
return self.search_with_vector_store(input_value, search_type, vector_store)

View file

@ -8,12 +8,15 @@ from langchain_community.vectorstores.vectara import Vectara
from langchain_core.vectorstores import VectorStore
from langflow import CustomComponent
from langflow.field_typing import BaseRetriever, Document
from langchain_community.vectorstores.vectara import Vectara
class VectaraComponent(CustomComponent):
display_name: str = "Vectara"
description: str = "Implementation of Vector Store using Vectara"
documentation = "https://python.langchain.com/docs/integrations/vectorstores/vectara"
documentation = (
"https://python.langchain.com/docs/integrations/vectorstores/vectara"
)
beta = True
field_config = {
"vectara_customer_id": {
@ -26,7 +29,10 @@ class VectaraComponent(CustomComponent):
"display_name": "Vectara API Key",
"password": True,
},
"documents": {"display_name": "Documents", "info": "If provided, will be upserted to corpus (optional)"},
"documents": {
"display_name": "Documents",
"info": "If provided, will be upserted to corpus (optional)",
},
"files_url": {
"display_name": "Files Url",
"info": "Make vectara object using url of files (optional)",

View file

@ -0,0 +1,64 @@
from typing import List
from langchain_community.vectorstores.vectara import Vectara
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.components.vectorstores.Vectara import VectaraComponent
from langflow.schema import Record
class VectaraSearchComponent(VectaraComponent, LCVectorStoreComponent):
display_name: str = "Vectara Search"
description: str = "Search a Vectara Vector Store for similar documents."
documentation = (
"https://python.langchain.com/docs/integrations/vectorstores/vectara"
)
beta = True
field_config = {
"search_type": {
"display_name": "Search Type",
"options": ["Similarity", "MMR"],
},
"input_value": {"display_name": "Input"},
"vectara_customer_id": {
"display_name": "Vectara Customer ID",
},
"vectara_corpus_id": {
"display_name": "Vectara Corpus ID",
},
"vectara_api_key": {
"display_name": "Vectara API Key",
"password": True,
},
"documents": {
"display_name": "Documents",
"info": "If provided, will be upserted to corpus (optional)",
},
"files_url": {
"display_name": "Files Url",
"info": "Make vectara object using url of files (optional)",
},
}
def build(
self,
input_value: str,
search_type: str,
vectara_customer_id: str,
vectara_corpus_id: str,
vectara_api_key: str,
) -> List[Record]:
source = "Langflow"
vector_store = Vectara(
vectara_customer_id=vectara_customer_id,
vectara_corpus_id=vectara_corpus_id,
vectara_api_key=vectara_api_key,
source=source,
)
if not vector_store:
raise ValueError("Failed to create Vectara Vector Store")
return self.search_with_vector_store(
vector_store=vector_store, input_value=input_value, search_type=search_type
)

View file

@ -8,10 +8,12 @@ from langchain_community.vectorstores import VectorStore, Weaviate
from langflow import CustomComponent
class WeaviateVectorStore(CustomComponent):
class WeaviateVectorStoreComponent(CustomComponent):
display_name: str = "Weaviate"
description: str = "Implementation of Vector Store using Weaviate"
documentation = "https://python.langchain.com/docs/integrations/vectorstores/weaviate"
documentation = (
"https://python.langchain.com/docs/integrations/vectorstores/weaviate"
)
beta = True
field_config = {
"url": {"display_name": "Weaviate URL", "value": "http://localhost:8080"},
@ -24,7 +26,12 @@ class WeaviateVectorStore(CustomComponent):
"display_name": "Index name",
"required": False,
},
"text_key": {"display_name": "Text Key", "required": False, "advanced": True, "value": "text"},
"text_key": {
"display_name": "Text Key",
"required": False,
"advanced": True,
"value": "text",
},
"documents": {"display_name": "Documents", "is_list": True},
"embedding": {"display_name": "Embedding"},
"attributes": {
@ -34,7 +41,11 @@ class WeaviateVectorStore(CustomComponent):
"field_type": "str",
"advanced": True,
},
"search_by_text": {"display_name": "Search By Text", "field_type": "bool", "advanced": True},
"search_by_text": {
"display_name": "Search By Text",
"field_type": "bool",
"advanced": True,
},
"code": {"show": False},
}

View file

@ -0,0 +1,82 @@
from typing import List, Optional
from langchain.embeddings.base import Embeddings
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.components.vectorstores.Weaviate import WeaviateVectorStoreComponent
from langflow.schema import Record
class WeaviateSearchVectorStore(WeaviateVectorStoreComponent, LCVectorStoreComponent):
display_name: str = "Weaviate Search"
description: str = "Search a Weaviate Vector Store for similar documents."
documentation = (
"https://python.langchain.com/docs/integrations/vectorstores/weaviate"
)
beta = True
field_config = {
"search_type": {
"display_name": "Search Type",
"options": ["Similarity", "MMR"],
},
"input_value": {"display_name": "Input"},
"url": {"display_name": "Weaviate URL", "value": "http://localhost:8080"},
"api_key": {
"display_name": "API Key",
"password": True,
"required": False,
},
"index_name": {
"display_name": "Index name",
"required": False,
},
"text_key": {
"display_name": "Text Key",
"required": False,
"advanced": True,
"value": "text",
},
"documents": {"display_name": "Documents", "is_list": True},
"embedding": {"display_name": "Embedding"},
"attributes": {
"display_name": "Attributes",
"required": False,
"is_list": True,
"field_type": "str",
"advanced": True,
},
"search_by_text": {
"display_name": "Search By Text",
"field_type": "bool",
"advanced": True,
},
"code": {"show": False},
}
def build(
self,
input_value: str,
search_type: str,
url: str,
search_by_text: bool = False,
api_key: Optional[str] = None,
index_name: Optional[str] = None,
text_key: str = "text",
embedding: Optional[Embeddings] = None,
attributes: Optional[list] = None,
) -> List[Record]:
vector_store = super().build(
url=url,
api_key=api_key,
index_name=index_name,
text_key=text_key,
embedding=embedding,
attributes=attributes,
search_by_text=search_by_text,
)
if not vector_store:
raise ValueError("Failed to load the Weaviate index.")
return self.search_with_vector_store(
vector_store=vector_store, input_value=input_value, search_type=search_type
)

View file

@ -0,0 +1,41 @@
from typing import List
from langchain_core.vectorstores import VectorStore
from langflow import CustomComponent
from langflow.field_typing import Text
from langflow.schema import Record, docs_to_records
class LCVectorStoreComponent(CustomComponent):
display_name: str = "LC Vector Store"
description: str = "Search a LC Vector Store for similar documents."
beta: bool = True
def search_with_vector_store(
self, input_value: Text, search_type: str, vector_store: VectorStore
) -> List[Record]:
"""
Search for records in the vector store based on the input value and search type.
Args:
input_value (Text): The input value to search for.
search_type (str): The type of search to perform.
vector_store (VectorStore): The vector store to search in.
Returns:
List[Record]: A list of records matching the search criteria.
Raises:
ValueError: If invalid inputs are provided.
"""
docs = []
if input_value and isinstance(input_value, str):
docs = vector_store.search(
query=input_value, search_type=search_type.lower()
)
else:
raise ValueError("Invalid inputs provided.")
return docs_to_records(docs)
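
The helper leans on langchain's VectorStore.search(query, search_type) contract and lower-cases the UI option ("Similarity"/"MMR") before dispatching. A small stand-in store shows the call shape; TinyStore is illustrative, not a langchain class:

class TinyStore:
    def __init__(self, texts):
        self.texts = texts

    def search(self, query: str, search_type: str):
        assert search_type in ("similarity", "mmr"), "UI options are lower-cased first"
        return [t for t in self.texts if query.lower() in t.lower()]

store = TinyStore(["Langflow ships components", "Vector stores index documents"])
search_type = "Similarity"  # as offered in build_config
print(store.search(query="vector", search_type=search_type.lower()))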

View file

@ -0,0 +1,73 @@
from typing import List, Optional
from langchain.embeddings.base import Embeddings
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.components.vectorstores.pgvector import PGVectorComponent
from langflow.schema import Record
class PGVectorSearchComponent(PGVectorComponent, LCVectorStoreComponent):
"""
A custom component for implementing a Vector Store using PostgreSQL.
"""
display_name: str = "PGVector Search"
description: str = "Search a PGVector Store for similar documents."
documentation = (
"https://python.langchain.com/docs/integrations/vectorstores/pgvector"
)
def build_config(self):
"""
Builds the configuration for the component.
Returns:
- dict: A dictionary containing the configuration options for the component.
"""
return {
"code": {"show": False},
"embedding": {"display_name": "Embedding"},
"search_type": {
"display_name": "Search Type",
"options": ["Similarity", "MMR"],
},
"pg_server_url": {
"display_name": "PostgreSQL Server Connection String",
"advanced": False,
},
"collection_name": {"display_name": "Table", "advanced": False},
"input_value": {"display_name": "Input"},
}
def build(
self,
input_value: str,
embedding: Embeddings,
pg_server_url: str,
collection_name: str,
search_type: str = "similarity",
) -> List[Record]:
"""
Builds the Vector Store or BaseRetriever object.
Args:
- input_value (str): The input value to search for.
- embedding (Embeddings): The embeddings to use for the Vector Store.
- collection_name (str): The name of the PG table.
- pg_server_url (str): The URL for the PG server.
Returns:
- VectorStore: The Vector Store object.
"""
try:
vector_store = super().build(
embedding=embedding,
pg_server_url=pg_server_url,
collection_name=collection_name,
)
except Exception as e:
raise RuntimeError(f"Failed to build PGVector: {e}") from e
return self.search_with_vector_store(
input_value=input_value, search_type=search_type, vector_store=vector_store
)

View file

@ -11,31 +11,6 @@ agents:
documentation: ""
SQLAgent:
documentation: ""
chains:
# LLMChain:
# documentation: "https://python.langchain.com/docs/modules/chains/foundational/llm_chain"
LLMMathChain:
documentation: "https://python.langchain.com/docs/modules/chains/additional/llm_math"
LLMCheckerChain:
documentation: "https://python.langchain.com/docs/modules/chains/additional/llm_checker"
# ConversationChain:
# documentation: ""
SeriesCharacterChain:
documentation: ""
MidJourneyPromptChain:
documentation: ""
TimeTravelGuideChain:
documentation: ""
SQLDatabaseChain:
documentation: ""
RetrievalQA:
documentation: "https://python.langchain.com/docs/modules/chains/popular/vector_db_qa"
RetrievalQAWithSourcesChain:
documentation: ""
ConversationalRetrievalChain:
documentation: "https://python.langchain.com/docs/modules/chains/popular/chat_vector_db"
CombineDocsChain:
documentation: ""
documentloaders:
AirbyteJSONLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/airbyte_json"
@ -243,24 +218,7 @@ retrievers:
# https://github.com/supabase-community/supabase-py/issues/482
# ZepRetriever:
# documentation: "https://python.langchain.com/docs/modules/data_connection/retrievers/integrations/zep_memorystore"
vectorstores:
# Chroma:
# documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/chroma"
Qdrant:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/qdrant"
FAISS:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss"
Pinecone:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/pinecone"
ElasticsearchStore:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/elasticsearch"
SupabaseVectorStore:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/supabase"
MongoDBAtlasVectorSearch:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/mongodb_atlas"
# Requires docarray >=0.32.0 but langchain-serve requires jina 3.15.2 which doesn't support docarray >=0.32.0
# DocArrayInMemorySearch:
# documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/docarray_in_memory"
wrappers:
RequestsWrapper:
documentation: ""

View file

@ -4,6 +4,7 @@ from loguru import logger
from pydantic import BaseModel, Field
from langflow.graph.edge.utils import build_clean_params
from langflow.graph.schema import INPUT_FIELD_NAME
from langflow.services.deps import get_monitor_service
from langflow.services.monitor.utils import log_message
@ -12,7 +13,9 @@ if TYPE_CHECKING:
class SourceHandle(BaseModel):
baseClasses: List[str] = Field(..., description="List of base classes for the source handle.")
baseClasses: List[str] = Field(
..., description="List of base classes for the source handle."
)
dataType: str = Field(..., description="Data type for the source handle.")
id: str = Field(..., description="Unique identifier for the source handle.")
@ -20,7 +23,9 @@ class SourceHandle(BaseModel):
class TargetHandle(BaseModel):
fieldName: str = Field(..., description="Field name for the target handle.")
id: str = Field(..., description="Unique identifier for the target handle.")
inputTypes: Optional[List[str]] = Field(None, description="List of input types for the target handle.")
inputTypes: Optional[List[str]] = Field(
None, description="List of input types for the target handle."
)
type: str = Field(..., description="Type of the target handle.")
@ -49,16 +54,24 @@ class Edge:
def validate_handles(self, source, target) -> None:
if self.target_handle.inputTypes is None:
self.valid_handles = self.target_handle.type in self.source_handle.baseClasses
self.valid_handles = (
self.target_handle.type in self.source_handle.baseClasses
)
else:
self.valid_handles = (
any(baseClass in self.target_handle.inputTypes for baseClass in self.source_handle.baseClasses)
any(
baseClass in self.target_handle.inputTypes
for baseClass in self.source_handle.baseClasses
)
or self.target_handle.type in self.source_handle.baseClasses
)
if not self.valid_handles:
logger.debug(self.source_handle)
logger.debug(self.target_handle)
raise ValueError(f"Edge between {source.vertex_type} and {target.vertex_type} " f"has invalid handles")
raise ValueError(
f"Edge between {source.vertex_type} and {target.vertex_type} "
f"has invalid handles"
)
def __setstate__(self, state):
self.source_id = state["source_id"]
@ -75,7 +88,11 @@ class Edge:
# Both lists contain strings and sometimes a string contains the value we are
# looking for e.g. source_types=["Chain"] and target_reqs=["LLMChain"]
# so we need to check if any of the strings in source_types is in target_reqs
self.valid = any(output in target_req for output in self.source_types for target_req in self.target_reqs)
self.valid = any(
output in target_req
for output in self.source_types
for target_req in self.target_reqs
)
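
A standalone sketch of the substring-containment check above; the literal values are illustrative and mirror the comment's example, not real vertex data.

source_types = ["Chain"]    # what the source vertex can emit
target_reqs = ["LLMChain"]  # what the target parameter accepts
valid = any(
    output in target_req
    for output in source_types
    for target_req in target_reqs
)
assert valid  # "Chain" is a substring of "LLMChain"
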
# Get what type of input the target node is expecting
self.matched_type = next(
@ -86,7 +103,10 @@ class Edge:
if no_matched_type:
logger.debug(self.source_types)
logger.debug(self.target_reqs)
raise ValueError(f"Edge between {source.vertex_type} and {target.vertex_type} " f"has no matched type")
raise ValueError(
f"Edge between {source.vertex_type} and {target.vertex_type} "
f"has no matched type"
)
def __repr__(self) -> str:
return (
@ -98,7 +118,11 @@ class Edge:
return hash(self.__repr__())
def __eq__(self, __value: object) -> bool:
return self.__repr__() == __value.__repr__() if isinstance(__value, Edge) else False
return (
self.__repr__() == __value.__repr__()
if isinstance(__value, Edge)
else False
)
class ContractEdge(Edge):
@ -137,14 +161,15 @@ class ContractEdge(Edge):
log_transaction(self, source, target, "success")
# If the target vertex is a power component we log messages
if target.vertex_type == "ChatOutput" and (
isinstance(target.params.get("message"), str) or isinstance(target.params.get("message"), dict)
isinstance(target.params.get(INPUT_FIELD_NAME), str)
or isinstance(target.params.get(INPUT_FIELD_NAME), dict)
):
if target.params.get("message") == "":
return self.result
await log_message(
sender=target.params.get("sender", ""),
sender_name=target.params.get("sender_name", ""),
message=target.params.get("message", {}),
message=target.params.get(INPUT_FIELD_NAME, {}),
session_id=target.params.get("session_id", ""),
artifacts=target.artifacts,
)
@ -154,7 +179,9 @@ class ContractEdge(Edge):
return f"{self.source_id} -[{self.target_param}]-> {self.target_id}"
def log_transaction(edge: ContractEdge, source: "Vertex", target: "Vertex", status, error=None):
def log_transaction(
edge: ContractEdge, source: "Vertex", target: "Vertex", status, error=None
):
try:
monitor_service = get_monitor_service()
clean_params = build_clean_params(target)

View file

@ -1,5 +1,6 @@
import asyncio
from collections import defaultdict, deque
from typing import Dict, Generator, List, Optional, Type, Union
from typing import TYPE_CHECKING, Dict, Generator, List, Optional, Type, Union
from langchain.chains.base import Chain
from loguru import logger
@ -7,13 +8,21 @@ from loguru import logger
from langflow.graph.edge.base import ContractEdge
from langflow.graph.graph.constants import lazy_load_vertex_dict
from langflow.graph.graph.utils import process_flow
from langflow.graph.schema import InterfaceComponentTypes
from langflow.graph.schema import INPUT_FIELD_NAME, InterfaceComponentTypes
from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.types import (ChatVertex, FileToolVertex, LLMVertex,
RoutingVertex, ToolkitVertex)
from langflow.graph.vertex.types import (
ChatVertex,
FileToolVertex,
LLMVertex,
RoutingVertex,
ToolkitVertex,
)
from langflow.interface.tools.constants import FILE_TOOLS
from langflow.utils import payload
if TYPE_CHECKING:
from langflow.graph.schema import ResultData
class Graph:
"""A class representing a graph of vertices and edges."""
@ -24,14 +33,16 @@ class Graph:
edges: List[Dict[str, str]],
flow_id: Optional[str] = None,
) -> None:
self.inputs = []
self.outputs = []
self._vertices = nodes
self._edges = edges
self.raw_graph_data = {"nodes": nodes, "edges": edges}
self._runs = 0
self._updates = 0
self.flow_id = flow_id
self._is_input_vertices = []
self._is_output_vertices = []
self._has_session_id_vertices = []
self._sorted_vertices_layers = []
self.top_level_vertices = []
for vertex in self._vertices:
@ -44,6 +55,67 @@ class Graph:
self.inactive_vertices = set()
self._build_graph()
self.build_graph_maps()
self.define_vertices_lists()
@property
def sorted_vertices_layers(self):
if not self._sorted_vertices_layers:
self.sort_vertices()
return self._sorted_vertices_layers
def define_vertices_lists(self):
"""
Defines the lists of vertices that are inputs, that are outputs, and that have a session_id.
"""
attributes = ["is_input", "is_output", "has_session_id"]
for vertex in self.vertices:
for attribute in attributes:
if getattr(vertex, attribute):
getattr(self, f"_{attribute}_vertices").append(vertex.id)
async def _run(self, inputs: Dict[str, str], stream: bool) -> List["ResultData"]:
"""Runs the graph with the given inputs."""
for vertex_id in self._is_input_vertices:
vertex = self.get_vertex(vertex_id)
if vertex is None:
raise ValueError(f"Vertex {vertex_id} not found")
vertex.update_raw_params(inputs)
try:
await self.process()
self.increment_run_count()
except Exception as exc:
logger.exception(exc)
raise ValueError(f"Error running graph: {exc}") from exc
outputs = []
for vertex_id in self._is_output_vertices:
vertex = self.get_vertex(vertex_id)
if vertex is None:
raise ValueError(f"Vertex {vertex_id} not found")
if not stream and hasattr(vertex, "consume_async_generator"):
await vertex.consume_async_generator()
outputs.append(vertex.result)
return outputs
async def run(
self, inputs: Dict[str, Union[str, list[str]]], stream: bool
) -> List["ResultData"]:
"""Runs the graph with the given inputs."""
# inputs is e.g. {"input_value": "Hello, world!"}
# we need to go through self.inputs and update the self._raw_params
# of the vertices that are inputs
# if the value is a list, we need to run multiple times
outputs = []
inputs_values = inputs.get(INPUT_FIELD_NAME)
if not isinstance(inputs_values, list):
inputs_values = [inputs_values]
for input_value in inputs_values:
run_outputs = await self._run(
{INPUT_FIELD_NAME: input_value}, stream=stream
)
logger.debug(f"Run outputs: {run_outputs}")
outputs.extend(run_outputs)
return outputs
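
A minimal sketch of the fan-out behaviour of Graph.run, using a stand-in coroutine instead of a real graph; run_all and echo_run are hypothetical names, not part of the codebase.

import asyncio

async def run_all(run_one, values):
    # Mirrors Graph.run: a scalar input is wrapped in a list, then each
    # value triggers one full run and the outputs are concatenated.
    if not isinstance(values, list):
        values = [values]
    outputs = []
    for value in values:
        outputs.extend(await run_one({"input_value": value}))
    return outputs

async def echo_run(inputs):
    # Stand-in for Graph._run: returns its input as the single output.
    return [inputs["input_value"]]

print(asyncio.run(run_all(echo_run, ["Hi", "Hello"])))  # ['Hi', 'Hello']
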
@property
def metadata(self):
@ -157,7 +229,10 @@ class Graph:
self.edges = new_edges
def vertex_data_is_identical(self, vertex: Vertex, other_vertex: Vertex) -> bool:
return vertex.__repr__() == other_vertex.__repr__()
data_is_equivalent = vertex.__repr__() == other_vertex.__repr__()
if not data_is_equivalent:
return False
return self.vertex_edges_are_identical(vertex, other_vertex)
def vertex_edges_are_identical(self, vertex: Vertex, other_vertex: Vertex) -> bool:
same_length = len(vertex.edges) == len(other_vertex.edges)
@ -246,28 +321,6 @@ class Graph:
# Now that we have the vertices and edges
# We need to map the vertices that are connected to
# to ChatVertex instances
self._map_chat_vertices()
def _map_chat_vertices(self) -> None:
"""Maps the vertices that are connected to ChatVertex instances."""
# For each edge, we need to check if the source or target vertex is a ChatVertex
# If it is, we need to update the other vertex `is_external` attribute
# and store the id of the ChatVertex in the attributes self.inputs and self.outputs
for edge in self.edges:
source_vertex = self.get_vertex(edge.source_id)
target_vertex = self.get_vertex(edge.target_id)
if isinstance(source_vertex, ChatVertex):
# The source vertex is a ChatVertex
# thus the target vertex is an external vertex
# and the source vertex is an input
target_vertex.has_external_input = True
self.inputs.append(source_vertex.id)
if isinstance(target_vertex, ChatVertex):
# The target vertex is a ChatVertex
# thus the source vertex is an external vertex
# and the target vertex is an output
source_vertex.has_external_output = True
self.outputs.append(target_vertex.id)
def remove_vertex(self, vertex_id: str) -> None:
"""Removes a vertex from the graph."""
@ -317,12 +370,20 @@ class Graph:
except KeyError:
raise ValueError(f"Vertex {vertex_id} not found")
def get_vertex_edges(self, vertex_id: str) -> List[ContractEdge]:
def get_vertex_edges(
self,
vertex_id: str,
is_target: Optional[bool] = None,
is_source: Optional[bool] = None,
) -> List[ContractEdge]:
"""Returns a list of edges for a given vertex."""
# Return the edges that have the vertex_id as source or target.
# is_source/is_target are tri-state: None keeps a direction, False excludes it.
return [
edge
for edge in self.edges
if edge.source_id == vertex_id or edge.target_id == vertex_id
if (edge.source_id == vertex_id and is_source is not False)
or (edge.target_id == vertex_id and is_target is not False)
]
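
A self-contained sketch of the tri-state direction filter above: None keeps a direction, only an explicit False excludes it; edges are reduced to plain (source_id, target_id) tuples.

edges = [("a", "b"), ("b", "c")]  # (source_id, target_id) pairs

def vertex_edges(vertex_id, is_target=None, is_source=None):
    # None means "don't care"; only an explicit False filters a direction out.
    return [
        e
        for e in edges
        if (e[0] == vertex_id and is_source is not False)
        or (e[1] == vertex_id and is_target is not False)
    ]

assert vertex_edges("b") == [("a", "b"), ("b", "c")]
assert vertex_edges("b", is_target=False) == [("b", "c")]  # outgoing only
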
def get_vertices_with_target(self, vertex_id: str) -> List[Vertex]:
@ -344,6 +405,38 @@ class Graph:
raise ValueError("No root vertex found")
return await root_vertex.build()
async def process(self) -> "Graph":
"""Processes the graph with vertices in each layer run in parallel."""
vertices_layers = self.sorted_vertices_layers
for layer_index, layer in enumerate(vertices_layers):
tasks = []
for vertex_id in layer:
vertex = self.get_vertex(vertex_id)
task = asyncio.create_task(
vertex.build(), name=f"layer-{layer_index}-vertex-{vertex_id}"
)
tasks.append(task)
logger.debug(f"Running layer {layer_index} with {len(tasks)} tasks")
await self._execute_tasks(tasks)
logger.debug("Graph processing complete")
return self
async def _execute_tasks(self, tasks):
"""Executes tasks in parallel, handling exceptions for each task."""
results = []
for i, task in enumerate(asyncio.as_completed(tasks)):
try:
result = await task
results.append(result)
except Exception as e:
# Log the exception along with the task name for easier debugging
# task_name = task.get_name()
# a coroutine has no attribute get_name
task_name = tasks[i].get_name()
logger.error(f"Task {task_name} failed with exception: {e}")
return results
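
Note that asyncio.as_completed yields awaitables in completion order while tasks[i] indexes creation order, so the logged name can point at the wrong task when runs finish out of order. A hedged alternative sketch that keeps names paired with outcomes (not the commit's code):

import asyncio

async def execute_tasks(tasks):
    # gather with return_exceptions=True preserves the 1:1 pairing between
    # each task and its outcome, so the logged name is always the right one.
    results = []
    outcomes = await asyncio.gather(*tasks, return_exceptions=True)
    for task, outcome in zip(tasks, outcomes):
        if isinstance(outcome, Exception):
            print(f"Task {task.get_name()} failed with exception: {outcome}")
        else:
            results.append(outcome)
    return results
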
def topological_sort(self) -> List[Vertex]:
"""
Performs a topological sort of the vertices in the graph.
@ -611,6 +704,7 @@ class Graph:
vertices_layers = self.sort_by_avg_build_time(vertices_layers)
vertices_layers = self.sort_chat_inputs_first(vertices_layers)
self.increment_run_count()
self._sorted_vertices_layers = vertices_layers
return vertices_layers
def sort_interface_components_first(

View file

@ -1,8 +1,39 @@
from enum import Enum
from typing import Any, Optional
from langflow.graph.utils import serialize_field
from pydantic import BaseModel, Field, field_serializer
class InterfaceComponentTypes(Enum):
class ResultData(BaseModel):
results: Optional[Any] = Field(default_factory=dict)
artifacts: Optional[Any] = Field(default_factory=dict)
timedelta: Optional[float] = None
duration: Optional[str] = None
@field_serializer("results")
def serialize_results(self, value):
if isinstance(value, dict):
return {key: serialize_field(val) for key, val in value.items()}
return serialize_field(value)
class InterfaceComponentTypes(str, Enum):
# ChatInput and ChatOutput are the only ones that are
# power components
ChatInput = "ChatInput"
ChatOutput = "ChatOutput"
TextInput = "TextInput"
TextOutput = "TextOutput"
INPUT_COMPONENTS = [
InterfaceComponentTypes.ChatInput,
InterfaceComponentTypes.TextInput,
]
OUTPUT_COMPONENTS = [
InterfaceComponentTypes.ChatOutput,
InterfaceComponentTypes.TextOutput,
]
INPUT_FIELD_NAME = "input_value"
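
Because InterfaceComponentTypes now mixes in str, its members behave as plain strings, which is what lets Vertex derive is_input/is_output by substring-matching component names against vertex ids. A small sketch with a made-up id and a stand-in enum:

from enum import Enum

class ComponentTypes(str, Enum):  # stand-in mirroring InterfaceComponentTypes
    ChatInput = "ChatInput"
    TextInput = "TextInput"

vertex_id = "ChatInput-x1Y2z"  # hypothetical vertex id
# str-mixin members support substring checks and compare equal to raw strings.
assert any(name in vertex_id for name in [ComponentTypes.ChatInput, ComponentTypes.TextInput])
assert ComponentTypes.ChatInput == "ChatInput"
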

View file

@ -1,5 +1,8 @@
from typing import Any, Union
from langchain_core.documents import Document
from pydantic import BaseModel
from langflow.interface.utils import extract_input_variables_from_prompt
@ -33,3 +36,17 @@ def flatten_list(list_of_lists: list[Union[list, Any]]) -> list:
else:
new_list.append(item)
return new_list
def serialize_field(value):
"""Unified serialization function for handling both BaseModel and Document types,
including handling lists of these types."""
if isinstance(value, (list, tuple)):
return [serialize_field(v) for v in value]
elif isinstance(value, Document):
return value.to_json()
elif isinstance(value, BaseModel):
return value.model_dump()
elif isinstance(value, str):
return {"result": value}
return value
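
A quick check of serialize_field's dispatch, assuming the function above is in scope and pydantic is installed:

from pydantic import BaseModel

class Point(BaseModel):
    x: int
    y: int

# Lists recurse, BaseModels dump to dicts, and bare strings are wrapped
# so the caller always receives a JSON-serializable object.
assert serialize_field([Point(x=1, y=2)]) == [{"x": 1, "y": 2}]
assert serialize_field("hello") == {"result": "hello"}
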

View file

@ -7,7 +7,8 @@ from typing import (TYPE_CHECKING, Any, Callable, Coroutine, Dict, List,
from loguru import logger
from langflow.graph.schema import InterfaceComponentTypes
from langflow.graph.schema import (INPUT_COMPONENTS, OUTPUT_COMPONENTS,
InterfaceComponentTypes, ResultData)
from langflow.graph.utils import UnbuiltObject, UnbuiltResult
from langflow.graph.vertex.utils import generate_result
from langflow.interface.initialize import loading
@ -17,12 +18,11 @@ from langflow.utils.constants import DIRECT_TYPES
from langflow.utils.util import sync_to_async
if TYPE_CHECKING:
from langflow.api.v1.schemas import ResultData
from langflow.graph.edge.base import ContractEdge
from langflow.graph.graph.base import Graph
class VertexStates(Enum):
class VertexStates(str, Enum):
"""Vertex are related to it being active, inactive, or in an error state."""
ACTIVE = "active"
@ -41,10 +41,21 @@ class Vertex:
) -> None:
# is_external means that the Vertex send or receives data from
# an external source (e.g the chat)
self.will_stream = False
self.updated_raw_params = False
self.id: str = data["id"]
self.is_input = any(
input_component_name in self.id for input_component_name in INPUT_COMPONENTS
)
self.is_output = any(
output_component_name in self.id
for output_component_name in OUTPUT_COMPONENTS
)
self.has_session_id = None
self._custom_component = None
self.has_external_input = False
self.has_external_output = False
self.graph = graph
self.id: str = data["id"]
self._data = data
self.base_type: Optional[str] = base_type
self._parse_data()
@ -61,7 +72,7 @@ class Vertex:
self.parent_is_top_level = False
self.layer = None
self.should_run = True
self.result: Optional["ResultData"] = None
self.result: Optional[ResultData] = None
try:
self.is_interface_component = InterfaceComponentTypes(self.vertex_type)
except ValueError:
@ -116,7 +127,7 @@ class Vertex:
)
return edge_results
def set_result(self, result: "ResultData") -> None:
def set_result(self, result: ResultData) -> None:
self.result = result
def get_built_result(self):
@ -202,12 +213,17 @@ class Vertex:
self.output = self.data["node"]["base_classes"]
self.display_name = self.data["node"]["display_name"]
self.pinned = self.data["node"].get("pinned", False)
self.selected_output_type = self.data["node"].get("selected_output_type")
self.is_input = self.data["node"].get("is_input") or self.is_input
self.is_output = self.data["node"].get("is_output") or self.is_output
template_dicts = {
key: value
for key, value in self.data["node"]["template"].items()
if isinstance(value, dict)
}
self.has_session_id = "session_id" in template_dicts
self.required_inputs = [
template_dicts[key]["type"]
for key, value in template_dicts.items()
@ -267,6 +283,10 @@ class Vertex:
if self.graph is None:
raise ValueError("Graph not found")
if self.updated_raw_params:
self.updated_raw_params = False
return
template_dict = {
key: value
for key, value in self.data["node"]["template"].items()
@ -358,6 +378,22 @@ class Vertex:
self.params = params
self._raw_params = params.copy()
def update_raw_params(self, new_params: Dict[str, str]):
"""
Update the raw parameters of the vertex with the given new parameters.
Args:
new_params (Dict[str, str]): The new parameters to update.
Note:
If any key in new_params currently maps to a Vertex in self._raw_params, the update is skipped entirely and the wired value is kept.
"""
# First check if the input_value in _raw_params is not a vertex
if any(isinstance(self._raw_params.get(key), Vertex) for key in new_params):
return
self._raw_params.update(new_params)
self.updated_raw_params = True
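
A standalone sketch of the guard in update_raw_params, with a stand-in class for Vertex:

class _Vertex:  # stand-in for the real Vertex class
    pass

raw_params = {"input_value": _Vertex()}  # input already wired to another vertex
new_params = {"input_value": "Hello"}

# Mirrors the guard above: if any targeted key already holds a Vertex,
# the whole update is skipped and the wired connection wins.
if not any(isinstance(raw_params.get(key), _Vertex) for key in new_params):
    raw_params.update(new_params)

assert isinstance(raw_params["input_value"], _Vertex)
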
async def _build(self, user_id=None):
"""
Initiate the build process.
@ -369,6 +405,18 @@ class Vertex:
self._built = True
def _finalize_build(self):
result_dict = self.get_built_result()
# We need to set the artifacts to pass information
# to the frontend
self.set_artifacts()
artifacts = self.artifacts
result_dict = ResultData(
results=result_dict,
artifacts=artifacts,
)
self.set_result(result_dict)
async def _run(
self,
user_id: str,
@ -406,6 +454,8 @@ class Vertex:
await self._build_node_and_update_params(key, value, user_id)
elif isinstance(value, list) and self._is_list_of_nodes(value):
await self._build_list_of_nodes_and_update_params(key, value, user_id)
elif key not in self.params:
self.params[key] = value
def _is_node(self, value):
"""
@ -500,15 +550,18 @@ class Vertex:
if self.base_type is None:
raise ValueError(f"Base type for node {self.display_name} not found")
try:
result = await loading.instantiate_class(
node_type=self.vertex_type,
base_type=self.base_type,
params=self.params,
user_id=user_id,
vertex=self,
)
self._update_built_object_and_artifacts(result)
except Exception as exc:
logger.exception(exc)
raise ValueError(
f"Error building node {self.display_name}: {str(exc)}"
) from exc
@ -518,7 +571,10 @@ class Vertex:
Updates the built object and its artifacts.
"""
if isinstance(result, tuple):
self._built_object, self.artifacts = result
if len(result) == 2:
self._built_object, self.artifacts = result
elif len(result) == 3:
self._custom_component, self._built_object, self.artifacts = result
else:
self._built_object = result
@ -535,7 +591,7 @@ class Vertex:
logger.warning(message)
def _reset(self):
def _reset(self, params_update: Optional[Dict[str, Any]] = None):
self._built = False
self._built_object = UnbuiltObject()
self._built_result = UnbuiltResult()
@ -573,6 +629,8 @@ class Vertex:
step(user_id=user_id, **kwargs)
self.steps_ran.append(step)
self._finalize_build()
return await self.get_requester_result(requester)
async def get_requester_result(self, requester: Optional["Vertex"]):

View file

@ -6,11 +6,12 @@ import yaml
from langchain_core.messages import AIMessage
from loguru import logger
from langflow.graph.schema import INPUT_FIELD_NAME
from langflow.graph.utils import UnbuiltObject, flatten_list
from langflow.graph.vertex.base import StatefulVertex, StatelessVertex
from langflow.interface.utils import extract_input_variables_from_prompt
from langflow.schema import Record
from langflow.services.monitor.utils import log_message
from langflow.services.monitor.utils import log_vertex_build
from langflow.utils.schemas import ChatOutputResponse
@ -344,17 +345,6 @@ class ChatVertex(StatelessVertex):
def build_stream_url(self):
return f"/api/v1/build/{self.graph.flow_id}/{self.id}/stream"
async def _build(self, user_id=None):
"""
Initiate the build process.
"""
logger.debug(f"Building {self.vertex_type}")
await self._build_each_node_in_params_dict(user_id)
await self._get_and_instantiate_class(user_id)
self._validate_built_object()
self._built = True
def _built_object_repr(self):
if self.task_id and self.is_task:
if task := self.get_task():
@ -373,7 +363,7 @@ class ChatVertex(StatelessVertex):
artifacts = None
sender = self.params.get("sender", None)
sender_name = self.params.get("sender_name", None)
message = self.params.get("message", None)
message = self.params.get(INPUT_FIELD_NAME, None)
stream_url = None
if isinstance(self._built_object, AIMessage):
artifacts = ChatOutputResponse.from_message(
@ -404,10 +394,12 @@ class ChatVertex(StatelessVertex):
sender_name=sender_name,
stream_url=stream_url,
)
self.will_stream = stream_url is not None
if artifacts:
self.artifacts = artifacts.model_dump()
if isinstance(self._built_object, (AsyncIterator, Iterator)):
if self.params["as_record"]:
if self.params["return_record"]:
self._built_object = Record(text=message, data=self.artifacts)
else:
self._built_object = message
@ -417,7 +409,7 @@ class ChatVertex(StatelessVertex):
await super()._run(*args, **kwargs)
async def stream(self):
iterator = self.params.get("message", None)
iterator = self.params.get(INPUT_FIELD_NAME, None)
if not isinstance(iterator, (AsyncIterator, Iterator)):
raise ValueError("The message must be an iterator or an async iterator.")
is_async = isinstance(iterator, AsyncIterator)
@ -434,24 +426,35 @@ class ChatVertex(StatelessVertex):
message = message.text if hasattr(message, "text") else message
yield message
complete_message += message
self._built_object = Record(text=complete_message, data=self.artifacts)
self._built_result = complete_message
# Update artifacts with the message
# and remove the stream_url
self.artifacts = ChatOutputResponse(
message=complete_message,
sender=self.params.get("sender", ""),
sender_name=self.params.get("sender_name", ""),
).model_dump()
self.params[INPUT_FIELD_NAME] = complete_message
self._built_object = Record(text=complete_message, data=self.artifacts)
self._built_result = complete_message
# Update artifacts with the message
# and remove the stream_url
self._finalize_build()
logger.debug(f"Streamed message: {complete_message}")
await log_message(
sender=self.params.get("sender", ""),
sender_name=self.params.get("sender_name", ""),
message=complete_message,
session_id=self.params.get("session_id", ""),
await log_vertex_build(
flow_id=self.graph.flow_id,
vertex_id=self.id,
valid=True,
params=self._built_object_repr(),
data=self.result,
artifacts=self.artifacts,
)
self._validate_built_object()
self._built = True
async def consume_async_generator(self):
async for _ in self.stream():
pass
class RoutingVertex(StatelessVertex):
def __init__(self, data: Dict, graph):

View file

@ -37,4 +37,6 @@ ATTR_FUNC_MAPPING = {
"documentation": getattr_return_str,
"icon": validate_icon,
"pinned": getattr_return_bool,
"is_input": getattr_return_bool,
"is_output": getattr_return_bool,
}

View file

@ -6,6 +6,9 @@ from typing import Any, Dict, List, Type, Union
from cachetools import TTLCache, cachedmethod, keys
from fastapi import HTTPException
from loguru import logger
from langflow.interface.custom.eval import eval_custom_component_code
from langflow.interface.custom.schema import CallableCodeDetails, ClassCodeDetails
@ -92,7 +95,9 @@ class CodeParser:
elif isinstance(node, ast.ImportFrom):
for alias in node.names:
if alias.asname:
self.data["imports"].append((node.module, f"{alias.name} as {alias.asname}"))
self.data["imports"].append(
(node.module, f"{alias.name} as {alias.asname}")
)
else:
self.data["imports"].append((node.module, alias.name))
@ -141,7 +146,9 @@ class CodeParser:
return_type = None
if node.returns:
return_type_str = ast.unparse(node.returns)
eval_env = self.construct_eval_env(return_type_str, tuple(self.data["imports"]))
eval_env = self.construct_eval_env(
return_type_str, tuple(self.data["imports"])
)
try:
return_type = eval(return_type_str, eval_env)
@ -183,14 +190,22 @@ class CodeParser:
num_defaults = len(node.args.defaults)
num_missing_defaults = num_args - num_defaults
missing_defaults = [None] * num_missing_defaults
default_values = [ast.unparse(default).strip("'") if default else None for default in node.args.defaults]
default_values = [
ast.unparse(default).strip("'") if default else None
for default in node.args.defaults
]
# Now check all default values to see if there
# are any "None" values in the middle
default_values = [None if value == "None" else value for value in default_values]
default_values = [
None if value == "None" else value for value in default_values
]
defaults = missing_defaults + default_values
args = [self.parse_arg(arg, default) for arg, default in zip(node.args.args, defaults)]
args = [
self.parse_arg(arg, default)
for arg, default in zip(node.args.args, defaults)
]
return args
def parse_varargs(self, node: ast.FunctionDef) -> List[Dict[str, Any]]:
@ -208,11 +223,17 @@ class CodeParser:
"""
Parses the keyword-only arguments of a function or method node.
"""
kw_defaults = [None] * (len(node.args.kwonlyargs) - len(node.args.kw_defaults)) + [
ast.unparse(default) if default else None for default in node.args.kw_defaults
kw_defaults = [None] * (
len(node.args.kwonlyargs) - len(node.args.kw_defaults)
) + [
ast.unparse(default) if default else None
for default in node.args.kw_defaults
]
args = [self.parse_arg(arg, default) for arg, default in zip(node.args.kwonlyargs, kw_defaults)]
args = [
self.parse_arg(arg, default)
for arg, default in zip(node.args.kwonlyargs, kw_defaults)
]
return args
def parse_kwargs(self, node: ast.FunctionDef) -> List[Dict[str, Any]]:
@ -268,15 +289,28 @@ class CodeParser:
method = self.parse_callable_details(stmt)
return (method, True) if stmt.name == "__init__" else (method, False)
def get_base_classes(self):
"""
Returns the base classes of the custom component class.
"""
try:
bases = self.execute_and_inspect_classes(self.code)
except Exception as e:
# If the code cannot be executed, log the error and re-raise it
logger.exception(e)
bases = []
raise e
return bases
def parse_classes(self, node: ast.ClassDef) -> None:
"""
Extracts "classes" from the code, including inheritance and init methods.
"""
bases = self.get_base_classes() or [ast.unparse(b) for b in node.bases]
class_details = ClassCodeDetails(
name=node.name,
doc=ast.get_docstring(node),
bases=[ast.unparse(base) for base in node.bases],
bases=bases,
attributes=[],
methods=[],
init=None,
@ -303,11 +337,25 @@ class CodeParser:
Extracts global variables from the code.
"""
global_var = {
"targets": [t.id if hasattr(t, "id") else ast.dump(t) for t in node.targets],
"targets": [
t.id if hasattr(t, "id") else ast.dump(t) for t in node.targets
],
"value": ast.unparse(node.value),
}
self.data["global_vars"].append(global_var)
def execute_and_inspect_classes(self, code: str):
custom_component_class = eval_custom_component_code(code)
custom_component = custom_component_class()
dunder_class = custom_component.__class__
# Get the base classes at two levels of inheritance
bases = []
for base in dunder_class.__bases__:
bases.append(base.__name__)
for bases_base in base.__bases__:
bases.append(bases_base.__name__)
return bases
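
A self-contained illustration of the two-level base-class walk performed by execute_and_inspect_classes:

class A: ...
class B(A): ...
class C(B): ...

bases = []
for base in C.__bases__:               # first level: B
    bases.append(base.__name__)
    for bases_base in base.__bases__:  # second level: A
        bases.append(bases_base.__name__)

assert bases == ["B", "A"]
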
def parse_code(self) -> Dict[str, Any]:
"""
Runs all parsing operations and returns the resulting data.

View file

@ -1,6 +1,15 @@
import operator
from pathlib import Path
from typing import Any, Callable, ClassVar, List, Optional, Sequence, Union
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
List,
Optional,
Sequence,
Union,
)
from uuid import UUID
import yaml
@ -24,6 +33,10 @@ from langflow.services.deps import (
from langflow.services.storage.service import StorageService
from langflow.utils import validate
if TYPE_CHECKING:
from langflow.graph.edge.base import ContractEdge
from langflow.graph.vertex.base import Vertex
class CustomComponent(Component):
display_name: Optional[str] = None
@ -32,6 +45,12 @@ class CustomComponent(Component):
"""The description of the component. Defaults to None."""
icon: Optional[str] = None
"""The icon of the component. It should be an emoji. Defaults to None."""
is_input: Optional[bool] = None
"""The input state of the component. Defaults to None.
If True, the component must have a field named 'input_value'."""
is_output: Optional[bool] = None
"""The output state of the component. Defaults to None.
If True, the component must have a field named 'input_value'."""
code: Optional[str] = None
"""The code of the component. Defaults to None."""
field_config: dict = {}
@ -40,6 +59,12 @@ class CustomComponent(Component):
"""The field order of the component. Defaults to an empty list."""
pinned: Optional[bool] = False
"""The default pinned state of the component. Defaults to False."""
build_parameters: Optional[dict] = None
"""The build parameters of the component. Defaults to None."""
selected_output_type: Optional[str] = None
"""The selected output type of the component. Defaults to None."""
vertex: Optional["Vertex"] = None
"""The edge target parameter of the component. Defaults to None."""
code_class_base_inheritance: ClassVar[str] = "CustomComponent"
function_entrypoint_name: ClassVar[str] = "build"
function: Optional[Callable] = None
@ -47,6 +72,7 @@ class CustomComponent(Component):
user_id: Optional[Union[UUID, str]] = None
status: Optional[Any] = None
"""The status of the component. This is displayed on the frontend. Defaults to None."""
_tree: Optional[dict] = None
def __init__(self, **data):
@ -88,7 +114,9 @@ class CustomComponent(Component):
def tree(self):
return self.get_code_tree(self.code or "")
def to_records(self, data: Any, text_key: str = "text", data_key: str = "data") -> List[dict]:
def to_records(
self, data: Any, text_key: str = "text", data_key: str = "data"
) -> List[dict]:
"""
Convert data into a list of records.
@ -115,7 +143,9 @@ class CustomComponent(Component):
return records
def create_references_from_records(self, records: List[dict], include_data: bool = False) -> str:
def create_references_from_records(
self, records: List[dict], include_data: bool = False
) -> str:
"""
Create references from a list of records.
@ -150,7 +180,8 @@ class CustomComponent(Component):
detail={
"error": "Type hint Error",
"traceback": (
"Prompt type is not supported in the build method." " Try using PromptTemplate instead."
"Prompt type is not supported in the build method."
" Try using PromptTemplate instead."
),
},
)
@ -164,14 +195,20 @@ class CustomComponent(Component):
if not self.code:
return {}
component_classes = [cls for cls in self.tree["classes"] if self.code_class_base_inheritance in cls["bases"]]
component_classes = [
cls
for cls in self.tree["classes"]
if self.code_class_base_inheritance in cls["bases"]
]
if not component_classes:
return {}
# Assume the first Component class is the one we're interested in
component_class = component_classes[0]
build_methods = [
method for method in component_class["methods"] if method["name"] == self.function_entrypoint_name
method
for method in component_class["methods"]
if method["name"] == self.function_entrypoint_name
]
return build_methods[0] if build_methods else {}
@ -228,7 +265,9 @@ class CustomComponent(Component):
# Retrieve and decrypt the credential by name for the current user
db_service = get_db_service()
with session_getter(db_service) as session:
return credential_service.get_credential(user_id=self._user_id or "", name=name, session=session)
return credential_service.get_credential(
user_id=self._user_id or "", name=name, session=session
)
return get_credential
@ -238,7 +277,9 @@ class CustomComponent(Component):
credential_service = get_credential_service()
db_service = get_db_service()
with session_getter(db_service) as session:
return credential_service.list_credentials(user_id=self._user_id, session=session)
return credential_service.list_credentials(
user_id=self._user_id, session=session
)
def index(self, value: int = 0):
"""Returns a function that returns the value at the given index in the iterable."""
@ -289,7 +330,11 @@ class CustomComponent(Component):
if flow_id:
flow = session.query(Flow).get(flow_id)
elif flow_name:
flow = (session.query(Flow).filter(Flow.name == flow_name).filter(Flow.user_id == self.user_id)).first()
flow = (
session.query(Flow)
.filter(Flow.name == flow_name)
.filter(Flow.user_id == self.user_id)
).first()
else:
raise ValueError("Either flow_name or flow_id must be provided")

View file

@ -27,14 +27,18 @@ from langflow.utils import validate
from langflow.utils.util import get_base_classes
def add_output_types(frontend_node: CustomComponentFrontendNode, return_types: List[str]):
def add_output_types(
frontend_node: CustomComponentFrontendNode, return_types: List[str]
):
"""Add output types to the frontend node"""
for return_type in return_types:
if return_type is None:
raise HTTPException(
status_code=400,
detail={
"error": ("Invalid return type. Please check your code and try again."),
"error": (
"Invalid return type. Please check your code and try again."
),
"traceback": traceback.format_exc(),
},
)
@ -63,14 +67,18 @@ def reorder_fields(frontend_node: CustomComponentFrontendNode, field_order: List
frontend_node.template.fields = reordered_fields
def add_base_classes(frontend_node: CustomComponentFrontendNode, return_types: List[str]):
def add_base_classes(
frontend_node: CustomComponentFrontendNode, return_types: List[str]
):
"""Add base classes to the frontend node"""
for return_type_instance in return_types:
if return_type_instance is None:
raise HTTPException(
status_code=400,
detail={
"error": ("Invalid return type. Please check your code and try again."),
"error": (
"Invalid return type. Please check your code and try again."
),
"traceback": traceback.format_exc(),
},
)
@ -145,10 +153,14 @@ def add_new_custom_field(
# If options is a list, then it's a dropdown
# If options is None, then it's a list of strings
is_list = isinstance(field_config.get("options"), list)
field_config["is_list"] = is_list or field_config.get("is_list", False) or field_contains_list
field_config["is_list"] = (
is_list or field_config.get("is_list", False) or field_contains_list
)
if "name" in field_config:
warnings.warn("The 'name' key in field_config is used to build the object and can't be changed.")
warnings.warn(
"The 'name' key in field_config is used to build the object and can't be changed."
)
required = field_config.pop("required", field_required)
placeholder = field_config.pop("placeholder", "")
@ -179,7 +191,9 @@ def add_extra_fields(frontend_node, field_config, function_args):
if "name" not in extra_field or extra_field["name"] == "self":
continue
field_name, field_type, field_value, field_required = get_field_properties(extra_field)
field_name, field_type, field_value, field_required = get_field_properties(
extra_field
)
config = field_config.get(field_name, {})
frontend_node = add_new_custom_field(
frontend_node,
@ -217,7 +231,9 @@ def run_build_config(
raise HTTPException(
status_code=400,
detail={
"error": ("Invalid type convertion. Please check your code and try again."),
"error": (
"Invalid type convertion. Please check your code and try again."
),
"traceback": traceback.format_exc(),
},
) from exc
@ -245,7 +261,9 @@ def run_build_config(
raise HTTPException(
status_code=400,
detail={
"error": ("Invalid type convertion. Please check your code and try again."),
"error": (
"Invalid type convertion. Please check your code and try again."
),
"traceback": traceback.format_exc(),
},
) from exc
@ -300,16 +318,24 @@ def build_custom_component_template(
frontend_node = build_frontend_node(custom_component.template_config)
logger.debug("Updated attributes")
field_config, custom_instance = run_build_config(custom_component, user_id=user_id, update_field=update_field)
field_config, custom_instance = run_build_config(
custom_component, user_id=user_id, update_field=update_field
)
logger.debug("Built field config")
entrypoint_args = custom_component.get_function_entrypoint_args
add_extra_fields(frontend_node, field_config, entrypoint_args)
frontend_node = add_code_field(frontend_node, custom_component.code, field_config.get("code", {}))
frontend_node = add_code_field(
frontend_node, custom_component.code, field_config.get("code", {})
)
add_base_classes(frontend_node, custom_component.get_function_entrypoint_return_type)
add_output_types(frontend_node, custom_component.get_function_entrypoint_return_type)
add_base_classes(
frontend_node, custom_component.get_function_entrypoint_return_type
)
add_output_types(
frontend_node, custom_component.get_function_entrypoint_return_type
)
logger.debug("Added base classes")
reorder_fields(frontend_node, custom_instance._get_field_order())
@ -321,7 +347,9 @@ def build_custom_component_template(
raise HTTPException(
status_code=400,
detail={
"error": ("Invalid type convertion. Please check your code and try again."),
"error": (
"Invalid type convertion. Please check your code and try again."
),
"traceback": traceback.format_exc(),
},
) from exc
@ -345,7 +373,9 @@ def build_custom_components(settings_service):
if not settings_service.settings.COMPONENTS_PATH:
return {}
logger.info(f"Building custom components from {settings_service.settings.COMPONENTS_PATH}")
logger.info(
f"Building custom components from {settings_service.settings.COMPONENTS_PATH}"
)
custom_components_from_file = {}
processed_paths = set()
for path in settings_service.settings.COMPONENTS_PATH:
@ -356,7 +386,9 @@ def build_custom_components(settings_service):
custom_component_dict = build_custom_component_list_from_path(path_str)
if custom_component_dict:
category = next(iter(custom_component_dict))
logger.info(f"Loading {len(custom_component_dict[category])} component(s) from category {category}")
logger.info(
f"Loading {len(custom_component_dict[category])} component(s) from category {category}"
)
custom_components_from_file = merge_nested_dicts_with_renaming(
custom_components_from_file, custom_component_dict
)
@ -400,8 +432,9 @@ def sanitize_field_config(field_config: Dict):
def build_component(component):
"""Build a single component."""
component_name = determine_component_name(component)
logger.debug(f"Building component: {component_name}")
component_template = create_component_template(component)
logger.debug(f"Building component: {component_name, component.get('output_types')}")
return component_name, component_template

View file

@ -1,6 +1,6 @@
import inspect
import json
from typing import TYPE_CHECKING, Any, Callable, Dict, Sequence, Type
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Sequence, Type
import orjson
from langchain.agents import agent as agent_module
@ -34,16 +34,17 @@ from langflow.utils import validate
if TYPE_CHECKING:
from langflow import CustomComponent
def build_vertex_in_params(params: Dict) -> Dict:
from langflow.graph.edge.base import ContractEdge
from langflow.graph.vertex.base import Vertex
# If any of the values in params is a Vertex, we will build it
return {key: value.build() if isinstance(value, Vertex) else value for key, value in params.items()}
async def instantiate_class(node_type: str, base_type: str, params: Dict, user_id=None) -> Any:
async def instantiate_class(
node_type: str,
base_type: str,
params: Dict,
user_id=None,
vertex: Optional["Vertex"] = None,
) -> Any:
"""Instantiate class from module type and key, and params"""
params = convert_params_to_sets(params)
params = convert_kwargs(params)
@ -55,7 +56,14 @@ async def instantiate_class(node_type: str, base_type: str, params: Dict, user_i
return custom_node(**params)
logger.debug(f"Instantiating {node_type} of type {base_type}")
class_object = import_by_type(_type=base_type, name=node_type)
return await instantiate_based_on_type(class_object, base_type, node_type, params, user_id=user_id)
return await instantiate_based_on_type(
class_object=class_object,
base_type=base_type,
node_type=node_type,
params=params,
user_id=user_id,
vertex=vertex,
)
def convert_params_to_sets(params):
@ -82,7 +90,14 @@ def convert_kwargs(params):
return params
async def instantiate_based_on_type(class_object, base_type, node_type, params, user_id):
async def instantiate_based_on_type(
class_object,
base_type,
node_type,
params,
user_id,
vertex,
):
if base_type == "agents":
return instantiate_agent(node_type, class_object, params)
elif base_type == "prompts":
@ -116,17 +131,32 @@ async def instantiate_based_on_type(class_object, base_type, node_type, params,
elif base_type == "memory":
return instantiate_memory(node_type, class_object, params)
elif base_type == "custom_components":
return await instantiate_custom_component(node_type, class_object, params, user_id)
return await instantiate_custom_component(
node_type,
class_object,
params,
user_id,
vertex,
)
elif base_type == "wrappers":
return instantiate_wrapper(node_type, class_object, params)
else:
return class_object(**params)
async def instantiate_custom_component(node_type, class_object, params, user_id):
async def instantiate_custom_component(
node_type, class_object, params, user_id, vertex
):
params_copy = params.copy()
class_object: Type["CustomComponent"] = eval_custom_component_code(params_copy.pop("code"))
custom_component: "CustomComponent" = class_object(user_id=user_id)
class_object: Type["CustomComponent"] = eval_custom_component_code(
params_copy.pop("code")
)
custom_component: "CustomComponent" = class_object(
user_id=user_id,
parameters=params_copy,
vertex=vertex,
selected_output_type=vertex.selected_output_type,
)
if "retriever" in params_copy and hasattr(params_copy["retriever"], "as_retriever"):
params_copy["retriever"] = params_copy["retriever"].as_retriever()
@ -141,7 +171,7 @@ async def instantiate_custom_component(node_type, class_object, params, user_id)
# Call the build method directly if it's sync
build_result = custom_component.build(**params_copy)
return build_result, {"repr": custom_component.custom_repr()}
return custom_component, build_result, {"repr": custom_component.custom_repr()}
def instantiate_wrapper(node_type, class_object, params):
@ -194,7 +224,9 @@ def instantiate_memory(node_type, class_object, params):
# I want to catch a specific attribute error that happens
# when the object does not have a cursor attribute
except Exception as exc:
if "object has no attribute 'cursor'" in str(exc) or 'object has no field "conn"' in str(exc):
if "object has no attribute 'cursor'" in str(
exc
) or 'object has no field "conn"' in str(exc):
raise AttributeError(
(
"Failed to build connection to database."
@ -237,7 +269,9 @@ def instantiate_agent(node_type, class_object: Type[agent_module.Agent], params:
if class_method := getattr(class_object, method, None):
agent = class_method(**params)
tools = params.get("tools", [])
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, handle_parsing_errors=True)
return AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, handle_parsing_errors=True
)
return load_agent_executor(class_object, params)
@ -293,7 +327,11 @@ def instantiate_embedding(node_type, class_object, params: Dict):
try:
return class_object(**params)
except ValidationError:
params = {key: value for key, value in params.items() if key in class_object.model_fields}
params = {
key: value
for key, value in params.items()
if key in class_object.model_fields
}
return class_object(**params)
@ -305,7 +343,9 @@ def instantiate_vectorstore(class_object: Type[VectorStore], params: Dict):
if "texts" in params:
params["documents"] = params.pop("texts")
if "documents" in params:
params["documents"] = [doc for doc in params["documents"] if isinstance(doc, Document)]
params["documents"] = [
doc for doc in params["documents"] if isinstance(doc, Document)
]
if initializer := vecstore_initializer.get(class_object.__name__):
vecstore = initializer(class_object, params)
else:
@ -320,7 +360,9 @@ def instantiate_vectorstore(class_object: Type[VectorStore], params: Dict):
return vecstore
def instantiate_documentloader(node_type: str, class_object: Type[BaseLoader], params: Dict):
def instantiate_documentloader(
node_type: str, class_object: Type[BaseLoader], params: Dict
):
if "file_filter" in params:
# file_filter will be a string but we need a function
# that will be used to filter the files using file_filter
@ -329,13 +371,17 @@ def instantiate_documentloader(node_type: str, class_object: Type[BaseLoader], p
# in x and if it is, we will return True
file_filter = params.pop("file_filter")
extensions = file_filter.split(",")
params["file_filter"] = lambda x: any(extension.strip() in x for extension in extensions)
params["file_filter"] = lambda x: any(
extension.strip() in x for extension in extensions
)
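
The comma-separated file_filter string becomes a predicate over file paths; a standalone sketch with made-up values:

file_filter = ".py, .md"  # as typed in the UI
extensions = file_filter.split(",")
predicate = lambda x: any(extension.strip() in x for extension in extensions)

assert predicate("notes.md")
assert not predicate("image.png")
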
metadata = params.pop("metadata", None)
if metadata and isinstance(metadata, str):
try:
metadata = orjson.loads(metadata)
except json.JSONDecodeError as exc:
raise ValueError("The metadata you provided is not a valid JSON string.") from exc
raise ValueError(
"The metadata you provided is not a valid JSON string."
) from exc
if node_type == "WebBaseLoader":
if web_path := params.pop("web_path", None):
@ -368,12 +414,16 @@ def instantiate_textsplitter(
"Try changing the chunk_size of the Text Splitter."
) from exc
if ("separator_type" in params and params["separator_type"] == "Text") or "separator_type" not in params:
if (
"separator_type" in params and params["separator_type"] == "Text"
) or "separator_type" not in params:
params.pop("separator_type", None)
# separators might come in as an escaped string like \\n
# so we need to convert it to a string
if "separators" in params:
params["separators"] = params["separators"].encode().decode("unicode-escape")
params["separators"] = (
params["separators"].encode().decode("unicode-escape")
)
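
Separators arrive escaped from the UI as a literal backslash followed by "n"; the unicode-escape round trip restores the real control character:

separators = "\\n"  # two characters: backslash + 'n', as received from the UI
decoded = separators.encode().decode("unicode-escape")
assert decoded == "\n"  # a single newline character
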
text_splitter = class_object(**params)
else:
from langchain.text_splitter import Language
@ -400,7 +450,8 @@ def replace_zero_shot_prompt_with_prompt_template(nodes):
tools = [
tool
for tool in nodes
if tool["type"] != "chatOutputNode" and "Tool" in tool["data"]["node"]["base_classes"]
if tool["type"] != "chatOutputNode"
and "Tool" in tool["data"]["node"]["base_classes"]
]
node["data"] = build_prompt_template(prompt=node["data"], tools=tools)
break
@ -414,7 +465,9 @@ def load_agent_executor(agent_class: type[agent_module.Agent], params, **kwargs)
# agent has hidden args for memory. might need to be supported
# memory = params["memory"]
# if allowed_tools is not a list or set, make it a list
if not isinstance(allowed_tools, (list, set)) and isinstance(allowed_tools, BaseTool):
if not isinstance(allowed_tools, (list, set)) and isinstance(
allowed_tools, BaseTool
):
allowed_tools = [allowed_tools]
tool_names = [tool.name for tool in allowed_tools]
# Agent class requires an output_parser but Agent classes
@ -442,7 +495,10 @@ def build_prompt_template(prompt, tools):
format_instructions = prompt["node"]["template"]["format_instructions"]["value"]
tool_strings = "\n".join(
[f"{tool['data']['node']['name']}: {tool['data']['node']['description']}" for tool in tools]
[
f"{tool['data']['node']['name']}: {tool['data']['node']['description']}"
for tool in tools
]
)
tool_names = ", ".join([tool["data"]["node"]["name"] for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)

View file

@ -7,6 +7,9 @@ from langchain.schema import AgentAction, Document
from langchain_community.vectorstores import VectorStore
from langchain_core.messages import AIMessage
from langchain_core.runnables.base import Runnable
from loguru import logger
from pydantic import BaseModel
from langflow.graph.graph.base import Graph
from langflow.graph.vertex.base import Vertex
from langflow.interface.custom.custom_component import CustomComponent
@ -17,8 +20,6 @@ from langflow.interface.run import (
)
from langflow.services.deps import get_session_service
from langflow.services.session.service import SessionService
from loguru import logger
from pydantic import BaseModel
def fix_memory_inputs(langchain_object):
@ -146,7 +147,9 @@ async def process_runnable(runnable: Runnable, inputs: Union[dict, List[dict]]):
elif isinstance(inputs, dict) and hasattr(runnable, "ainvoke"):
result = await runnable.ainvoke(inputs)
else:
raise ValueError(f"Runnable {runnable} does not support inputs of type {type(inputs)}")
raise ValueError(
f"Runnable {runnable} does not support inputs of type {type(inputs)}"
)
# Check if the result is a list of AIMessages
if isinstance(result, list) and all(isinstance(r, AIMessage) for r in result):
result = [r.content for r in result]
@ -155,7 +158,9 @@ async def process_runnable(runnable: Runnable, inputs: Union[dict, List[dict]]):
return result
async def process_inputs_dict(built_object: Union[Chain, VectorStore, Runnable], inputs: dict):
async def process_inputs_dict(
built_object: Union[Chain, VectorStore, Runnable], inputs: dict
):
if isinstance(built_object, Chain):
if inputs is None:
raise ValueError("Inputs must be provided for a Chain")
@ -190,7 +195,9 @@ async def process_inputs_list(built_object: Runnable, inputs: List[dict]):
return await process_runnable(built_object, inputs)
async def generate_result(built_object: Union[Chain, VectorStore, Runnable], inputs: Union[dict, List[dict]]):
async def generate_result(
built_object: Union[Chain, VectorStore, Runnable], inputs: Union[dict, List[dict]]
):
if isinstance(inputs, dict):
result = await process_inputs_dict(built_object, inputs)
elif isinstance(inputs, List) and isinstance(built_object, Runnable):
@ -222,7 +229,9 @@ async def process_graph_cached(
if clear_cache:
session_service.clear_session(session_id)
if session_id is None:
session_id = session_service.generate_key(session_id=session_id, data_graph=data_graph)
session_id = session_service.generate_key(
session_id=session_id, data_graph=data_graph
)
# Load the graph using SessionService
session = await session_service.load_session(session_id, data_graph)
graph, artifacts = session if session else (None, None)
@ -258,14 +267,44 @@ async def build_graph_and_generate_result(
return Result(result=result, session_id=session_id)
def validate_input(graph_data: Dict[str, Any], tweaks: Dict[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
async def run_graph(
graph: Union["Graph", dict],
flow_id: str,
session_id: str,
stream: bool,
inputs: Optional[Union[dict, List[dict]]] = None,
artifacts: Optional[Dict[str, Any]] = None,
session_service: Optional[SessionService] = None,
):
"""Run the graph and generate the result"""
if isinstance(graph, dict):
graph_data = graph
graph = Graph.from_payload(graph, flow_id=flow_id)
else:
graph_data = graph._graph_data
if not session_id:
session_id = session_service.generate_key(
session_id=flow_id, data_graph=graph_data
)
outputs = await graph.run(inputs, stream=stream)
if session_id and session_service:
session_service.update_session(session_id, (graph, artifacts))
return outputs, session_id
def validate_input(
graph_data: Dict[str, Any], tweaks: Dict[str, Dict[str, Any]]
) -> List[Dict[str, Any]]:
if not isinstance(graph_data, dict) or not isinstance(tweaks, dict):
raise ValueError("graph_data and tweaks should be dictionaries")
nodes = graph_data.get("data", {}).get("nodes") or graph_data.get("nodes")
if not isinstance(nodes, list):
raise ValueError("graph_data should contain a list of nodes under 'data' key or directly under 'nodes' key")
raise ValueError(
"graph_data should contain a list of nodes under 'data' key or directly under 'nodes' key"
)
return nodes
@ -274,7 +313,9 @@ def apply_tweaks(node: Dict[str, Any], node_tweaks: Dict[str, Any]) -> None:
template_data = node.get("data", {}).get("node", {}).get("template")
if not isinstance(template_data, dict):
logger.warning(f"Template data for node {node.get('id')} should be a dictionary")
logger.warning(
f"Template data for node {node.get('id')} should be a dictionary"
)
return
for tweak_name, tweak_value in node_tweaks.items():
@ -289,7 +330,9 @@ def apply_tweaks_on_vertex(vertex: Vertex, node_tweaks: Dict[str, Any]) -> None:
vertex.params[tweak_name] = tweak_value
def process_tweaks(graph_data: Dict[str, Any], tweaks: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
def process_tweaks(
graph_data: Dict[str, Any], tweaks: Dict[str, Dict[str, Any]]
) -> Dict[str, Any]:
"""
This function is used to tweak the graph data using the node id and the tweaks dict.
@ -310,7 +353,9 @@ def process_tweaks(graph_data: Dict[str, Any], tweaks: Dict[str, Dict[str, Any]]
if node_tweaks := tweaks.get(node_id):
apply_tweaks(node, node_tweaks)
else:
logger.warning("Each node should be a dictionary with an 'id' key of type str")
logger.warning(
"Each node should be a dictionary with an 'id' key of type str"
)
return graph_data
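
The effect of a tweaks entry on one node's template, reduced to plain dicts; the field name and values are made up:

node_tweaks = {"temperature": 0.2}  # hypothetical overrides for one node id
template_data = {"temperature": {"value": 0.7}}

# Mirrors apply_tweaks: each tweak overwrites the matching template value.
for tweak_name, tweak_value in node_tweaks.items():
    if tweak_name in template_data:
        template_data[tweak_name]["value"] = tweak_value

assert template_data["temperature"]["value"] == 0.2
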
@ -322,6 +367,8 @@ def process_tweaks_on_graph(graph: Graph, tweaks: Dict[str, Dict[str, Any]]):
if node_tweaks := tweaks.get(node_id):
apply_tweaks_on_vertex(vertex, node_tweaks)
else:
logger.warning("Each node should be a Vertex with an 'id' attribute of type str")
logger.warning(
"Each node should be a Vertex with an 'id' attribute of type str"
)
return graph

View file

@ -1,12 +1,13 @@
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, Union
import duckdb
from langflow.services.deps import get_monitor_service
from loguru import logger
from pydantic import BaseModel
from langflow.services.deps import get_monitor_service
if TYPE_CHECKING:
from langflow.api.v1.schemas import ResultData
from langflow.api.v1.schemas import ResultDataResponse
INDEX_KEY = "index"
@ -45,7 +46,9 @@ def model_to_sql_column_definitions(model: Type[BaseModel]) -> dict:
return columns
def drop_and_create_table_if_schema_mismatch(db_path: str, table_name: str, model: Type[BaseModel]):
def drop_and_create_table_if_schema_mismatch(
db_path: str, table_name: str, model: Type[BaseModel]
):
with duckdb.connect(db_path) as conn:
# Get the current schema from the database
try:
@ -66,8 +69,12 @@ def drop_and_create_table_if_schema_mismatch(db_path: str, table_name: str, mode
conn.execute(f"CREATE SEQUENCE seq_{table_name} START 1;")
except duckdb.CatalogException:
pass
desired_schema[INDEX_KEY] = f"INTEGER PRIMARY KEY DEFAULT NEXTVAL('seq_{table_name}')"
columns_sql = ", ".join(f"{name} {data_type}" for name, data_type in desired_schema.items())
desired_schema[INDEX_KEY] = (
f"INTEGER PRIMARY KEY DEFAULT NEXTVAL('seq_{table_name}')"
)
columns_sql = ", ".join(
f"{name} {data_type}" for name, data_type in desired_schema.items()
)
create_table_sql = f"CREATE TABLE {table_name} ({columns_sql})"
conn.execute(create_table_sql)
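
The DDL produced for a hypothetical two-column model, with the INDEX_KEY column appended as an auto-incrementing primary key:

desired_schema = {"flow_id": "VARCHAR", "valid": "BOOLEAN"}  # made-up columns
table_name = "vertex_builds"  # hypothetical table name

desired_schema["index"] = f"INTEGER PRIMARY KEY DEFAULT NEXTVAL('seq_{table_name}')"
columns_sql = ", ".join(f"{name} {data_type}" for name, data_type in desired_schema.items())
print(f"CREATE TABLE {table_name} ({columns_sql})")
# CREATE TABLE vertex_builds (flow_id VARCHAR, valid BOOLEAN,
#   index INTEGER PRIMARY KEY DEFAULT NEXTVAL('seq_vertex_builds'))
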
@ -138,7 +145,7 @@ async def log_vertex_build(
vertex_id: str,
valid: bool,
params: Any,
data: "ResultData",
data: "ResultDataResponse",
artifacts: Optional[dict] = None,
):
try:

View file

@ -2,14 +2,15 @@ import time
from typing import Callable
import socketio
from sqlmodel import select
from langflow.api.utils import format_elapsed_time
from langflow.api.v1.schemas import ResultData, VertexBuildResponse
from langflow.api.v1.schemas import ResultDataResponse, VertexBuildResponse
from langflow.graph.graph.base import Graph
from langflow.graph.vertex.base import StatelessVertex
from langflow.services.database.models.flow.model import Flow
from langflow.services.deps import get_session
from langflow.services.monitor.utils import log_vertex_build
from sqlmodel import select
def set_socketio_server(socketio_server):
@ -73,7 +74,7 @@ async def build_vertex(
artifacts = vertex.artifacts
timedelta = time.perf_counter() - start_time
duration = format_elapsed_time(timedelta)
result_dict = ResultData(
result_dict = ResultDataResponse(
results=result_dict,
artifacts=artifacts,
duration=duration,
@ -82,7 +83,7 @@ async def build_vertex(
except Exception as exc:
params = str(exc)
valid = False
result_dict = ResultData(results={})
result_dict = ResultDataResponse(results={})
artifacts = {}
set_cache(flow_id, graph)
await log_vertex_build(
@ -95,7 +96,9 @@ async def build_vertex(
)
# Emit the vertex build response
response = VertexBuildResponse(valid=valid, params=params, id=vertex.id, data=result_dict)
response = VertexBuildResponse(
valid=valid, params=params, id=vertex.id, data=result_dict
)
await sio.emit("vertex_build", data=response.model_dump(), to=sid)
except Exception as exc:

View file

@ -47,6 +47,12 @@ class FrontendNode(BaseModel):
"""Description of the frontend node."""
icon: Optional[str] = None
"""Icon of the frontend node."""
is_input: Optional[bool] = None
"""Whether the frontend node is used as an input when processing the Graph.
If True, there should be a field named 'input_value'."""
is_output: Optional[bool] = None
"""Whether the frontend node is used as an output when processing the Graph.
If True, there should be a field named 'input_value'."""
is_composition: Optional[bool] = None
"""Whether the frontend node is used for composition."""
base_classes: List[str]
@ -165,7 +171,9 @@ class FrontendNode(BaseModel):
return _type
@staticmethod
def handle_special_field(field, key: str, _type: str, SPECIAL_FIELD_HANDLERS) -> str:
def handle_special_field(
field, key: str, _type: str, SPECIAL_FIELD_HANDLERS
) -> str:
"""Handles special field by using the respective handler if present."""
handler = SPECIAL_FIELD_HANDLERS.get(key)
return handler(field) if handler else _type
@ -176,7 +184,11 @@ class FrontendNode(BaseModel):
if "dict" in _type.lower() and field.name == "dict_":
field.field_type = "file"
field.file_types = [".json", ".yaml", ".yml"]
elif _type.startswith("Dict") or _type.startswith("Mapping") or _type.startswith("dict"):
elif (
_type.startswith("Dict")
or _type.startswith("Mapping")
or _type.startswith("dict")
):
field.field_type = "dict"
return _type
@ -187,7 +199,9 @@ class FrontendNode(BaseModel):
field.value = value["default"]
@staticmethod
def handle_specific_field_values(field: TemplateField, key: str, name: Optional[str] = None) -> None:
def handle_specific_field_values(
field: TemplateField, key: str, name: Optional[str] = None
) -> None:
"""Handles specific field values for certain fields."""
if key == "headers":
field.value = """{"Authorization": "Bearer <token>"}"""
@ -195,7 +209,9 @@ class FrontendNode(BaseModel):
FrontendNode._handle_api_key_specific_field_values(field, key, name)
@staticmethod
def _handle_model_specific_field_values(field: TemplateField, key: str, name: Optional[str] = None) -> None:
def _handle_model_specific_field_values(
field: TemplateField, key: str, name: Optional[str] = None
) -> None:
"""Handles specific field values related to models."""
model_dict = {
"OpenAI": constants.OPENAI_MODELS,
@ -208,7 +224,9 @@ class FrontendNode(BaseModel):
field.is_list = True
@staticmethod
def _handle_api_key_specific_field_values(field: TemplateField, key: str, name: Optional[str] = None) -> None:
def _handle_api_key_specific_field_values(
field: TemplateField, key: str, name: Optional[str] = None
) -> None:
"""Handles specific field values related to API keys."""
if "api_key" in key and "OpenAI" in str(name):
field.display_name = "OpenAI API Key"
@ -248,7 +266,10 @@ class FrontendNode(BaseModel):
@staticmethod
def should_be_password(key: str, show: bool) -> bool:
"""Determines whether the field should be a password field."""
return any(text in key.lower() for text in {"password", "token", "api", "key"}) and show
return (
any(text in key.lower() for text in {"password", "token", "api", "key"})
and show
)
@staticmethod
def should_be_multiline(key: str) -> bool:

View file

@ -1,34 +0,0 @@
from typing import Any, Callable, Optional, Union
from langchain_core.prompts import PromptTemplate as LCPromptTemplate
from langflow.utils.prompt import GenericPromptTemplate
from llama_index.prompts import PromptTemplate as LIPromptTemplate
PromptTemplate = Union[LCPromptTemplate, LIPromptTemplate]
class ChatDefinition:
def __init__(
self,
func: Callable,
inputs: list[str],
output_key: Optional[str] = None,
prompt_template: Optional[PromptTemplate] = None,
):
self.func = func
self.input_keys = inputs
self.output_key = output_key
self.prompt_template = prompt_template
@classmethod
def from_prompt_template(cls, prompt_template: PromptTemplate, func: Callable, output_key: Optional[str] = None):
prompt = GenericPromptTemplate(prompt_template)
return cls(
func=func,
inputs=prompt.input_keys,
output_key=output_key,
prompt_template=prompt_template,
)
def __call__(self, inputs: dict, callbacks: Optional[Any] = None) -> dict:
return self.func(inputs, callbacks)

View file

@ -25,7 +25,10 @@ def patching(record):
def configure(log_level: Optional[str] = None, log_file: Optional[Path] = None):
if os.getenv("LANGFLOW_LOG_LEVEL") in VALID_LOG_LEVELS and log_level is None:
if (
os.getenv("LANGFLOW_LOG_LEVEL", "").upper() in VALID_LOG_LEVELS
and log_level is None
):
log_level = os.getenv("LANGFLOW_LOG_LEVEL")
if log_level is None:
log_level = "INFO"

View file

@ -1,58 +0,0 @@
from typing import Any, Union
from langchain_core.prompts import PromptTemplate as LCPromptTemplate
from llama_index.prompts import PromptTemplate as LIPromptTemplate
PromptTemplateTypes = Union[LCPromptTemplate, LIPromptTemplate]
class GenericPromptTemplate:
def __init__(self, prompt_template: PromptTemplateTypes):
object.__setattr__(self, "prompt_template", prompt_template)
@property
def input_keys(self):
prompt_template = object.__getattribute__(self, "prompt_template")
if isinstance(prompt_template, LCPromptTemplate):
return prompt_template.input_variables
elif isinstance(prompt_template, LIPromptTemplate):
return prompt_template.template_vars
else:
raise TypeError(f"Unknown prompt template type {type(prompt_template)}")
def to_lc_prompt(self):
prompt_template = object.__getattribute__(self, "prompt_template")
if isinstance(prompt_template, LCPromptTemplate):
return prompt_template
elif isinstance(prompt_template, LIPromptTemplate):
return LCPromptTemplate.from_template(prompt_template.get_template())
else:
raise TypeError(f"Unknown prompt template type {type(prompt_template)}")
def to_li_prompt(self):
prompt_template = object.__getattribute__(self, "prompt_template")
if isinstance(prompt_template, LIPromptTemplate):
return prompt_template
elif isinstance(prompt_template, LCPromptTemplate):
return LIPromptTemplate(template=prompt_template.template)
else:
raise TypeError(f"Unknown prompt template type {type(prompt_template)}")
def __or__(self, other):
prompt_template = object.__getattribute__(self, "prompt_template")
if isinstance(prompt_template, LIPromptTemplate):
return self.to_lc_prompt() | other
else:
raise TypeError(f"Unknown prompt template type {type(other)}")
def __getattribute__(self, name: str) -> Any:
if name in {
"input_keys",
"to_lc_prompt",
"to_li_prompt",
"__or__",
"prompt_template",
}:
return object.__getattribute__(self, name)
prompt_template = object.__getattribute__(self, "prompt_template")
return getattr(prompt_template, name)

View file

@ -45,7 +45,9 @@ def validate_code(code):
# Evaluate the function definition
for node in tree.body:
if isinstance(node, ast.FunctionDef):
code_obj = compile(ast.Module(body=[node], type_ignores=[]), "<string>", "exec")
code_obj = compile(
ast.Module(body=[node], type_ignores=[]), "<string>", "exec"
)
try:
exec(code_obj)
except Exception as e:
@ -89,15 +91,23 @@ def execute_function(code, function_name, *args, **kwargs):
exec_globals,
locals(),
)
exec_globals[alias.asname or alias.name] = importlib.import_module(alias.name)
exec_globals[alias.asname or alias.name] = importlib.import_module(
alias.name
)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(f"Module {alias.name} not found. Please install it and try again.") from e
raise ModuleNotFoundError(
f"Module {alias.name} not found. Please install it and try again."
) from e
function_code = next(
node for node in module.body if isinstance(node, ast.FunctionDef) and node.name == function_name
node
for node in module.body
if isinstance(node, ast.FunctionDef) and node.name == function_name
)
function_code.parent = None
code_obj = compile(ast.Module(body=[function_code], type_ignores=[]), "<string>", "exec")
code_obj = compile(
ast.Module(body=[function_code], type_ignores=[]), "<string>", "exec"
)
try:
exec(code_obj, exec_globals, locals())
except Exception as exc:
@ -124,15 +134,23 @@ def create_function(code, function_name):
if isinstance(node, ast.Import):
for alias in node.names:
try:
exec_globals[alias.asname or alias.name] = importlib.import_module(alias.name)
exec_globals[alias.asname or alias.name] = importlib.import_module(
alias.name
)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(f"Module {alias.name} not found. Please install it and try again.") from e
raise ModuleNotFoundError(
f"Module {alias.name} not found. Please install it and try again."
) from e
function_code = next(
node for node in module.body if isinstance(node, ast.FunctionDef) and node.name == function_name
node
for node in module.body
if isinstance(node, ast.FunctionDef) and node.name == function_name
)
function_code.parent = None
code_obj = compile(ast.Module(body=[function_code], type_ignores=[]), "<string>", "exec")
code_obj = compile(
ast.Module(body=[function_code], type_ignores=[]), "<string>", "exec"
)
with contextlib.suppress(Exception):
exec(code_obj, exec_globals, locals())
exec_globals[function_name] = locals()[function_name]
@ -194,9 +212,13 @@ def prepare_global_scope(code, module):
if isinstance(node, ast.Import):
for alias in node.names:
try:
exec_globals[alias.asname or alias.name] = importlib.import_module(alias.name)
exec_globals[alias.asname or alias.name] = importlib.import_module(
alias.name
)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(f"Module {alias.name} not found. Please install it and try again.") from e
raise ModuleNotFoundError(
f"Module {alias.name} not found. Please install it and try again."
) from e
elif isinstance(node, ast.ImportFrom) and node.module is not None:
try:
imported_module = importlib.import_module(node.module)
@ -217,7 +239,11 @@ def extract_class_code(module, class_name):
:param class_name: Name of the class to extract
:return: AST node of the specified class
"""
class_code = next(node for node in module.body if isinstance(node, ast.ClassDef) and node.name == class_name)
class_code = next(
node
for node in module.body
if isinstance(node, ast.ClassDef) and node.name == class_name
)
class_code.parent = None
return class_code
@ -230,7 +256,9 @@ def compile_class_code(class_code):
:param class_code: AST node of the class
:return: Compiled code object of the class
"""
code_obj = compile(ast.Module(body=[class_code], type_ignores=[]), "<string>", "exec")
code_obj = compile(
ast.Module(body=[class_code], type_ignores=[]), "<string>", "exec"
)
return code_obj
@ -274,7 +302,9 @@ def get_default_imports(code_string):
langflow_imports = list(CUSTOM_COMPONENT_SUPPORTED_TYPES.keys())
necessary_imports = find_names_in_code(code_string, langflow_imports)
langflow_module = importlib.import_module("langflow.field_typing")
default_imports.update({name: getattr(langflow_module, name) for name in necessary_imports})
default_imports.update(
{name: getattr(langflow_module, name) for name in necessary_imports}
)
return default_imports

View file

@ -19,6 +19,8 @@ import { Button } from "../../../../components/ui/button";
import {
LANGFLOW_SUPPORTED_TYPES,
TOOLTIP_EMPTY,
inputHandleHover,
outputHandleHover,
} from "../../../../constants/constants";
import { postCustomComponentUpdate } from "../../../../controllers/API";
import useAlertStore from "../../../../stores/alertStore";
@ -182,8 +184,8 @@ export default function ParameterComponent({
{index === 0 && (
<span>
{left
? "Avaliable input components:"
: "Avaliable output components:"}
? inputHandleHover
: outputHandleHover}
</span>
)}
<span
@ -265,7 +267,7 @@ export default function ParameterComponent({
<div className="flex">
<ShadTooltip
styleClasses={"tooltip-fixed-width custom-scroll nowheel"}
delayDuration={0}
delayDuration={1000}
content={refHtml.current}
side={left ? "left" : "right"}
>

View file

@ -8,7 +8,11 @@ import Checkmark from "../../components/ui/checkmark";
import Loading from "../../components/ui/loading";
import { Textarea } from "../../components/ui/textarea";
import Xmark from "../../components/ui/xmark";
import { priorityFields } from "../../constants/constants";
import {
priorityFields,
statusBuild,
statusBuilding,
} from "../../constants/constants";
import { BuildStatus } from "../../constants/enums";
import NodeToolbarComponent from "../../pages/FlowPage/components/nodeToolbarComponent";
import { useDarkStore } from "../../stores/darkStore";
@ -135,7 +139,7 @@ export default function GenericNode({
const iconName =
iconElement || (data.node?.flow ? "group_components" : name);
const iconClassName = `generic-node-icon ${
!showNode ? "absolute inset-x-6 h-12 w-12" : ""
!showNode ? " absolute inset-x-6 h-12 w-12 " : ""
}`;
if (iconElement && isEmoji) {
return nodeIconFragment(iconElement);
@ -211,9 +215,7 @@ export default function GenericNode({
return "inactive-status";
}
if (buildStatus === BuildStatus.BUILT && isInvalid) {
return isDark
? "border-none ring ring-[#751C1C]"
: "built-invalid-status";
return isDark ? "built-invalid-status-dark" : "built-invalid-status";
} else if (buildStatus === BuildStatus.BUILDING) {
return "building-status";
} else {
@ -295,7 +297,7 @@ export default function GenericNode({
<div
className={
"generic-node-title-arrangement rounded-full" +
(!showNode && "justify-center")
(!showNode && " justify-center ")
}
>
{iconNodeRender()}
@ -331,22 +333,21 @@ export default function GenericNode({
) : (
<ShadTooltip content={data.node?.display_name}>
<div className="group flex items-center gap-2.5">
<ShadTooltip content={data.node?.display_name}>
<div
onDoubleClick={(event) => {
if (nameEditable) {
setInputName(true);
}
takeSnapshot();
event.stopPropagation();
event.preventDefault();
}}
data-testid={"title-" + data.node?.display_name}
className="generic-node-tooltip-div text-primary"
>
{data.node?.display_name}
</div>
</ShadTooltip>
<div
onDoubleClick={(event) => {
if (nameEditable) {
setInputName(true);
}
takeSnapshot();
event.stopPropagation();
event.preventDefault();
}}
data-testid={"title-" + data.node?.display_name}
className="generic-node-tooltip-div text-primary"
>
{data.node?.display_name}
</div>
{nameEditable && (
<div
onClick={(event) => {
@ -471,17 +472,17 @@ export default function GenericNode({
<ShadTooltip
content={
buildStatus === BuildStatus.BUILDING ? (
<span>Building...</span>
<span> {statusBuilding} </span>
) : !validationStatus ? (
<span className="flex">Build to validate status.</span>
<span className="flex">{statusBuild}</span>
) : (
<div className="max-h-96 overflow-auto">
{typeof validationStatus.params === "string"
? (`${durationString}\n${validationStatus.params}`
? `${durationString}\n${validationStatus.params}`
.split("\n")
.map((line, index) => (
<div key={index}>{line}</div>
)))
))
: durationString}
</div>
)

View file

@ -1,3 +1,4 @@
import { Cross2Icon } from "@radix-ui/react-icons";
import { useState } from "react";
import IconComponent from "../../components/genericIconComponent";
import {
@ -8,6 +9,7 @@ import {
import useAlertStore from "../../stores/alertStore";
import { AlertDropdownType } from "../../types/alerts";
import SingleAlert from "./components/singleAlertComponent";
import { zeroNotifications } from "../../constants/constants";
export default function AlertDropdown({
children,
@ -45,15 +47,15 @@ export default function AlertDropdown({
setTimeout(clearNotificationList, 100);
}}
>
<IconComponent name="Trash2" className="h-[1.1rem] w-[1.1rem]" />
<IconComponent name="Trash2" className="h-4 w-4" />
</button>
<button
className="text-foreground hover:text-status-red"
className="text-foreground opacity-70 hover:opacity-100"
onClick={() => {
setOpen(false);
}}
>
<IconComponent name="X" className="h-5 w-5" />
<Cross2Icon className="h-4 w-4" />
</button>
</div>
</div>
@ -68,7 +70,7 @@ export default function AlertDropdown({
))
) : (
<div className="flex h-full w-full items-center justify-center pb-16 text-ring">
No new notifications
{zeroNotifications}
</div>
)}
</div>

View file

@ -1,6 +1,6 @@
import { cloneDeep } from "lodash";
import { useEffect, useState } from "react";
import { CHAT_FORM_DIALOG_SUBTITLE } from "../../constants/constants";
import { CHAT_FORM_DIALOG_SUBTITLE, outputsModalTitle, textInputModalTitle } from "../../constants/constants";
import BaseModal from "../../modals/baseModal";
import useAlertStore from "../../stores/alertStore";
import useFlowStore from "../../stores/flowStore";
@ -56,7 +56,7 @@ export default function IOView({ children, open, setOpen }): JSX.Element {
const chatInputNode = nodes.find((node) => node.id === chatInput?.id);
if (chatInputNode) {
let newNode = cloneDeep(chatInputNode);
newNode.data.node!.template["message"].value = chatValue;
newNode.data.node!.template["input_value"].value = chatValue;
setNode(chatInput!.id, newNode);
}
for (let i = 0; i < count; i++) {
@ -97,14 +97,14 @@ export default function IOView({ children, open, setOpen }): JSX.Element {
{selectedTab !== 0 && (
<div
className={cn(
"mr-6 flex h-full w-2/6 flex-shrink-0 flex-col justify-start overflow-auto scrollbar-hide",
"mr-6 flex h-full w-2/6 flex-shrink-0 flex-col justify-start",
haveChat ? "w-2/6" : "w-full"
)}
>
<Tabs
value={selectedTab.toString()}
className={
"flex h-full flex-col overflow-hidden rounded-md border bg-muted text-center"
"flex h-full flex-col overflow-y-auto custom-scroll rounded-md border bg-muted text-center"
}
onValueChange={(value) => {
setSelectedTab(Number(value));
@ -127,7 +127,7 @@ export default function IOView({ children, open, setOpen }): JSX.Element {
>
<div className="mx-2 mb-2 flex items-center gap-2 text-sm font-bold">
<IconComponent className="h-4 w-4" name={"Type"} />
Text Inputs
{textInputModalTitle}
</div>
{nodes
.filter((node) =>
@ -188,7 +188,7 @@ export default function IOView({ children, open, setOpen }): JSX.Element {
>
<div className="mx-2 mb-2 flex items-center gap-2 text-sm font-bold">
<IconComponent className="h-4 w-4" name={"Braces"} />
Prompt Outputs
{outputsModalTitle}
</div>
{nodes
.filter((node) =>

View file

@ -8,7 +8,7 @@ import useFlowStore from "../../../stores/flowStore";
import { validateNodes } from "../../../utils/reactflowUtils";
import RadialProgressComponent from "../../RadialProgress";
import IconComponent from "../../genericIconComponent";
import { MISSED_ERROR_ALERT } from "../../../alerts_constants";
import { MISSED_ERROR_ALERT } from "../../../constants/alerts_constants";
export default function BuildTrigger({
open,

View file

@ -181,7 +181,7 @@ export default function CodeTabsComponent({
{tabs.map((tab, idx) => (
<TabsContent
value={idx.toString()}
className="api-modal-tabs-content"
className="api-modal-tabs-content overflow-hidden"
key={idx} // Remember to add a unique key prop
>
{idx < 4 ? (

View file

@ -17,6 +17,7 @@ import { cn } from "../../../../utils/utils";
import ShadTooltip from "../../../ShadTooltipComponent";
import IconComponent from "../../../genericIconComponent";
import { Button } from "../../../ui/button";
import { savedHover } from "../../../../constants/constants";
export const MenuBar = ({
removeFunction,
@ -127,7 +128,7 @@ export const MenuBar = ({
</div>
<ShadTooltip
content={
"Last saved at " +
savedHover +
new Date(currentFlow.updated_at ?? "").toLocaleString("en-US", {
hour: "numeric",
minute: "numeric",

View file

@ -4,7 +4,7 @@ import useAlertStore from "../../stores/alertStore";
import useFlowsManagerStore from "../../stores/flowsManagerStore";
import { FileComponentType } from "../../types/components";
import IconComponent from "../genericIconComponent";
import { CONSOLE_ERROR_MSG, CONSOLE_SUCCESS_MSG, INVALID_FILE_ALERT } from "../../alerts_constants";
import { CONSOLE_ERROR_MSG, CONSOLE_SUCCESS_MSG, INVALID_FILE_ALERT } from "../../constants/alerts_constants";
export default function InputFileComponent({
value,

View file

@ -3,6 +3,7 @@ import IconComponent from "../../../components/genericIconComponent";
import { Textarea } from "../../../components/ui/textarea";
import { chatInputType } from "../../../types/components";
import { classNames } from "../../../utils/utils";
import { chatInputPlaceholder, chatInputPlaceholderSend } from "../../../constants/constants";
export default function ChatInput({
lockChat,
@ -72,8 +73,8 @@ export default function ChatInput({
)}
placeholder={
noInput
? "No chat input variables found. Click to run your flow."
: "Send a message..."
? chatInputPlaceholder
: chatInputPlaceholderSend
}
/>
<div className="form-modal-send-icon-position">

View file

@ -1,5 +1,5 @@
import Convert from "ansi-to-html";
import { useEffect, useMemo, useState } from "react";
import { useEffect, useMemo, useRef, useState } from "react";
import Markdown from "react-markdown";
import rehypeMathjax from "rehype-mathjax";
import remarkGfm from "remark-gfm";
@ -9,6 +9,7 @@ import Robot from "../../../assets/robot.png";
import SanitizedHTMLWrapper from "../../../components/SanitizedHTMLWrapper";
import CodeTabsComponent from "../../../components/codeTabsComponent";
import IconComponent from "../../../components/genericIconComponent";
import useFlowStore from "../../../stores/flowStore";
import { chatMessagePropsType } from "../../../types/components";
import { classNames } from "../../../utils/utils";
import FileCard from "../fileComponent";
@ -18,14 +19,26 @@ export default function ChatMessage({
lockChat,
lastMessage,
updateChat,
setLockChat,
}: chatMessagePropsType): JSX.Element {
const convert = new Convert({ newline: true });
const [hidden, setHidden] = useState(true);
const template = chat.template;
const [promptOpen, setPromptOpen] = useState(false);
const [streamUrl, setStreamUrl] = useState(chat.stream_url);
const [chatMessage, setChatMessage] = useState(chat.message.toString());
// Guard against an undefined message before calling .toString() on it
const chatMessageString = chat.message ? chat.message.toString() : "";
const [chatMessage, setChatMessage] = useState(chatMessageString);
const [isStreaming, setIsStreaming] = useState(false);
const eventSource = useRef<EventSource | undefined>(undefined);
const updateFlowPool = useFlowStore((state) => state.updateFlowPool);
const chatMessageRef = useRef(chatMessage);
// Sync ref with state
useEffect(() => {
chatMessageRef.current = chatMessage;
}, [chatMessage]);
// chat.stream_url may hold a URL when the chat output should be streamed;
// in that case the message itself is expected to arrive empty
@ -33,49 +46,50 @@ export default function ChatMessage({
const streamChunks = (url: string) => {
setIsStreaming(true); // Streaming starts
return new Promise<boolean>((resolve, reject) => {
const eventSource = new EventSource(url);
eventSource.onmessage = (event) => {
eventSource.current = new EventSource(url);
eventSource.current.onmessage = (event) => {
let parsedData = JSON.parse(event.data);
if (parsedData.chunk) {
setChatMessage((prev) => prev + parsedData.chunk);
}
};
eventSource.onerror = (event) => {
eventSource.current.onerror = (event) => {
setIsStreaming(false);
eventSource.current?.close();
setStreamUrl(undefined);
reject(new Error("Streaming failed"));
setIsStreaming(false);
eventSource.close();
};
eventSource.addEventListener("close", (event) => {
setStreamUrl(null); // Update state to reflect the stream is closed
resolve(true);
eventSource.current.addEventListener("close", (event) => {
setStreamUrl(undefined); // Update state to reflect the stream is closed
eventSource.current?.close();
setIsStreaming(false);
eventSource.close();
resolve(true);
});
});
};
useEffect(() => {
if (streamUrl && chat.message === "") {
if (streamUrl && !isStreaming) {
setLockChat(true);
streamChunks(streamUrl)
.then(() => {
setLockChat(false);
if (updateChat) {
updateChat(chat, chatMessage, streamUrl);
updateChat(chat, chatMessageRef.current);
}
})
.catch((error) => {
console.error(error);
setLockChat(false);
});
}
}, [streamUrl]);
}, [streamUrl, chatMessage]);
useEffect(() => {
// This effect is specifically for calling updateChat after streaming ends
if (!isStreaming && streamUrl) {
if (updateChat) {
updateChat(chat, chatMessage, streamUrl);
}
}
}, [isStreaming]);
return () => {
eventSource.current?.close();
};
}, []);
useEffect(() => {
const element = document.getElementById("last-chat-message");
@ -146,7 +160,7 @@ export default function ChatMessage({
<div className="w-full">
{useMemo(
() =>
chat.message.toString() === "" && lockChat ? (
chatMessage === "" && lockChat ? (
<IconComponent
name="MoreHorizontal"
className="h-8 w-8 animate-pulse"
@ -288,11 +302,11 @@ dark:prose-invert"
}
return <p>{parts}</p>;
})
: chat.message.toString()}
: chatMessage}
</span>
</>
) : (
<span>{chat.message.toString()}</span>
<span>{chatMessage}</span>
)}
</div>
)}
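
The streaming rework above follows one reusable pattern: keep the EventSource in a ref so cleanup and later effects always see the live connection, mirror the accumulated message into a ref so async callbacks read the latest value, append each parsed chunk to state, and close the source on error or on the server's close event. Below is a minimal sketch of that pattern as a standalone hook; it assumes a browser EventSource and the same { chunk?: string } JSON payload, and the hook name and callbacks (useSseStream, onChunk, onDone) are illustrative, not part of this commit.

import { useEffect, useRef } from "react";

// Consumes a server-sent-events stream, forwarding each JSON `chunk` field.
export function useSseStream(
  url: string | undefined,
  onChunk: (chunk: string) => void,
  onDone: () => void
) {
  const source = useRef<EventSource | undefined>(undefined);

  useEffect(() => {
    if (!url) return;
    source.current = new EventSource(url);
    source.current.onmessage = (event) => {
      const parsed = JSON.parse(event.data);
      if (parsed.chunk) onChunk(parsed.chunk);
    };
    source.current.onerror = () => {
      // Close explicitly, otherwise the browser keeps auto-reconnecting.
      source.current?.close();
      onDone();
    };
    // The server signals completion with a custom "close" event.
    source.current.addEventListener("close", () => {
      source.current?.close();
      onDone();
    });
    // Also close if the component unmounts mid-stream.
    return () => source.current?.close();
  }, [url]); // re-connect only when the URL changes
}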

View file

@ -1,6 +1,10 @@
import _ from "lodash";
import { useEffect, useRef, useState } from "react";
import IconComponent from "../../components/genericIconComponent";
import { NOCHATOUTPUT_NOTICE_ALERT } from "../../constants/alerts_constants";
import {
chatFirstInitialText,
chatSecondInitialText,
} from "../../constants/constants";
import { deleteFlowPool } from "../../controllers/API";
import useAlertStore from "../../stores/alertStore";
import useFlowStore from "../../stores/flowStore";
@ -14,7 +18,6 @@ import {
import { classNames } from "../../utils/utils";
import ChatInput from "./chatInput";
import ChatMessage from "./chatMessage";
import { INFO_MISSING_ALERT, NOCHATOUTPUT_NOTICE_ALERT } from "../../alerts_constants";
export default function NewChatView({
sendMessage,
@ -33,6 +36,7 @@ export default function NewChatView({
const inputIds = inputs.map((obj) => obj.id);
const outputIds = outputs.map((obj) => obj.id);
const outputTypes = outputs.map((obj) => obj.type);
const updateFlowPool = useFlowStore((state) => state.updateFlowPool);
useEffect(() => {
if (!outputTypes.includes("ChatOutput")) {
@ -66,14 +70,12 @@ export default function NewChatView({
const { sender, message, sender_name, stream_url } = output.data
.artifacts as ChatOutputType;
const componentId = output.id + index;
const is_ai = sender === "Machine" || sender === null;
return {
isSend: !is_ai,
message: message,
sender_name,
id: componentId,
componentId: output.id,
stream_url: stream_url,
};
} catch (e) {
@ -82,7 +84,7 @@ export default function NewChatView({
isSend: false,
message: "Error parsing message",
sender_name: "Error",
id: output.id + index,
componentId: output.id,
};
}
});
@ -119,27 +121,28 @@ export default function NewChatView({
function updateChat(
chat: ChatMessageType,
message: string,
stream_url: string | null
stream_url?: string
) {
if (message === "") return;
console.log(`updateChat: ${message}`);
console.log("chatHistory:", chatHistory);
chat.message = message;
chat.stream_url = stream_url;
// chat is one of the entries in chatHistory
setChatHistory((oldChatHistory) => {
const index = oldChatHistory.findIndex((ch) => ch.id === chat.id);
if (index === -1) return oldChatHistory;
let newChatHistory = _.cloneDeep(oldChatHistory);
newChatHistory = [
...newChatHistory.slice(0, index),
chat,
...newChatHistory.slice(index + 1),
];
console.log("newChatHistory:", newChatHistory);
return newChatHistory;
updateFlowPool(chat.componentId, {
message,
sender_name: chat.sender_name ?? "Bot",
sender: chat.isSend ? "User" : "Machine",
});
// setChatHistory((oldChatHistory) => {
// const index = oldChatHistory.findIndex((ch) => ch.id === chat.id);
// if (index === -1) return oldChatHistory;
// let newChatHistory = _.cloneDeep(oldChatHistory);
// newChatHistory = [
// ...newChatHistory.slice(0, index),
// chat,
// ...newChatHistory.slice(index + 1),
// ];
// console.log("newChatHistory:", newChatHistory);
// return newChatHistory;
// });
}
return (
@ -163,10 +166,11 @@ export default function NewChatView({
{chatHistory?.length > 0 ? (
chatHistory.map((chat, index) => (
<ChatMessage
setLockChat={setLockChat}
lockChat={lockChat}
chat={chat}
lastMessage={chatHistory.length - 1 === index ? true : false}
key={`${chat.id}-${index}`}
key={`${chat.componentId}-${index}`}
updateChat={updateChat}
/>
))
@ -178,14 +182,14 @@ export default function NewChatView({
<br />
<div className="langflow-chat-desc">
<span className="langflow-chat-desc-span">
Start a conversation and click the agent's thoughts{" "}
{chatFirstInitialText}{" "}
<span>
<IconComponent
name="MessageSquare"
className="mx-1 inline h-5 w-5 animate-bounce "
/>
</span>{" "}
to inspect the chaining process.
{chatSecondInitialText}
</span>
</div>
</div>
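
The updateChat rewrite above stops cloning a local chatHistory array and instead writes each finished message into the flow store keyed by componentId, so every component rendering that entry re-renders from a single source of truth. A minimal sketch of what such a store slice could look like, assuming zustand (per this branch's migration) and one latest entry per component; FlowPoolEntry and the store shape are illustrative guesses, not the repo's actual useFlowStore.

import { create } from "zustand";

type FlowPoolEntry = {
  message: string;
  sender: "User" | "Machine";
  sender_name: string;
};

type FlowStore = {
  flowPool: Record<string, FlowPoolEntry>;
  updateFlowPool: (componentId: string, entry: FlowPoolEntry) => void;
};

// Components subscribe to the slice they render, so updating one
// component's entry re-renders only the views of that component.
export const useFlowStore = create<FlowStore>((set) => ({
  flowPool: {},
  updateFlowPool: (componentId, entry) =>
    set((state) => ({
      flowPool: { ...state.flowPool, [componentId]: entry },
    })),
}));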

View file

@ -4,6 +4,7 @@ import GenericModal from "../../modals/genericModal";
import { TextAreaComponentType } from "../../types/components";
import IconComponent from "../genericIconComponent";
import { Input } from "../ui/input";
import { editTextModalTitle } from "../../constants/constants";
export default function TextAreaComponent({
value,
@ -37,7 +38,7 @@ export default function TextAreaComponent({
<GenericModal
type={TypeModal.TEXT}
buttonText="Finish Editing"
modalTitle="Edit Text"
modalTitle={editTextModalTitle}
value={value}
setValue={(value: string) => {
onChange(value);

View file

@ -682,3 +682,35 @@ export const priorityFields = new Set(["code", "template"]);
export const INPUT_TYPES = new Set(["ChatInput", "TextInput"]);
export const OUTPUT_TYPES = new Set(["ChatOutput", "TextOutput"]);
export const chatFirstInitialText = "Start a conversation and click the agent's thoughts";
export const chatSecondInitialText = "to inspect the chaining process.";
export const zeroNotifications = "No new notifications";
export const successBuild = "Built successfully ✨";
export const alertSaveWApi = "Caution: Unchecking this box only removes API keys from fields specifically designated for API keys.";
export const saveWApiCheckbox = "Save with my API keys";
export const editTextModalTitle = "Edit Text";
export const editTextPlaceholder = "Type message here.";
export const inputHandleHover = "Available input components:";
export const outputHandleHover = "Available output components:";
export const textInputModalTitle = "Text Inputs";
export const outputsModalTitle = "Prompt Outputs";
export const langflowChatTitle = "Langflow Chat";
export const chatInputPlaceholder = "No chat input variables found. Click to run your flow.";
export const chatInputPlaceholderSend = "Send a message...";
export const editCodeTitle = "Edit Code";
export const myCollectionDesc = "Manage your personal projects. Download or upload your collection.";
export const storeDesc = "Search flows and components from the community.";
export const storeTitle = "Langflow Store";
export const noApi = "You don't have an API key. ";
export const insertApi = "Insert your Langflow API key.";
export const invalidApi = "Your API key is not valid. ";
export const createApi = `Don't have an API key? Sign up at`;
export const statusBuild = "Build to validate status.";
export const statusBuilding = "Building...";
export const savedHover = "Last saved at ";

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Binary image changed: 156 KiB before → 406 KiB after.

View file

@ -8,7 +8,7 @@ import useAlertStore from "../../stores/alertStore";
import { ApiKeyType } from "../../types/components";
import { nodeIconsLucide } from "../../utils/styleUtils";
import BaseModal from "../baseModal";
import { COPIED_NOTICE_ALERT } from "../../alerts_constants";
import { COPIED_NOTICE_ALERT } from "../../constants/alerts_constants";
export default function SecretKeyModal({
title,

View file

@ -9,7 +9,8 @@ import useAlertStore from "../../stores/alertStore";
import { useStoreStore } from "../../stores/storeStore";
import { StoreApiKeyType } from "../../types/components";
import BaseModal from "../baseModal";
import { API_ERROR_ALERT, API_SUCCESS_ALERT } from "../../alerts_constants";
import { API_ERROR_ALERT, API_SUCCESS_ALERT } from "../../constants/alerts_constants";
import { createApi, insertApi, invalidApi, noApi } from "../../constants/constants";
export default function StoreApiKeyModal({
children,
@ -60,10 +61,10 @@ export default function StoreApiKeyModal({
<BaseModal.Header
description={
(hasApiKey && !validApiKey
? "Your API key is not valid. "
? invalidApi
: !hasApiKey
? "You don't have an API key. "
: "") + "Insert your Langflow API key."
? noApi
: "") + insertApi
}
>
<span className="pr-2">API Key</span>
@ -98,7 +99,7 @@ export default function StoreApiKeyModal({
</div>
<div className="flex items-end justify-between">
<span className="pr-1 text-xs text-muted-foreground">
Dont have an API key? Sign up at{" "}
{createApi} {" "}
<a
className="text-high-indigo underline"
href="https://langflow.store/"

View file

@ -9,14 +9,14 @@ import AceEditor from "react-ace";
import IconComponent from "../../components/genericIconComponent";
import { Button } from "../../components/ui/button";
import { Input } from "../../components/ui/input";
import { CODE_PROMPT_DIALOG_SUBTITLE } from "../../constants/constants";
import { CODE_PROMPT_DIALOG_SUBTITLE, editCodeTitle } from "../../constants/constants";
import { postCustomComponent, postValidateCode } from "../../controllers/API";
import useAlertStore from "../../stores/alertStore";
import { useDarkStore } from "../../stores/darkStore";
import useFlowStore from "../../stores/flowStore";
import { codeAreaModalPropsType } from "../../types/components";
import BaseModal from "../baseModal";
import { BUG_ALERT, CODE_ERROR_ALERT, CODE_SUCCESS_ALERT, FUNC_ERROR_ALERT, IMPORT_ERROR_ALERT } from "../../alerts_constants";
import { BUG_ALERT, CODE_ERROR_ALERT, CODE_SUCCESS_ALERT, FUNC_ERROR_ALERT, IMPORT_ERROR_ALERT } from "../../constants/alerts_constants";
export default function CodeAreaModal({
value,
@ -144,7 +144,7 @@ export default function CodeAreaModal({
<BaseModal open={open} setOpen={setOpen}>
<BaseModal.Trigger>{children}</BaseModal.Trigger>
<BaseModal.Header description={CODE_PROMPT_DIALOG_SUBTITLE}>
<span className="pr-2">Edit Code</span>
<span className="pr-2"> {editCodeTitle} </span>
<IconComponent
name="prompts"
className="h-6 w-6 pl-1 text-primary "

View file

@ -3,13 +3,13 @@ import EditFlowSettings from "../../components/EditFlowSettingsComponent";
import IconComponent from "../../components/genericIconComponent";
import { Button } from "../../components/ui/button";
import { Checkbox } from "../../components/ui/checkbox";
import { EXPORT_DIALOG_SUBTITLE } from "../../constants/constants";
import { EXPORT_DIALOG_SUBTITLE, alertSaveWApi, saveWApiCheckbox } from "../../constants/constants";
import useAlertStore from "../../stores/alertStore";
import { useDarkStore } from "../../stores/darkStore";
import useFlowsManagerStore from "../../stores/flowsManagerStore";
import { downloadFlow, removeApiKeys } from "../../utils/reactflowUtils";
import BaseModal from "../baseModal";
import { API_WARNING_NOTICE_ALERT } from "../../alerts_constants";
import { API_WARNING_NOTICE_ALERT } from "../../constants/alerts_constants";
const ExportModal = forwardRef(
(props: { children: ReactNode }, ref): JSX.Element => {
@ -52,12 +52,11 @@ const ExportModal = forwardRef(
}}
/>
<label htmlFor="terms" className="export-modal-save-api text-sm ">
Save with my API keys
{saveWApiCheckbox}
</label>
</div>
<span className=" text-xs text-destructive ">
Caution: Uncheck this box only removes API keys from fields
specifically designated for API keys.
{alertSaveWApi}
</span>
</BaseModal.Content>

View file

@ -3,6 +3,7 @@ import IconComponent from "../../../components/genericIconComponent";
import { Textarea } from "../../../components/ui/textarea";
import { chatInputType } from "../../../types/components";
import { classNames } from "../../../utils/utils";
import { chatInputPlaceholder, chatInputPlaceholderSend } from "../../../constants/constants";
export default function ChatInput({
lockChat,
@ -51,7 +52,7 @@ export default function ChatInput({
? "Thinking..."
: typeof chatValue === "object" &&
Object.keys(chatValue)?.length === 0
? "No chat input variables found. Click to run your flow."
? chatInputPlaceholder
: chatValue
}
onChange={(event): void => {
@ -68,8 +69,8 @@ export default function ChatInput({
)}
placeholder={
noInput
? "No chat input variables found. Click to run your flow."
: "Send a message..."
? chatInputPlaceholder
: chatInputPlaceholderSend
}
/>
<div className="form-modal-send-icon-position">

View file

@ -20,14 +20,14 @@ import {
DialogTrigger,
} from "../../components/ui/dialog";
import { Textarea } from "../../components/ui/textarea";
import { CHAT_FORM_DIALOG_SUBTITLE } from "../../constants/constants";
import { CHAT_FORM_DIALOG_SUBTITLE, chatFirstInitialText, chatSecondInitialText, langflowChatTitle } from "../../constants/constants";
import { AuthContext } from "../../contexts/authContext";
import { getBuildStatus } from "../../controllers/API";
import useAlertStore from "../../stores/alertStore";
import useFlowStore from "../../stores/flowStore";
import { FlowState } from "../../types/tabs";
import { validateNodes } from "../../utils/reactflowUtils";
import { CHAT_ERROR_ALERT, INFO_MISSING_ALERT, MSG_ERROR_ALERT } from "../../alerts_constants";
import { CHAT_ERROR_ALERT, INFO_MISSING_ALERT, MSG_ERROR_ALERT } from "../../constants/alerts_constants";
export default function FormModal({
flow,
@ -585,20 +585,20 @@ export default function FormModal({
<span>
👋{" "}
<span className="langflow-chat-span">
Langflow Chat
{langflowChatTitle}
</span>
</span>
<br />
<div className="langflow-chat-desc">
<span className="langflow-chat-desc-span">
Start a conversation and click the agent's thoughts{" "}
{chatFirstInitialText} {" "}
<span>
<IconComponent
name="MessageSquare"
className="mx-1 inline h-5 w-5 animate-bounce "
/>
</span>{" "}
to inspect the chaining process.
{chatSecondInitialText}
</span>
</div>
</div>

View file

@ -10,6 +10,8 @@ import {
MAX_WORDS_HIGHLIGHT,
PROMPT_DIALOG_SUBTITLE,
TEXT_DIALOG_SUBTITLE,
editTextModalTitle,
editTextPlaceholder,
regexHighlight,
} from "../../constants/constants";
import { TypeModal } from "../../constants/enums";
@ -19,7 +21,7 @@ import { genericModalPropsType } from "../../types/components";
import { handleKeyDown } from "../../utils/reactflowUtils";
import { classNames, varHighlightHTML } from "../../utils/utils";
import BaseModal from "../baseModal";
import { BUG_ALERT, PROMPT_ERROR_ALERT, PROMPT_SUCCESS_ALERT, TEMP_NOTICE_ALERT } from "../../alerts_constants";
import { BUG_ALERT, PROMPT_ERROR_ALERT, PROMPT_SUCCESS_ALERT, TEMP_NOTICE_ALERT } from "../../constants/alerts_constants";
export default function GenericModal({
field_name = "",
@ -211,7 +213,7 @@ export default function GenericModal({
setInputValue(event.target.value);
checkVariables(event.target.value);
}}
placeholder="Type message here."
placeholder={editTextPlaceholder}
onKeyDown={(e) => {
handleKeyDown(e, inputValue, "");
}}
@ -233,7 +235,7 @@ export default function GenericModal({
onChange={(event) => {
setInputValue(event.target.value);
}}
placeholder="Type message here."
placeholder={editTextPlaceholder}
onKeyDown={(e) => {
handleKeyDown(e, value, "");
}}

View file

@ -11,7 +11,7 @@ import {
inputHandlerEventType,
loginInputStateType,
} from "../../../types/components";
import { SIGNIN_ERROR_ALERT } from "../../../alerts_constants";
import { SIGNIN_ERROR_ALERT } from "../../../constants/alerts_constants";
export default function LoginAdminPage() {
const navigate = useNavigate();

View file

@ -33,7 +33,7 @@ import useAlertStore from "../../stores/alertStore";
import useFlowsManagerStore from "../../stores/flowsManagerStore";
import { Users } from "../../types/api";
import { UserInputType } from "../../types/components";
import { USER_ADD_ERROR_ALERT, USER_ADD_SUCCESS_ALERT, USER_DEL_ERROR_ALERT, USER_DEL_SUCCESS_ALERT, USER_EDIT_ERROR_ALERT, USER_EDIT_SUCCESS_ALERT } from "../../alerts_constants";
import { USER_ADD_ERROR_ALERT, USER_ADD_SUCCESS_ALERT, USER_DEL_ERROR_ALERT, USER_DEL_SUCCESS_ALERT, USER_EDIT_ERROR_ALERT, USER_EDIT_SUCCESS_ALERT } from "../../constants/alerts_constants";
export default function AdminPage() {
const [inputValue, setInputValue] = useState("");

View file

@ -26,7 +26,7 @@ import {
} from "../../constants/constants";
import useAlertStore from "../../stores/alertStore";
import { ApiKey } from "../../types/components";
import { DEL_KEY_ERROR_ALERT, DEL_KEY_SUCCESS_ALERT } from "../../alerts_constants";
import { DEL_KEY_ERROR_ALERT, DEL_KEY_SUCCESS_ALERT } from "../../constants/alerts_constants";
export default function ApiKeysPage() {
const [loadingKeys, setLoadingKeys] = useState(true);

View file

@ -31,7 +31,7 @@ import { getRandomName, isWrappedWithClass } from "../../../../utils/utils";
import ConnectionLineComponent from "../ConnectionLineComponent";
import SelectionMenu from "../SelectionMenuComponent";
import ExtraSidebar from "../extraSidebarComponent";
import { INVALID_SELECTION_ERROR_ALERT, UPLOAD_ALERT_LIST, UPLOAD_ERROR_ALERT, WRONG_FILE_ERROR_ALERT } from "../../../../alerts_constants";
import { INVALID_SELECTION_ERROR_ALERT, UPLOAD_ALERT_LIST, UPLOAD_ERROR_ALERT, WRONG_FILE_ERROR_ALERT } from "../../../../constants/alerts_constants";
const nodeTypes = {
genericNode: GenericNode,
@ -87,8 +87,30 @@ export default function Page({
const [lastSelection, setLastSelection] =
useState<OnSelectionChangeParams | null>(null);
const setNode = useFlowStore((state) => state.setNode);
useEffect(() => {
const onKeyDown = (event: KeyboardEvent) => {
const selectedNode = nodes.filter((obj) => obj.selected);
if ((event.ctrlKey || event.metaKey) && event.key === "p" && selectedNode.length > 0) {
event.preventDefault();
setNode(selectedNode[0].id, (old) => ({
...old,
data: {
...old.data,
node: {
...old.data.node,
pinned: old.data?.node?.pinned ? false : true,
},
},
}));
}
if ((event.ctrlKey || event.metaKey) && event.key === "d" && selectedNode.length > 0) {
event.preventDefault();
paste({nodes: selectedNode, edges: []}, {
x: position.current.x,
y: position.current.y,
});
}
if (!isWrappedWithClass(event, "noundo")) {
if (
(event.key === "y" || (event.key === "z" && event.shiftKey)) &&

View file

@ -25,7 +25,7 @@ import {
} from "../../../../utils/utils";
import DisclosureComponent from "../DisclosureComponent";
import SidebarDraggableComponent from "./sideBarDraggableComponent";
import { UPLOAD_ERROR_ALERT } from "../../../../alerts_constants";
import { UPLOAD_ERROR_ALERT } from "../../../../constants/alerts_constants";
export default function ExtraSidebar(): JSX.Element {
const data = useTypesStore((state) => state.data);

Some files were not shown because too many files have changed in this diff.