Merge remote-tracking branch 'origin/cz/mergeAll' into fix/edited_component

This commit is contained in:
Lucas Oliveira 2024-06-07 20:06:50 -03:00
commit 98dcadc797
95 changed files with 1389 additions and 716 deletions

View file

@ -1,13 +1,6 @@
# syntax=docker/dockerfile:1
# Keep this syntax directive! It's used to enable Docker BuildKit
FROM node:20-bookworm-slim as builder-node
WORKDIR /app
COPY src/frontend/package.json src/frontend/package-lock.json ./
RUN npm install
COPY src/frontend/ ./
RUN npm run build
################################
# BUILDER-BASE
@ -56,7 +49,6 @@ COPY pyproject.toml poetry.lock README.md ./
COPY src/ ./src
COPY scripts/ ./scripts
RUN python -m pip install requests --user && cd ./scripts && python update_dependencies.py
COPY --from=builder-node /app/build ./src/backend/base/langflow/frontend
RUN $POETRY_HOME/bin/poetry lock --no-update \
&& $POETRY_HOME/bin/poetry build -f wheel \
&& $POETRY_HOME/bin/poetry run pip install dist/*.whl --force-reinstall

View file

@ -4,7 +4,7 @@ You can run Langflow in `--backend-only` mode to expose your Langflow app as an
Start langflow in backend-only mode with `python3 -m langflow run --backend-only`.
The terminal prints ` Welcome to ⛓ Langflow `, and a blank window opens at `http://127.0.0.1:7864/all`.
The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`.
Langflow will now serve requests to its API without the frontend running.
## Prerequisites
@ -42,7 +42,7 @@ Note the flow ID of `ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef`. You can find this ID
1. Stop Langflow with Ctrl+C.
2. Start langflow in backend-only mode with `python3 -m langflow run --backend-only`.
The terminal prints ` Welcome to ⛓ Langflow `, and a blank window opens at `http://127.0.0.1:7864/all`.
The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`.
Langflow will now serve requests to its API.
3. Run the curl code you copied from the UI.
You should get a result like this:

67
poetry.lock generated
View file

@ -367,13 +367,13 @@ files = [
[[package]]
name = "bce-python-sdk"
version = "0.9.11"
version = "0.9.14"
description = "BCE SDK for python"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,<4,>=2.7"
files = [
{file = "bce_python_sdk-0.9.11-py3-none-any.whl", hash = "sha256:3afb9717f6c0c5f5fe3104a8bea4c111bf2ab3fe87ae73b05492566bc2b5d11a"},
{file = "bce_python_sdk-0.9.11.tar.gz", hash = "sha256:d9e977f059fef6466eebdbb34ad1e27b6f76ef90338807ab959693a78a761e7d"},
{file = "bce_python_sdk-0.9.14-py3-none-any.whl", hash = "sha256:5704aa454151ee608b01ddda7531457433f9b4bb8afbd00706dd368f3b4339a1"},
{file = "bce_python_sdk-0.9.14.tar.gz", hash = "sha256:7cbd182ec1e21034f10d3cdb812f3171d31908f1a783d6cf643039272942d8e8"},
]
[package.dependencies]
@ -2556,13 +2556,13 @@ uritemplate = ">=3.0.1,<5"
[[package]]
name = "google-auth"
version = "2.29.0"
version = "2.30.0"
description = "Google Authentication Library"
optional = false
python-versions = ">=3.7"
files = [
{file = "google-auth-2.29.0.tar.gz", hash = "sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360"},
{file = "google_auth-2.29.0-py2.py3-none-any.whl", hash = "sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415"},
{file = "google-auth-2.30.0.tar.gz", hash = "sha256:ab630a1320f6720909ad76a7dbdb6841cdf5c66b328d690027e4867bdfb16688"},
{file = "google_auth-2.30.0-py2.py3-none-any.whl", hash = "sha256:8df7da660f62757388b8a7f249df13549b3373f24388cb5d2f1dd91cc18180b5"},
]
[package.dependencies]
@ -2594,13 +2594,13 @@ httplib2 = ">=0.19.0"
[[package]]
name = "google-cloud-aiplatform"
version = "1.53.0"
version = "1.54.0"
description = "Vertex AI API client library"
optional = false
python-versions = ">=3.8"
files = [
{file = "google-cloud-aiplatform-1.53.0.tar.gz", hash = "sha256:574cfad8ac5fa5d57ef717f5335ce05636a5fa9b8aeea0f5c325b46b9448e6b1"},
{file = "google_cloud_aiplatform-1.53.0-py2.py3-none-any.whl", hash = "sha256:9dfb1f110e6d4795b45afcfab79108fc5c8ed9aa4eaf899e433bc2ca1b76c778"},
{file = "google-cloud-aiplatform-1.54.0.tar.gz", hash = "sha256:6f5187d35a32951028465804fbb42b478362bf41e2b634ddd22b150299f6e1d8"},
{file = "google_cloud_aiplatform-1.54.0-py2.py3-none-any.whl", hash = "sha256:7b3ed849b9fb59a01bd6f44444ccbb7d18495b867a26f913542f6b2d4c3de252"},
]
[package.dependencies]
@ -2621,7 +2621,7 @@ autologging = ["mlflow (>=1.27.0,<=2.1.1)"]
cloud-profiler = ["tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "werkzeug (>=2.0.0,<2.1.0dev)"]
datasets = ["pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)"]
endpoint = ["requests (>=2.28.1)"]
full = ["cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nest-asyncio (>=1.0.0,<1.6.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "starlette (>=0.17.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)"]
full = ["cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nest-asyncio (>=1.0.0,<1.6.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "setuptools (<70.0.0)", "starlette (>=0.17.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)"]
langchain = ["langchain (>=0.1.16,<0.2)", "langchain-core (<0.2)", "langchain-google-vertexai (<2)"]
langchain-testing = ["absl-py", "cloudpickle (>=2.2.1,<4.0)", "langchain (>=0.1.16,<0.2)", "langchain-core (<0.2)", "langchain-google-vertexai (<2)", "pydantic (>=2.6.3,<3)", "pytest-xdist"]
lit = ["explainable-ai-sdk (>=1.0.0)", "lit-nlp (==0.4.0)", "pandas (>=1.0.0)", "tensorflow (>=2.3.0,<3.0.0dev)"]
@ -2631,11 +2631,11 @@ prediction = ["docker (>=5.0.3)", "fastapi (>=0.71.0,<=0.109.1)", "httpx (>=0.23
preview = ["cloudpickle (<3.0)", "google-cloud-logging (<4.0)"]
private-endpoints = ["requests (>=2.28.1)", "urllib3 (>=1.21.1,<1.27)"]
rapid-evaluation = ["nest-asyncio (>=1.0.0,<1.6.0)", "pandas (>=1.0.0,<2.2.0)"]
ray = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)"]
ray-testing = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pytest-xdist", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "ray[train] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "scikit-learn", "tensorflow", "torch (>=2.0.0,<2.1.0)", "xgboost", "xgboost-ray"]
ray = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "setuptools (<70.0.0)"]
ray-testing = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pytest-xdist", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "ray[train] (==2.9.3)", "scikit-learn", "setuptools (<70.0.0)", "tensorflow", "torch (>=2.0.0,<2.1.0)", "xgboost", "xgboost-ray"]
reasoningengine = ["cloudpickle (>=2.2.1,<4.0)", "pydantic (>=2.6.3,<3)"]
tensorboard = ["tensorflow (>=2.3.0,<3.0.0dev)"]
testing = ["bigframes", "cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-api-core (>=2.11,<3.0.0)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "grpcio-testing", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "ipython", "kfp (>=2.6.0,<3.0.0)", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nest-asyncio (>=1.0.0,<1.6.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyfakefs", "pytest-asyncio", "pytest-xdist", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "requests-toolbelt (<1.0.0)", "scikit-learn", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (==2.13.0)", "tensorflow (==2.16.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "torch (>=2.0.0,<2.1.0)", "torch (>=2.2.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)", "xgboost"]
testing = ["bigframes", "cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-api-core (>=2.11,<3.0.0)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "grpcio-testing", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "ipython", "kfp (>=2.6.0,<3.0.0)", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nest-asyncio (>=1.0.0,<1.6.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyfakefs", "pytest-asyncio", "pytest-xdist", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "requests-toolbelt (<1.0.0)", "scikit-learn", "setuptools (<70.0.0)", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (==2.13.0)", "tensorflow (==2.16.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "torch (>=2.0.0,<2.1.0)", "torch (>=2.2.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)", "xgboost"]
vizier = ["google-vizier (>=0.1.6)"]
xai = ["tensorflow (>=2.3.0,<3.0.0dev)"]
@ -4025,13 +4025,13 @@ adal = ["adal (>=1.0.2)"]
[[package]]
name = "langchain"
version = "0.2.2"
version = "0.2.3"
description = "Building applications with LLMs through composability"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langchain-0.2.2-py3-none-any.whl", hash = "sha256:58ca0c47bcdd156da66f50a0a4fcedc49bf6950827f4a6b06c8c4842d55805f3"},
{file = "langchain-0.2.2.tar.gz", hash = "sha256:9d61e50e9cdc2bea659bc5e6c03650ba048fda63a307490ae368e539f61a0d3a"},
{file = "langchain-0.2.3-py3-none-any.whl", hash = "sha256:5dc33cd9c8008693d328b7cb698df69073acecc89ad9c2a95f243b3314f8d834"},
{file = "langchain-0.2.3.tar.gz", hash = "sha256:81962cc72cce6515f7bd71e01542727870789bf8b666c6913d85559080c1a201"},
]
[package.dependencies]
@ -4047,20 +4047,6 @@ requests = ">=2,<3"
SQLAlchemy = ">=1.4,<3"
tenacity = ">=8.1.0,<9.0.0"
[package.extras]
azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"]
clarifai = ["clarifai (>=9.1.0)"]
cli = ["typer (>=0.9.0,<0.10.0)"]
cohere = ["cohere (>=4,<6)"]
docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"]
embeddings = ["sentence-transformers (>=2,<3)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.1,<0.2)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata 
(>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
javascript = ["esprima (>=4.0.1,<5.0.0)"]
llms = ["clarifai (>=9.1.0)", "cohere (>=4,<6)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"]
openai = ["openai (<2)", "tiktoken (>=0.7,<1.0)"]
qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"]
text-helpers = ["chardet (>=5.1.0,<6.0.0)"]
[[package]]
name = "langchain-anthropic"
version = "0.1.15"
@ -4127,13 +4113,13 @@ langchain-core = ">=0.1.42,<0.3"
[[package]]
name = "langchain-community"
version = "0.2.3"
version = "0.2.4"
description = "Community contributed LangChain integrations."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langchain_community-0.2.3-py3-none-any.whl", hash = "sha256:aa895545be2f3f4aa2fea36f6da2e3b4ec50ce61ec986e8f146901a1e9138138"},
{file = "langchain_community-0.2.3.tar.gz", hash = "sha256:a3c35af215e47b700e7cb4e548fa8b45c6d46d52b5a5a65af2577c5a0104fc9f"},
{file = "langchain_community-0.2.4-py3-none-any.whl", hash = "sha256:8582e9800f4837660dc297cccd2ee1ddc1d8c440d0fe8b64edb07620f0373b0e"},
{file = "langchain_community-0.2.4.tar.gz", hash = "sha256:2bb6a1a36b8500a564d25d76469c02457b1a7c3afea6d4a609a47c06b993e3e4"},
]
[package.dependencies]
@ -4148,19 +4134,15 @@ requests = ">=2,<3"
SQLAlchemy = ">=1.4,<3"
tenacity = ">=8.1.0,<9.0.0"
[package.extras]
cli = ["typer (>=0.9.0,<0.10.0)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-search-documents (==11.4.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.6,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cloudpathlib (>=0.18,<0.19)", "cloudpickle (>=2.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "friendli-client (>=1.2.4,<2.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "httpx-sse (>=0.4.0,<0.5.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "oracledb (>=2.2.0,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "premai (>=0.3.25,<0.4.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pyjwt (>=2.8.0,<3.0.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark 
(>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "simsimd (>=4.3.1,<5.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tidb-vector (>=0.0.3,<1.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "vdms (>=0.0.20,<0.0.21)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
[[package]]
name = "langchain-core"
version = "0.2.4"
version = "0.2.5"
description = "Building applications with LLMs through composability"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langchain_core-0.2.4-py3-none-any.whl", hash = "sha256:5212f7ec78a525e88a178ed3aefe2fd7134b03fb92573dfbab9914f1d92d6ec5"},
{file = "langchain_core-0.2.4.tar.gz", hash = "sha256:82bdcc546eb0341cefcf1f4ecb3e49836fff003903afddda2d1312bb8491ef81"},
{file = "langchain_core-0.2.5-py3-none-any.whl", hash = "sha256:abe5138f22acff23a079ec538be5268bbf97cf023d51987a0dd474d2a16cae3e"},
{file = "langchain_core-0.2.5.tar.gz", hash = "sha256:4a5c2f56b22396a63ef4790043660e393adbfa6832b978f023ca996a04b8e752"},
]
[package.dependencies]
@ -4171,9 +4153,6 @@ pydantic = ">=1,<3"
PyYAML = ">=5.3"
tenacity = ">=8.1.0,<9.0.0"
[package.extras]
extended-testing = ["jinja2 (>=3,<4)"]
[[package]]
name = "langchain-experimental"
version = "0.0.60"
@ -4327,7 +4306,7 @@ types-requests = ">=2.31.0.2,<3.0.0.0"
[[package]]
name = "langflow-base"
version = "0.0.59"
version = "0.0.60"
description = "A Python package with a built-in web application"
optional = false
python-versions = ">=3.10,<3.13"

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "1.0.0a48"
version = "1.0.0a49"
description = "A Python package with a built-in web application"
authors = ["Langflow <contact@langflow.org>"]
maintainers = [

View file

@ -86,9 +86,9 @@ def update_frontend_node_with_template_values(frontend_node, raw_frontend_node):
update_template_values(frontend_node["template"], raw_frontend_node["template"])
old_code = raw_frontend_node['template']['code']['value']
new_code = frontend_node['template']['code']['value']
frontend_node['edited'] = old_code != new_code
old_code = raw_frontend_node["template"]["code"]["value"]
new_code = frontend_node["template"]["code"]["value"]
frontend_node["edited"] = old_code != new_code
return frontend_node
@ -208,16 +208,18 @@ def format_elapsed_time(elapsed_time: float) -> str:
return f"{minutes} {minutes_unit}, {seconds} {seconds_unit}"
async def build_and_cache_graph_from_db(
flow_id: str,
session: Session,
chat_service: "ChatService",
):
async def build_and_cache_graph_from_db(flow_id: str, session: Session, chat_service: "ChatService"):
"""Build and cache the graph."""
flow: Optional[Flow] = session.get(Flow, flow_id)
if not flow or not flow.data:
raise ValueError("Invalid flow ID")
graph = Graph.from_payload(flow.data, flow_id)
for vertex_id in graph._has_session_id_vertices:
vertex = graph.get_vertex(vertex_id)
if vertex is None:
raise ValueError(f"Vertex {vertex_id} not found")
if not vertex._raw_params.get("session_id"):
vertex.update_raw_params({"session_id": flow_id})
await chat_service.set_cache(flow_id, graph)
return graph
@ -321,3 +323,4 @@ def parse_exception(exc):
if hasattr(exc, "body"):
return exc.body["message"]
return str(exc)
return str(exc)

View file

@ -168,9 +168,9 @@ async def build_vertex(
next_runnable_vertices,
top_level_vertices,
result_dict,
log_message,
params,
valid,
log_type,
artifacts,
vertex,
) = await graph.build_vertex(
lock=lock,
@ -180,22 +180,22 @@ async def build_vertex(
inputs_dict=inputs.model_dump() if inputs else {},
files=files,
)
log_obj = Log(message=vertex.artifacts_raw, type=vertex.artifacts_type)
result_data_response = ResultDataResponse(**result_dict.model_dump())
except Exception as exc:
logger.exception(f"Error building vertex: {exc}")
log_message = format_exception_message(exc)
log_type = type(exc).__name__
params = format_exception_message(exc)
valid = False
log_obj = Log(message=params, type="error")
result_data_response = ResultDataResponse(results={})
log_object = Log(message=log_message, type=log_type)
artifacts = {}
# If there's an error building the vertex
# we need to clear the cache
await chat_service.clear_cache(flow_id_str)
result_data_response.logs.append(log_object)
result_data_response.message = artifacts
result_data_response.logs.append(log_obj)
# Log the vertex build
if not vertex.will_stream:
@ -204,8 +204,9 @@ async def build_vertex(
flow_id=flow_id_str,
vertex_id=vertex_id,
valid=valid,
logs=result_data_response.logs,
params=params,
data=result_data_response,
artifacts=artifacts,
)
timedelta = time.perf_counter() - start_time
@ -231,6 +232,7 @@ async def build_vertex(
next_vertices_ids=next_runnable_vertices,
top_level_vertices=top_level_vertices,
valid=valid,
params=params,
id=vertex.id,
data=result_data_response,
)

View file

@ -117,6 +117,21 @@ async def get_transactions(
dicts = monitor_service.get_transactions(
source=source, target=target, status=status, order_by=order_by, flow_id=flow_id
)
return [TransactionModelResponse(**d) for d in dicts]
result = []
for d in dicts:
d = TransactionModelResponse(
index=d["index"],
timestamp=d["timestamp"],
vertex_id=d["vertex_id"],
inputs=d["inputs"],
outputs=d["outputs"],
status=d["status"],
error=d["error"],
flow_id=d["flow_id"],
source=d["vertex_id"],
target=d["target_id"],
)
result.append(d)
return result
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))

View file

@ -2,6 +2,7 @@ from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from typing_extensions import TypedDict
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_serializer
@ -243,11 +244,11 @@ class VerticesOrderResponse(BaseModel):
run_id: UUID
vertices_to_run: List[str]
class ResultDataResponse(BaseModel):
results: Optional[Any] = Field(default_factory=dict)
logs: List[Log | None] = Field(default_factory=list)
messages: List[ChatOutputResponse | None] = Field(default_factory=list)
message: Optional[Any] = Field(default_factory=dict)
artifacts: Optional[Any] = Field(default_factory=dict)
timedelta: Optional[float] = None
duration: Optional[str] = None
used_frozen_result: Optional[bool] = False
@ -259,6 +260,8 @@ class VertexBuildResponse(BaseModel):
next_vertices_ids: Optional[List[str]] = None
top_level_vertices: Optional[List[str]] = None
valid: bool
params: Optional[Any] = Field(default_factory=dict)
"""JSON string of the params."""
data: ResultDataResponse
"""Mapping of vertex ids to result dict containing the param name and result value."""
timestamp: Optional[datetime] = Field(default_factory=lambda: datetime.now(timezone.utc))

View file

@ -1,10 +1,13 @@
import warnings
from typing import Optional, Union
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.llms import LLM
from langchain_core.load import load
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langflow.custom import CustomComponent
from langflow.schema.schema import Record
class LCModelComponent(CustomComponent):
@ -82,7 +85,7 @@ class LCModelComponent(CustomComponent):
return status_message
def get_chat_result(
self, runnable: BaseChatModel, stream: bool, input_value: str, system_message: Optional[str] = None
self, runnable: BaseChatModel, stream: bool, input_value: str | Record, system_message: Optional[str] = None
):
messages: list[Union[HumanMessage, SystemMessage]] = []
if not input_value and not system_message:
@ -90,7 +93,16 @@ class LCModelComponent(CustomComponent):
if system_message:
messages.append(SystemMessage(content=system_message))
if input_value:
messages.append(HumanMessage(content=input_value))
if isinstance(input_value, Record):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if "prompt" in input_value:
prompt = load(input_value.prompt)
runnable = prompt | runnable
else:
messages.append(input_value.to_lc_message())
else:
messages.append(HumanMessage(content=input_value))
if stream:
return runnable.stream(messages)
else:

View file

@ -1,9 +1,10 @@
import base64
from copy import deepcopy
from langchain_core.documents import Document
from langflow.schema import Record
from langflow.services.deps import get_storage_service
def record_to_string(record: Record) -> str:
@ -19,7 +20,7 @@ def record_to_string(record: Record) -> str:
return record.get_text()
def dict_values_to_string(d: dict) -> dict:
async def dict_values_to_string(d: dict) -> dict:
"""
Converts the values of a dictionary to strings.
@ -36,16 +37,43 @@ def dict_values_to_string(d: dict) -> dict:
if isinstance(value, list):
for i, item in enumerate(value):
if isinstance(item, Record):
d_copy[key][i] = record_to_string(item)
d_copy[key][i] = item.to_lc_message()
elif isinstance(item, Document):
d_copy[key][i] = document_to_string(item)
elif isinstance(value, Record):
d_copy[key] = record_to_string(value)
if "files" in value and value.files:
files = await get_file_paths(value.files)
value.files = files
d_copy[key] = value.to_lc_message()
elif isinstance(value, Document):
d_copy[key] = document_to_string(value)
return d_copy
async def get_file_paths(files: list[str]):
storage_service = get_storage_service()
file_paths = []
for file in files:
flow_id, file_name = file.split("/")
file_paths.append(storage_service.build_full_path(flow_id=flow_id, file_name=file_name))
return file_paths
async def get_files(
file_paths: str,
convert_to_base64: bool = False,
):
storage_service = get_storage_service()
file_objects = []
for file_path in file_paths:
flow_id, file_name = file_path.split("/")
file_object = await storage_service.get_file(flow_id=flow_id, file_name=file_name)
if convert_to_base64:
file_object = base64.b64encode(file_object).decode("utf-8")
file_objects.append(file_object)
return file_objects
def document_to_string(document: Document) -> str:
"""
Convert a document to a string.

View file

@ -1,7 +1,9 @@
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts import ChatPromptTemplate
from langflow.base.prompts.utils import dict_values_to_string
from langflow.custom import CustomComponent
from langflow.field_typing import Prompt, TemplateField, Text
from langflow.schema.schema import Record
class PromptComponent(CustomComponent):
@ -15,19 +17,14 @@ class PromptComponent(CustomComponent):
"code": TemplateField(advanced=True),
}
def build(
async def build(
self,
template: Prompt,
**kwargs,
) -> Text:
from langflow.base.prompts.utils import dict_values_to_string
prompt_template = PromptTemplate.from_template(Text(template))
kwargs = dict_values_to_string(kwargs)
kwargs = {k: "\n".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}
try:
formated_prompt = prompt_template.format(**kwargs)
except Exception as exc:
raise ValueError(f"Error formatting prompt: {exc}") from exc
self.status = f'Prompt:\n"{formated_prompt}"'
return formated_prompt
) -> Record:
prompt_template = ChatPromptTemplate.from_template(Text(template))
kwargs = await dict_values_to_string(kwargs)
messages = list(kwargs.values())
prompt = prompt_template + messages
self.status = f'Prompt:\n"{template}"'
return Record(data={"prompt": prompt.to_json()})

View file

@ -58,7 +58,7 @@ class AmazonBedrockComponent(LCModelComponent):
"advanced": True,
},
"cache": {"display_name": "Cache"},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",

View file

@ -63,7 +63,7 @@ class AnthropicLLM(LCModelComponent):
"info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"advanced": True,

View file

@ -78,7 +78,7 @@ class AzureChatOpenAIComponent(LCModelComponent):
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -81,7 +81,7 @@ class QianfanChatEndpointComponent(LCModelComponent):
"info": "Endpoint of the Qianfan LLM, required if custom model used.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -111,7 +111,7 @@ class ChatLiteLLMModelComponent(LCModelComponent):
"required": False,
"default": False,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -1,10 +1,11 @@
from typing import Optional
from langchain_cohere import ChatCohere
from pydantic.v1 import SecretStr
from langflow.field_typing import Text
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langchain_cohere import ChatCohere
from langflow.field_typing import Text
class CohereComponent(LCModelComponent):
@ -42,7 +43,7 @@ class CohereComponent(LCModelComponent):
"type": "float",
"show": True,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
@ -69,3 +70,4 @@ class CohereComponent(LCModelComponent):
temperature=temperature,
)
return self.get_chat_result(output, stream, input_value, system_message)
return self.get_chat_result(output, stream, input_value, system_message)

View file

@ -2,9 +2,10 @@ from typing import Optional
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langflow.field_typing import Text
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class HuggingFaceEndpointsComponent(LCModelComponent):
@ -36,7 +37,7 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
"advanced": True,
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
@ -72,3 +73,4 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
raise ValueError("Could not connect to HuggingFace Endpoints API.") from e
output = ChatHuggingFace(llm=llm)
return self.get_chat_result(output, stream, input_value, system_message)
return self.get_chat_result(output, stream, input_value, system_message)

View file

@ -27,7 +27,7 @@ class MistralAIModelComponent(LCModelComponent):
def build_config(self):
return {
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,

View file

@ -194,7 +194,7 @@ class ChatOllamaComponent(LCModelComponent):
"info": "Template to use for generating text.",
"advanced": True,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -28,7 +28,7 @@ class OpenAIModelComponent(LCModelComponent):
def build_config(self):
return {
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,

View file

@ -1,6 +1,5 @@
from typing import Optional
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
@ -74,7 +73,7 @@ class ChatVertexAIComponent(LCModelComponent):
"value": False,
"advanced": True,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -738,7 +738,9 @@ class Graph:
# Check the cache for the vertex
cached_result = await chat_service.get_cache(key=vertex.id)
if isinstance(cached_result, CacheMiss):
await vertex.build(user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars)
await vertex.build(
user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars, files=files
)
await chat_service.set_cache(key=vertex.id, data=vertex)
else:
cached_vertex = cached_result["result"]
@ -752,7 +754,9 @@ class Graph:
vertex.result.used_frozen_result = True
else:
await vertex.build(user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars)
await vertex.build(
user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars, files=files
)
if vertex.result is not None:
params = f"{vertex._built_object_repr()}{params}"
@ -765,11 +769,13 @@ class Graph:
next_runnable_vertices, top_level_vertices = await self.get_next_and_top_level_vertices(
lock, set_cache_coro, vertex
)
log_transaction(vertex, status="success")
flow_id = self.flow_id
log_transaction(flow_id, vertex, status="success")
return next_runnable_vertices, top_level_vertices, result_dict, params, valid, artifacts, vertex
except Exception as exc:
logger.exception(f"Error building vertex: {exc}")
log_transaction(vertex, status="failure", error=str(exc))
flow_id = self.flow_id
log_transaction(flow_id, vertex, status="failure", error=str(exc))
raise exc
async def get_next_and_top_level_vertices(

View file

@ -31,10 +31,10 @@ class ResultData(BaseModel):
if not values.get("logs") and values.get("artifacts"):
# Build the log from the artifacts
message = values["artifacts"]
if "stream_url" in message:
if "stream_url" in message and "type" in message:
stream_url = StreamURL(location=message["stream_url"])
values["logs"] = [Log(message=stream_url, type=message["type"])]
else:
elif "type" in message:
values["logs"] = [Log(message=message, type=message["type"])]
return values

View file

@ -2,6 +2,7 @@ from enum import Enum
from typing import Any, Generator, Union
from langchain_core.documents import Document
from langflow.schema.schema import Record
from pydantic import BaseModel
from langflow.interface.utils import extract_input_variables_from_prompt

View file

@ -10,11 +10,11 @@ from loguru import logger
from langflow.graph.schema import INPUT_COMPONENTS, OUTPUT_COMPONENTS, InterfaceComponentTypes, ResultData
from langflow.graph.utils import ArtifactType, UnbuiltObject, UnbuiltResult
from langflow.graph.vertex.utils import log_transaction
from langflow.interface.initialize import loading
from langflow.interface.listing import lazy_load_dict
from langflow.schema.schema import INPUT_FIELD_NAME
from langflow.services.deps import get_storage_service
from langflow.services.monitor.utils import log_transaction
from langflow.utils.constants import DIRECT_TYPES
from langflow.utils.schemas import ChatOutputResponse
from langflow.utils.util import sync_to_async, unescape_string
@ -529,12 +529,13 @@ class Vertex:
Returns:
The built result if use_result is True, else the built object.
"""
flow_id = self.graph.flow_id
if not self._built:
log_transaction(source=self, target=requester, flow_id=self.graph.flow_id, status="error")
log_transaction(flow_id, vertex=self, target=requester, status="error")
raise ValueError(f"Component {self.display_name} has not been built yet")
result = self._built_result if self.use_result else self._built_object
log_transaction(source=self, target=requester, flow_id=self.graph.flow_id, status="success")
log_transaction(flow_id, vertex=self, target=requester, status="success")
return result
async def _build_vertex_and_update_params(self, key, vertex: "Vertex"):
@ -628,9 +629,8 @@ class Vertex:
self._built_object, self.artifacts = result
elif len(result) == 3:
self._custom_component, self._built_object, self.artifacts = result
self.artifacts_raw = self.artifacts.get("raw")
self.artifacts_type = self.artifacts.get("type") or ArtifactType.UNKNOWN.value
self.artifacts_raw = self.artifacts.get("raw", None)
self.artifacts_type = self.artifacts.get("type", None) or ArtifactType.UNKNOWN.value
else:
self._built_object = result

View file

@ -116,7 +116,7 @@ class InterfaceVertex(Vertex):
sender_name=sender_name,
stream_url=stream_url,
files=files,
type=artifact_type.value,
type=artifact_type,
)
self.will_stream = stream_url is not None
@ -213,9 +213,9 @@ class InterfaceVertex(Vertex):
flow_id=self.graph.flow_id,
vertex_id=self.id,
valid=True,
logs=self._built_object_repr(),
params=self._built_object_repr(),
data=self.result,
messages=self.artifacts,
artifacts=self.artifacts,
)
self._validate_built_object()

View file

@ -1,9 +1,5 @@
from typing import TYPE_CHECKING
from loguru import logger
from langflow.services.deps import get_monitor_service
if TYPE_CHECKING:
from langflow.graph.vertex.base import Vertex
@ -21,34 +17,3 @@ def build_clean_params(target: "Vertex") -> dict:
if isinstance(value, list):
params[key] = [item for item in value if isinstance(item, (str, int, bool, float, list, dict))]
return params
def log_transaction(source: "Vertex", target: "Vertex", flow_id, status, error=None):
"""
Logs a transaction between two vertices.
Args:
source (Vertex): The source vertex of the transaction.
target (Vertex): The target vertex of the transaction.
status: The status of the transaction.
error (Optional): Any error associated with the transaction.
Raises:
Exception: If there is an error while logging the transaction.
"""
try:
monitor_service = get_monitor_service()
clean_params = build_clean_params(target)
data = {
"source": source.vertex_type,
"target": target.vertex_type,
"target_args": clean_params,
"timestamp": monitor_service.get_timestamp(),
"status": status,
"error": error,
"flow_id": flow_id,
}
monitor_service.add_row(table_name="transactions", data=data)
except Exception as e:
logger.error(f"Error logging transaction: {e}")

View file

@ -251,7 +251,7 @@ def get_flow_by_id_or_endpoint_name(
flow = db.get(Flow, flow_id)
except ValueError:
endpoint_name = flow_id_or_name
stmt = select(Flow).where(Flow.name == endpoint_name)
stmt = select(Flow).where(Flow.endpoint_name == endpoint_name)
if user_id:
stmt = stmt.where(Flow.user_id == user_id)
flow = db.exec(stmt).first()

View file

@ -20,7 +20,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -140,7 +140,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@ -149,7 +149,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,

View file

@ -20,7 +20,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -444,7 +444,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@ -453,7 +453,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,

View file

@ -20,7 +20,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -589,7 +589,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@ -598,7 +598,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,

View file

@ -524,7 +524,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -670,7 +670,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@ -679,7 +679,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,

View file

@ -20,7 +20,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -130,7 +130,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -789,7 +789,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@ -798,7 +798,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -1146,7 +1146,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@ -1155,7 +1155,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,

View file

@ -784,7 +784,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@ -793,7 +793,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -1034,7 +1034,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,

View file

@ -125,7 +125,6 @@ async def instantiate_custom_component(params, user_id, vertex, fallback_to_env_
custom_repr = build_result
if not isinstance(custom_repr, str):
custom_repr = str(custom_repr)
raw = custom_component.repr_value
if hasattr(raw, "data"):
raw = raw.data

View file

@ -1,11 +1,12 @@
import copy
import json
from typing import Literal, Optional, cast
from typing_extensions import TypedDict
from langchain_core.documents import Document
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from pydantic import BaseModel, model_validator
from typing_extensions import TypedDict
from langchain_core.prompts.image import ImagePromptTemplate
from pydantic import BaseModel, model_serializer, model_validator
class Record(BaseModel):
@ -30,6 +31,11 @@ class Record(BaseModel):
values["data"][key] = values[key]
return values
@model_serializer(mode="plain", when_used="json")
def serialize_model(self):
data = {k: v.to_json() if hasattr(v, "to_json") else v for k, v in self.data.items()}
return data
def get_text(self):
"""
Retrieves the text value from the data dictionary.
@ -103,7 +109,9 @@ class Record(BaseModel):
text = self.data.pop(self.text_key, self.default_value)
return Document(page_content=text, metadata=self.data)
def to_lc_message(self) -> BaseMessage:
def to_lc_message(
self,
) -> BaseMessage:
"""
Converts the Record to a BaseMessage.
@ -119,8 +127,22 @@ class Record(BaseModel):
raise ValueError(f"Missing required keys ('text', 'sender') in Record: {self.data}")
sender = self.data.get("sender", "Machine")
text = self.data.get("text", "")
files = self.data.get("files", [])
if sender == "User":
return HumanMessage(content=text)
if files:
contents = [{"type": "text", "text": text}]
for file_path in files:
image_template = ImagePromptTemplate()
image_prompt_value = image_template.invoke(input={"path": file_path})
contents.append({"type": "image_url", "image_url": image_prompt_value.image_url})
human_message = HumanMessage(content=contents)
else:
human_message = HumanMessage(
content=[{"type": "text", "text": text}],
)
return human_message
return AIMessage(content=text)
def __getattr__(self, key):
@ -170,8 +192,14 @@ class Record(BaseModel):
def __str__(self) -> str:
# return a JSON string representation of the Record attributes
try:
data = {k: v.to_json() if hasattr(v, "to_json") else v for k, v in self.data.items()}
return json.dumps(data, indent=4)
except Exception:
return str(self.data)
return json.dumps(self.data)
def __contains__(self, key):
return key in self.data
INPUT_FIELD_NAME = "input_value"

View file

@ -23,6 +23,9 @@ class CacheMiss:
def __repr__(self):
return "<CACHE_MISS>"
def __bool__(self):
return False
def create_cache_folder(func):
def wrapper(*args, **kwargs):

View file

@ -11,25 +11,26 @@ if TYPE_CHECKING:
class TransactionModel(BaseModel):
index: Optional[int] = Field(default=None)
timestamp: Optional[datetime] = Field(default_factory=datetime.now, alias="timestamp")
flow_id: str
source: str
target: str
target_args: dict
vertex_id: str
target_id: str | None = None
inputs: dict
outputs: Optional[dict] = None
status: str
error: Optional[str] = None
flow_id: Optional[str] = Field(default=None, alias="flow_id")
class Config:
from_attributes = True
populate_by_name = True
# validate target_args in case it is a JSON
@field_validator("target_args", mode="before")
@field_validator("outputs", "inputs", mode="before")
def validate_target_args(cls, v):
if isinstance(v, str):
return json.loads(v)
return v
@field_serializer("target_args")
@field_serializer("outputs", "inputs")
def serialize_target_args(v):
if isinstance(v, dict):
return json.dumps(v)
@ -39,19 +40,21 @@ class TransactionModel(BaseModel):
class TransactionModelResponse(BaseModel):
index: Optional[int] = Field(default=None)
timestamp: Optional[datetime] = Field(default_factory=datetime.now, alias="timestamp")
flow_id: str
source: str
target: str
target_args: dict
vertex_id: str
inputs: dict
outputs: Optional[dict] = None
status: str
error: Optional[str] = None
flow_id: Optional[str] = Field(default=None, alias="flow_id")
source: Optional[str] = None
target: Optional[str] = None
class Config:
from_attributes = True
populate_by_name = True
# validate target_args in case it is a JSON
@field_validator("target_args", mode="before")
@field_validator("outputs", "inputs", mode="before")
def validate_target_args(cls, v):
if isinstance(v, str):
return json.loads(v)
@ -129,15 +132,16 @@ class VertexBuildModel(BaseModel):
id: Optional[str] = Field(default=None, alias="id")
flow_id: str
valid: bool
logs: Any
params: Any
data: dict
artifacts: dict
timestamp: datetime = Field(default_factory=datetime.now)
class Config:
from_attributes = True
populate_by_name = True
@field_serializer("data")
@field_serializer("data", "artifacts")
def serialize_dict(v):
if isinstance(v, dict):
# check if the value of each key is a BaseModel or a list of BaseModels
@ -151,8 +155,8 @@ class VertexBuildModel(BaseModel):
return v.model_dump_json()
return v
@field_validator("logs", mode="before")
def validate_logs(cls, v):
@field_validator("params", mode="before")
def validate_params(cls, v):
if isinstance(v, str):
try:
return json.loads(v)
@ -160,7 +164,7 @@ class VertexBuildModel(BaseModel):
return v
return v
@field_serializer("logs")
@field_serializer("params")
def serialize_params(v):
if isinstance(v, list) and all(isinstance(i, BaseModel) for i in v):
return json.dumps([i.model_dump() for i in v])
@ -172,11 +176,17 @@ class VertexBuildModel(BaseModel):
return json.loads(v)
return v
@field_validator("artifacts", mode="before")
def validate_artifacts(cls, v):
if isinstance(v, str):
return json.loads(v)
elif isinstance(v, BaseModel):
return v.model_dump()
return v
class VertexBuildResponseModel(VertexBuildModel):
messages: list[MessageModel] = []
@field_serializer("data")
@field_serializer("data", "artifacts")
def serialize_dict(v):
return v

View file

@ -168,7 +168,9 @@ class MonitorService(Service):
order_by: Optional[str] = "timestamp",
flow_id: Optional[str] = None,
):
query = "SELECT index,flow_id, source, target, target_args, status, error, timestamp FROM transactions"
query = (
"SELECT index,flow_id, status, error, timestamp, vertex_id, inputs, outputs, target_id FROM transactions"
)
conditions = []
if source:
conditions.append(f"source = '{source}'")
@ -183,7 +185,7 @@ class MonitorService(Service):
query += " WHERE " + " AND ".join(conditions)
if order_by:
query += f" ORDER BY {order_by}"
query += f" ORDER BY {order_by} DESC"
with duckdb.connect(str(self.db_path)) as conn:
df = conn.execute(query).df()

View file

@ -119,21 +119,16 @@ async def log_message(
sender_name: str,
message: str,
session_id: str,
artifacts: Optional[dict] = None,
files: Optional[list] = None,
flow_id: Optional[str] = None,
):
try:
from langflow.graph.vertex.base import Vertex
if isinstance(session_id, Vertex):
session_id = await session_id.build() # type: ignore
monitor_service = get_monitor_service()
row = {
"sender": sender,
"sender_name": sender_name,
"message": message,
"artifacts": artifacts or {},
"files": files or [],
"session_id": session_id,
"timestamp": monitor_service.get_timestamp(),
"flow_id": flow_id,
@ -147,9 +142,9 @@ async def log_vertex_build(
flow_id: str,
vertex_id: str,
valid: bool,
logs: Any,
params: Any,
data: "ResultDataResponse",
messages: Optional[dict] = None,
artifacts: Optional[dict] = None,
):
try:
monitor_service = get_monitor_service()
@ -158,9 +153,9 @@ async def log_vertex_build(
"flow_id": flow_id,
"id": vertex_id,
"valid": valid,
"logs": logs,
"params": params,
"data": data.model_dump(),
"messages": messages or {},
"artifacts": artifacts or {},
"timestamp": monitor_service.get_timestamp(),
}
monitor_service.add_row(table_name="vertex_builds", data=row)
@ -183,17 +178,19 @@ def build_clean_params(target: "Vertex") -> dict:
return params
def log_transaction(vertex: "Vertex", status, error=None):
def log_transaction(flow_id, vertex: "Vertex", status, target: Optional["Vertex"] = None, error=None):
try:
monitor_service = get_monitor_service()
clean_params = build_clean_params(vertex)
data = {
"vertex_id": vertex.id,
"vertex_id": str(vertex.id),
"target_id": str(target.id) if target else None,
"inputs": clean_params,
"output": str(vertex.result),
"outputs": vertex.result.model_dump_json() if vertex.result else None,
"timestamp": monitor_service.get_timestamp(),
"status": status,
"error": error,
"flow_id": flow_id,
}
monitor_service.add_row(table_name="transactions", data=data)
except Exception as e:

View file

@ -90,9 +90,9 @@ async def build_vertex(
flow_id=flow_id,
vertex_id=vertex_id,
valid=valid,
logs=params,
params=params,
data=result_dict,
messages=artifacts,
artifacts=artifacts,
)
# Emit the vertex build response

View file

@ -159,7 +159,10 @@ def create_class(code, class_name):
# Replace from langflow import CustomComponent with from langflow.custom import CustomComponent
code = code.replace("from langflow import CustomComponent", "from langflow.custom import CustomComponent")
code = code.replace(
"from langflow.interface.custom.custom_component import CustomComponent",
"from langflow.custom import CustomComponent",
)
module = ast.parse(code)
exec_globals = prepare_global_scope(code, module)

View file

@ -1159,13 +1159,13 @@ files = [
[[package]]
name = "langchain"
version = "0.2.2"
version = "0.2.3"
description = "Building applications with LLMs through composability"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langchain-0.2.2-py3-none-any.whl", hash = "sha256:58ca0c47bcdd156da66f50a0a4fcedc49bf6950827f4a6b06c8c4842d55805f3"},
{file = "langchain-0.2.2.tar.gz", hash = "sha256:9d61e50e9cdc2bea659bc5e6c03650ba048fda63a307490ae368e539f61a0d3a"},
{file = "langchain-0.2.3-py3-none-any.whl", hash = "sha256:5dc33cd9c8008693d328b7cb698df69073acecc89ad9c2a95f243b3314f8d834"},
{file = "langchain-0.2.3.tar.gz", hash = "sha256:81962cc72cce6515f7bd71e01542727870789bf8b666c6913d85559080c1a201"},
]
[package.dependencies]
@ -1181,29 +1181,15 @@ requests = ">=2,<3"
SQLAlchemy = ">=1.4,<3"
tenacity = ">=8.1.0,<9.0.0"
[package.extras]
azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"]
clarifai = ["clarifai (>=9.1.0)"]
cli = ["typer (>=0.9.0,<0.10.0)"]
cohere = ["cohere (>=4,<6)"]
docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"]
embeddings = ["sentence-transformers (>=2,<3)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.1,<0.2)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata 
(>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
javascript = ["esprima (>=4.0.1,<5.0.0)"]
llms = ["clarifai (>=9.1.0)", "cohere (>=4,<6)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"]
openai = ["openai (<2)", "tiktoken (>=0.7,<1.0)"]
qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"]
text-helpers = ["chardet (>=5.1.0,<6.0.0)"]
[[package]]
name = "langchain-community"
version = "0.2.3"
version = "0.2.4"
description = "Community contributed LangChain integrations."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langchain_community-0.2.3-py3-none-any.whl", hash = "sha256:aa895545be2f3f4aa2fea36f6da2e3b4ec50ce61ec986e8f146901a1e9138138"},
{file = "langchain_community-0.2.3.tar.gz", hash = "sha256:a3c35af215e47b700e7cb4e548fa8b45c6d46d52b5a5a65af2577c5a0104fc9f"},
{file = "langchain_community-0.2.4-py3-none-any.whl", hash = "sha256:8582e9800f4837660dc297cccd2ee1ddc1d8c440d0fe8b64edb07620f0373b0e"},
{file = "langchain_community-0.2.4.tar.gz", hash = "sha256:2bb6a1a36b8500a564d25d76469c02457b1a7c3afea6d4a609a47c06b993e3e4"},
]
[package.dependencies]
@ -1218,19 +1204,15 @@ requests = ">=2,<3"
SQLAlchemy = ">=1.4,<3"
tenacity = ">=8.1.0,<9.0.0"
[package.extras]
cli = ["typer (>=0.9.0,<0.10.0)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-search-documents (==11.4.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.6,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cloudpathlib (>=0.18,<0.19)", "cloudpickle (>=2.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "friendli-client (>=1.2.4,<2.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "httpx-sse (>=0.4.0,<0.5.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "oracledb (>=2.2.0,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "premai (>=0.3.25,<0.4.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pyjwt (>=2.8.0,<3.0.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark 
(>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "simsimd (>=4.3.1,<5.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tidb-vector (>=0.0.3,<1.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "vdms (>=0.0.20,<0.0.21)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
[[package]]
name = "langchain-core"
version = "0.2.4"
version = "0.2.5"
description = "Building applications with LLMs through composability"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langchain_core-0.2.4-py3-none-any.whl", hash = "sha256:5212f7ec78a525e88a178ed3aefe2fd7134b03fb92573dfbab9914f1d92d6ec5"},
{file = "langchain_core-0.2.4.tar.gz", hash = "sha256:82bdcc546eb0341cefcf1f4ecb3e49836fff003903afddda2d1312bb8491ef81"},
{file = "langchain_core-0.2.5-py3-none-any.whl", hash = "sha256:abe5138f22acff23a079ec538be5268bbf97cf023d51987a0dd474d2a16cae3e"},
{file = "langchain_core-0.2.5.tar.gz", hash = "sha256:4a5c2f56b22396a63ef4790043660e393adbfa6832b978f023ca996a04b8e752"},
]
[package.dependencies]
@ -1241,9 +1223,6 @@ pydantic = ">=1,<3"
PyYAML = ">=5.3"
tenacity = ">=8.1.0,<9.0.0"
[package.extras]
extended-testing = ["jinja2 (>=3,<4)"]
[[package]]
name = "langchain-experimental"
version = "0.0.60"

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow-base"
version = "0.0.59"
version = "0.0.60"
description = "A Python package with a built-in web application"
authors = ["Langflow <contact@langflow.org>"]
maintainers = [

View file

@ -44,9 +44,9 @@ import useFetchDataOnMount from "../../../hooks/use-fetch-data-on-mount";
import useHandleOnNewValue from "../../../hooks/use-handle-new-value";
import useHandleNodeClass from "../../../hooks/use-handle-node-class";
import useHandleRefreshButtonPress from "../../../hooks/use-handle-refresh-buttons";
import OutputModal from "../outputModal";
import TooltipRenderComponent from "../tooltipRenderComponent";
import { TEXT_FIELD_TYPES } from "./constants";
import OutputModal from "../outputModal";
export default function ParameterComponent({
left,
@ -274,7 +274,9 @@ export default function ParameterComponent({
: "Please build the component first"
}
>
<button
<Button
variant="none"
size="none"
disabled={!displayOutputPreview || unknownOutput}
onClick={() => setOpenOutputModal(true)}
data-testid={`output-inspection-${title.toLowerCase()}`}
@ -288,7 +290,7 @@ export default function ParameterComponent({
)}
name={"ScanEye"}
/>
</button>
</Button>
</ShadTooltip>
)}
</div>

View file

@ -1,10 +1,12 @@
import TableAutoCellRender from "../tableComponent/components/tableAutoCellRender";
export default function ArrayReader({ array }: { array: any[] }): JSX.Element {
//TODO check array type
return (
<div>
<ul>
{array.map((item, index) => (
<li key={index}>{item}</li>
<li key={index}>{<TableAutoCellRender value={item} />}</li>
))}
</ul>
</div>

View file

@ -115,6 +115,7 @@ function CsvOutputComponent({
style={{ height: "100%", width: "100%" }}
>
<TableComponent
key={"csv-output"}
rowData={rowData}
columnDefs={colDefs}
defaultColDef={defaultColDef}

View file

@ -13,7 +13,9 @@ function RecordsOutputComponent({
rows: any;
columnMode?: "intersection" | "union";
}) {
console.log("rows", rows);
const columns = extractColumnsFromRows(rows, columnMode);
console.log("columns", columns);
const columnDefs = columns.map((col, idx) => ({
...col,
@ -23,6 +25,7 @@ function RecordsOutputComponent({
return (
<TableComponent
key={"recordsOutputComponent"}
overlayNoRowsTemplate="No data available"
suppressRowClickSelection={true}
pagination={pagination}

View file

@ -0,0 +1,95 @@
import { cn } from "../../../../utils/utils";
import ShadTooltip from "../../../shadTooltipComponent";
import { Button } from "../../../ui/button";
import IconComponent from "../../../genericIconComponent";
/**
 * Action bar overlaid on a table (absolutely positioned, bottom-left).
 *
 * Renders up to three buttons:
 * - Reset Columns: always rendered; calls `resetGrid` and is enabled only
 *   while `stateChange` is true (i.e. the column layout has been modified).
 * - Duplicate: rendered only when a `duplicateRow` callback is supplied;
 *   enabled only while `hasSelection` is true, with a tooltip explaining why
 *   it is disabled otherwise.
 * - Delete: rendered only when a `deleteRow` callback is supplied; same
 *   selection gating as Duplicate, and the icon turns destructive-colored on
 *   hover once rows are selected.
 *
 * @param resetGrid    restores the grid's initial column layout
 * @param duplicateRow duplicates the currently selected rows (optional)
 * @param deleteRow    deletes the currently selected rows (optional)
 * @param hasSelection whether any rows are currently selected
 * @param stateChange  whether the column state differs from its initial state
 */
export default function TableOptions({
  resetGrid,
  duplicateRow,
  deleteRow,
  hasSelection,
  stateChange,
}: {
  resetGrid: () => void;
  duplicateRow?: () => void;
  deleteRow?: () => void;
  hasSelection: boolean;
  stateChange: boolean;
}): JSX.Element {
  return (
    <div className={cn("absolute bottom-3 left-6")}>
      <div className="flex items-center gap-2">
        <div>
          <ShadTooltip content="Reset Columns">
            <Button
              variant="none"
              size="none"
              onClick={() => {
                resetGrid();
              }}
              disabled={!stateChange}
            >
              <IconComponent
                name="RotateCcw"
                className={cn("h-5 w-5 text-primary transition-all")}
              />
            </Button>
          </ShadTooltip>
        </div>
        {duplicateRow && (
          <div>
            <ShadTooltip
              content={
                !hasSelection ? (
                  <span>Select items to duplicate</span>
                ) : (
                  <span>Duplicate selected items</span>
                )
              }
            >
              <Button
                variant="none"
                size="none"
                onClick={duplicateRow}
                disabled={!hasSelection}
              >
                <IconComponent
                  name="Copy"
                  className={cn("h-5 w-5 text-primary transition-all")}
                />
              </Button>
            </ShadTooltip>
          </div>
        )}
        {deleteRow && (
          <div>
            <ShadTooltip
              content={
                !hasSelection ? (
                  <span>Select items to delete</span>
                ) : (
                  <span>Delete selected items</span>
                )
              }
            >
              <Button
                variant="none"
                size="none"
                onClick={deleteRow}
                disabled={!hasSelection}
              >
                <IconComponent
                  name="Trash2"
                  className={cn(
                    "h-5 w-5 text-primary transition-all",
                    !hasSelection ? "" : "hover:text-destructive",
                  )}
                />
              </Button>
            </ShadTooltip>
          </div>
        )}{" "}
      </div>
    </div>
  );
}

View file

@ -9,20 +9,17 @@ import { Badge } from "../../../ui/badge";
export default function TableAutoCellRender({
value,
}: CustomCellRendererProps) {
}: CustomCellRendererProps | { value: any }) {
function getCellType() {
switch (typeof value) {
case "object":
if (value === null) {
return String(value);
} else if (Array.isArray(value)) {
return <ArrayReader array={value} />;
} else if (value.definitions) {
// use a custom render defined by the sender
return <ObjectRender object={value} />;
} else {
return <ObjectRender object={value} />;
}
break;
case "string":
if (isTimeStampString(value)) {
return <DateReader date={value} />;

View file

@ -1,7 +1,7 @@
import "ag-grid-community/styles/ag-grid.css"; // Mandatory CSS required by the grid
import "ag-grid-community/styles/ag-theme-quartz.css"; // Optional Theme applied to the grid
import { AgGridReact, AgGridReactProps } from "ag-grid-react";
import { ElementRef, forwardRef, useRef } from "react";
import { ElementRef, forwardRef, useRef, useState } from "react";
import {
DEFAULT_TABLE_ALERT_MSG,
DEFAULT_TABLE_ALERT_TITLE,
@ -11,7 +11,8 @@ import "../../style/ag-theme-shadcn.css"; // Custom CSS applied to the grid
import { cn, toTitleCase } from "../../utils/utils";
import ForwardedIconComponent from "../genericIconComponent";
import { Alert, AlertDescription, AlertTitle } from "../ui/alert";
import ResetColumns from "./components/ResetColumns";
import TableOptions from "./components/TableOptions";
import { useParams } from "react-router-dom";
import resetGrid from "./utils/reset-grid-columns";
interface TableComponentProps extends AgGridReactProps {
@ -20,8 +21,8 @@ interface TableComponentProps extends AgGridReactProps {
alertTitle?: string;
alertDescription?: string;
editable?: boolean | string[];
onDelete?: (selectedRows: any) => void;
onDuplicate?: (selectedRows: any) => void;
onDelete?: () => void;
onDuplicate?: () => void;
}
const TableComponent = forwardRef<
@ -69,9 +70,12 @@ const TableComponent = forwardRef<
});
const gridRef = useRef(null);
// @ts-ignore
const realRef = ref?.current ? ref : gridRef;
const realRef: React.MutableRefObject<AgGridReact> = ref?.current
? ref
: gridRef;
const dark = useDarkStore((state) => state.dark);
const initialColumnDefs = useRef(colDef);
const [columnStateChange, setColumnStateChange] = useState(false);
const makeLastColumnNonResizable = (columnDefs) => {
columnDefs.forEach((colDef, index) => {
@ -87,6 +91,9 @@ const TableComponent = forwardRef<
params.api.setGridOption("columnDefs", updatedColumnDefs);
initialColumnDefs.current = params.api.getColumnDefs();
if (props.onGridReady) props.onGridReady(params);
setTimeout(() => {
setColumnStateChange(false);
}, 50);
};
const onColumnMoved = (params) => {
@ -121,7 +128,7 @@ const TableComponent = forwardRef<
>
<AgGridReact
{...props}
className={cn(props.className, "custom-scroll")}
className={cn(props.className, "cusm-scroll")}
defaultColDef={{
minWidth: 100,
autoHeight: true,
@ -131,8 +138,26 @@ const TableComponent = forwardRef<
pagination={true}
onGridReady={onGridReady}
onColumnMoved={onColumnMoved}
onStateUpdated={(e) => {
console.log(e);
if (e.sources.some((source) => source.includes("column"))) {
setColumnStateChange(true);
}
}}
/>
<TableOptions
stateChange={columnStateChange}
hasSelection={realRef.current?.api.getSelectedRows().length > 0}
duplicateRow={props.onDuplicate ? props.onDuplicate : undefined}
deleteRow={props.onDelete ? props.onDelete : undefined}
resetGrid={() => {
console.log("teste");
resetGrid(realRef, initialColumnDefs);
setTimeout(() => {
setColumnStateChange(false);
}, 100);
}}
/>
<ResetColumns resetGrid={() => resetGrid(realRef, initialColumnDefs)} />
</div>
);
},

View file

@ -982,9 +982,7 @@ export async function postBuildVertex(
files?: string[],
): Promise<AxiosResponse<VertexBuildTypeAPI>> {
// input_value is optional and is a query parameter
const data = input_value
? { inputs: { input_value: input_value } }
: undefined;
const data = { inputs: { input_value: input_value ?? "" } };
if (data && files) {
data["files"] = files;
}

View file

@ -44,43 +44,23 @@ export default function SessionView({ rows }: { rows: Array<any> }) {
}
return (
<div className="flex h-full w-full flex-col justify-between gap-6">
<>
<div className="flex w-full items-center justify-end gap-4 space-y-0.5">
<div className="flex flex-shrink-0 items-center gap-2">
<Button
data-testid="api-key-button-store"
variant="primary"
className="group px-2"
disabled={selectedRows.length === 0}
onClick={handleRemoveMessages}
>
<ForwardedIconComponent
name="Trash2"
className={cn(
"h-5 w-5 text-destructive group-disabled:text-primary",
)}
/>
</Button>
</div>
</div>
</>
<TableComponent
readOnlyEdit
onCellEditRequest={(event) => {
handleUpdateMessage(event);
}}
editable={["Sender Name", "Message"]}
overlayNoRowsTemplate="No data available"
onSelectionChanged={(event: SelectionChangedEvent) => {
setSelectedRows(event.api.getSelectedRows().map((row) => row.index));
}}
rowSelection="multiple"
suppressRowClickSelection={true}
pagination={true}
columnDefs={columns}
rowData={rows}
/>
</div>
<TableComponent
key={"sessionView"}
onDelete={handleRemoveMessages}
readOnlyEdit
onCellEditRequest={(event) => {
handleUpdateMessage(event);
}}
editable={["Sender Name", "Message"]}
overlayNoRowsTemplate="No data available"
onSelectionChanged={(event: SelectionChangedEvent) => {
setSelectedRows(event.api.getSelectedRows().map((row) => row.index));
}}
rowSelection="multiple"
suppressRowClickSelection={true}
pagination={true}
columnDefs={columns}
rowData={rows}
/>
);
}

View file

@ -0,0 +1,52 @@
import IconComponent from "../../../../../../../components/genericIconComponent";
import { Case } from "../../../../../../../shared/components/caseComponent";
import { classNames } from "../../../../../../../utils/utils";
/**
 * Send button for the chat form.
 *
 * Shows exactly one icon via the <Case> components:
 * - a Lock while the chat is locked or a save is in progress,
 * - a Zap ("run") when the flow has no chat input (`noInput`),
 * - otherwise the send icon.
 *
 * The button is disabled while the chat is locked or saving, and its colors
 * depend on whether there is a chat input and whether `chatValue` is empty.
 *
 * @param send        invoked on click to send the current message / run the flow
 * @param lockChat    true while the flow is running and input is locked
 * @param noInput     true when the flow exposes no chat-input component
 * @param saveLoading true while a save request is in flight
 * @param chatValue   the current text in the chat textarea
 */
const ButtonSendWrapper = ({
  send,
  lockChat,
  noInput,
  saveLoading,
  chatValue,
}) => {
  return (
    <button
      className={classNames(
        "form-modal-send-button",
        noInput
          ? "bg-high-indigo text-background"
          : chatValue === ""
            ? "text-primary"
            : "bg-chat-send text-background",
      )}
      disabled={lockChat || saveLoading}
      onClick={(): void => send()}
    >
      <Case condition={lockChat || saveLoading}>
        <IconComponent
          name="Lock"
          className="form-modal-lock-icon"
          aria-hidden="true"
        />
      </Case>
      <Case condition={noInput}>
        <IconComponent
          name="Zap"
          className="form-modal-play-icon"
          aria-hidden="true"
        />
      </Case>
      <Case condition={!(lockChat || saveLoading) && !noInput}>
        <IconComponent
          name="LucideSend"
          className="form-modal-send-icon "
          aria-hidden="true"
        />
      </Case>
    </button>
  );
};

export default ButtonSendWrapper;

View file

@ -0,0 +1,81 @@
import { Textarea } from "../../../../../../../components/ui/textarea";
import { classNames } from "../../../../../../../utils/utils";
const TextAreaWrapper = ({
checkSendingOk,
send,
lockChat,
noInput,
saveLoading,
chatValue,
setChatValue,
CHAT_INPUT_PLACEHOLDER,
CHAT_INPUT_PLACEHOLDER_SEND,
inputRef,
setInputFocus,
files,
isDragging,
}) => {
const getPlaceholderText = (
isDragging: boolean,
noInput: boolean,
): string => {
if (isDragging) {
return "Drop here";
} else if (noInput) {
return CHAT_INPUT_PLACEHOLDER;
} else {
return CHAT_INPUT_PLACEHOLDER_SEND;
}
};
const lockClass =
lockChat || saveLoading
? "form-modal-lock-true bg-input"
: noInput
? "form-modal-no-input bg-input"
: "form-modal-lock-false bg-background";
const fileClass =
files.length > 0
? "rounded-b-lg ring-0 focus:ring-0 focus:border-2 rounded-t-none border-t-0 border-border focus:border-t-0 focus:border-ring"
: "rounded-md border-t border-border focus:ring-0 focus:border-2 focus:border-ring";
const additionalClassNames = "form-modal-lockchat pl-14";
return (
<Textarea
onFocus={(e) => {
setInputFocus(true);
e.target.style.borderTopWidth = "0";
}}
onBlur={() => setInputFocus(false)}
onKeyDown={(event) => {
if (checkSendingOk(event)) {
send();
}
}}
rows={1}
ref={inputRef}
disabled={lockChat || noInput || saveLoading}
style={{
resize: "none",
bottom: `${inputRef?.current?.scrollHeight}px`,
maxHeight: "150px",
overflow: `${
inputRef.current && inputRef.current.scrollHeight > 150
? "auto"
: "hidden"
}`,
}}
value={lockChat ? "Thinking..." : saveLoading ? "Saving..." : chatValue}
onChange={(event): void => {
setChatValue(event.target.value);
}}
className={classNames(lockClass, fileClass, additionalClassNames)}
placeholder={getPlaceholderText(isDragging, noInput)}
/>
);
};
export default TextAreaWrapper;

View file

@ -0,0 +1,32 @@
import ForwardedIconComponent from "../../../../../../../components/genericIconComponent";
import { Button } from "../../../../../../../components/ui/button";
/**
 * Paperclip attachment button that proxies clicks to a hidden file input.
 *
 * The actual `<input type="file">` is kept invisible; `handleButtonClick` is
 * expected to trigger it, and `handleFileChange` receives the chosen file.
 * Both the input and the button are disabled while the chat is locked.
 *
 * @param fileInputRef      ref attached to the hidden file input
 * @param handleFileChange  change handler for the hidden file input
 * @param handleButtonClick click handler that opens the file picker
 * @param lockChat          true while the flow is running and input is locked
 */
const UploadFileButton = ({
  fileInputRef,
  handleFileChange,
  handleButtonClick,
  lockChat,
}) => {
  return (
    <div>
      <input
        disabled={lockChat}
        type="file"
        ref={fileInputRef}
        style={{ display: "none" }}
        onChange={handleFileChange}
      />
      <Button
        disabled={lockChat}
        className={`font-bold text-white transition-all ${lockChat ? "cursor-not-allowed" : "hover:text-muted-foreground"}`}
        onClick={handleButtonClick}
        variant="none"
        size="none"
      >
        <ForwardedIconComponent name="PaperclipIcon" />
      </Button>
    </div>
  );
};

export default UploadFileButton;

View file

@ -0,0 +1,7 @@
/**
 * Tailwind class list for the file-preview strip shown above the chat input.
 * The strip's border switches to the focus ring color (and 2px width) while
 * the chat textarea has focus; otherwise it uses the normal border color.
 *
 * @param {boolean} inputFocus whether the chat textarea currently has focus
 * @returns {string} space-separated class names
 */
export const getClassNamesFilePreview = (inputFocus) => {
  const borderClasses = inputFocus
    ? "border border-b-0 border-ring border-2"
    : "border border-b-0 border-border";
  return `flex w-full items-center gap-4 rounded-t-lg bg-background px-14 py-5 overflow-auto custom-scroll ${borderClasses}`;
};

View file

@ -0,0 +1,14 @@
import { useEffect } from "react";
/**
 * Keeps a textarea's height in sync with its content: whenever `value`
 * changes, the height is reset and then set to the element's scrollHeight.
 *
 * @param value    the textarea's current text (effect dependency)
 * @param inputRef ref to the textarea element; returned unchanged
 */
const useAutoResizeTextArea = (value, inputRef) => {
  useEffect(() => {
    const textArea = inputRef.current;
    if (!textArea || textArea.scrollHeight === 0) {
      return;
    }
    // Reset first so the box can also shrink when content is deleted.
    textArea.style!.height = "inherit";
    // Re-read scrollHeight after the reset and grow to fit the content.
    textArea.style!.height = `${textArea.scrollHeight!}px`;
  }, [value]);

  return inputRef;
};

export default useAutoResizeTextArea;

View file

@ -0,0 +1,55 @@
import ShortUniqueId from "short-unique-id";
import useFileUpload from "./use-file-upload";
const useDragAndDrop = (setIsDragging, setFiles, currentFlowId) => {
const dragOver = (e) => {
e.preventDefault();
if (e.dataTransfer.types.some((type) => type === "Files")) {
setIsDragging(true);
}
};
const dragEnter = (e) => {
if (e.dataTransfer.types.some((type) => type === "Files")) {
setIsDragging(true);
}
e.preventDefault();
};
const dragLeave = (e) => {
e.preventDefault();
setIsDragging(false);
};
const onDrop = (e) => {
e.preventDefault();
if (e.dataTransfer.files && e.dataTransfer.files.length > 0) {
handleFiles(e.dataTransfer.files, setFiles, currentFlowId);
e.dataTransfer.clearData();
}
setIsDragging(false);
};
return {
dragOver,
dragEnter,
dragLeave,
onDrop,
};
};
// Registers the first dropped file in component state (loading=true) and
// kicks off its upload for the given flow.
// NOTE(review): only files[0] is processed — confirm that extra files in a
// multi-file drop are meant to be ignored.
const handleFiles = (files, setFiles, currentFlowId) => {
  if (!files) {
    return;
  }
  const generator = new ShortUniqueId({ length: 3 });
  const shortId = generator();
  const firstFile = files[0];
  // MIME major type ("image", "video", ...) used for the preview widget.
  const fileKind = firstFile.type.split("/")[0];
  setFiles((prevFiles) => [
    ...prevFiles,
    { file: firstFile, loading: true, error: false, id: shortId, type: fileKind },
  ]);
  // useFileUpload is a plain async helper despite the hook-style name.
  useFileUpload(firstFile, currentFlowId, setFiles, shortId);
};
export default useDragAndDrop;

View file

@ -0,0 +1,27 @@
import { uploadFile } from "../../../../../../controllers/API";
// Uploads `blob` to the current flow and patches the matching preview entry
// (matched by `id`) with the result: sets `path` on success, or `error: true`
// on failure. Despite the `use` prefix this is a plain helper, not a React hook.
const useFileUpload = (blob, currentFlowId, setFiles, id) => {
  // Shared state patcher. FIX: if the user deleted the preview entry before the
  // request settled, findIndex returned -1 and the old code mutated
  // newFiles[-1], throwing inside the setState updater; now that case is a no-op.
  const patchFile = (patch) => {
    setFiles((prev) => {
      const updatedIndex = prev.findIndex((file) => file.id === id);
      if (updatedIndex === -1) {
        return prev;
      }
      const newFiles = [...prev];
      newFiles[updatedIndex] = { ...newFiles[updatedIndex], ...patch };
      return newFiles;
    });
  };

  uploadFile(blob, currentFlowId)
    .then((res) => {
      patchFile({ loading: false, path: res.data.file_path });
    })
    .catch(() => {
      patchFile({ loading: false, error: true });
    });

  return null;
};
export default useFileUpload;

View file

@ -0,0 +1,13 @@
import { useEffect } from "react";
// Hook: refocuses the chat input whenever the chat becomes unlocked, so the
// user can keep typing immediately after a response finishes.
const useFocusOnUnlock = (lockChat, inputRef) => {
  useEffect(() => {
    if (lockChat) {
      return;
    }
    inputRef.current?.focus();
  }, [lockChat, inputRef]);
  return inputRef;
};
export default useFocusOnUnlock;

View file

@ -0,0 +1,50 @@
import ShortUniqueId from "short-unique-id";
import useFileUpload from "./use-file-upload";
import useAlertStore from "../../../../../../stores/alertStore";
const fsErrorText =
  "Please ensure your file has one of the following extensions:";
const snErrorTxt = "png, jpg, jpeg, gif, bmp, webp";

// Hook: returns the change handler for the hidden <input type="file"> behind
// the upload button. The handler validates the extension, adds the file to the
// preview list in a loading state, and starts the upload to the current flow.
export const useHandleFileChange = (setFiles, currentFlowId) => {
  const setErrorData = useAlertStore((state) => state.setErrorData);

  const handleFileChange = async (
    event: React.ChangeEvent<HTMLInputElement>,
  ) => {
    const fileInput = event.target;
    try {
      const file = fileInput.files?.[0];
      if (!file) {
        return;
      }
      const allowedExtensions = ["png", "jpg", "jpeg", "gif", "bmp", "webp"];
      const fileExtension = file.name.split(".").pop()?.toLowerCase();
      if (!fileExtension || !allowedExtensions.includes(fileExtension)) {
        // Surface the allowed-extension list to the user via the alert store.
        setErrorData({
          title: "Error uploading file",
          list: [fsErrorText, snErrorTxt],
        });
        return;
      }
      const uid = new ShortUniqueId({ length: 10 }); // Increase the length to ensure uniqueness
      const id = uid();
      const type = file.type.split("/")[0];
      setFiles((prevFiles) => [
        ...prevFiles,
        { file, loading: true, error: false, id, type },
      ]);
      useFileUpload(file, currentFlowId, setFiles, id);
    } finally {
      // FIX: always reset the input value (the old code skipped this on the
      // invalid-extension early return), so re-selecting the same file fires
      // the change event again.
      fileInput.value = "";
    }
  };

  return {
    handleFileChange,
  };
};

export default useHandleFileChange;

View file

@ -0,0 +1,61 @@
import { useEffect } from "react";
import ShortUniqueId from "short-unique-id";
import useFileUpload from "./use-file-upload";
import useAlertStore from "../../../../../../stores/alertStore";
const fsErrorText =
  "Please ensure your file has one of the following extensions:";
const snErrorTxt = "png, jpg, jpeg, gif, bmp, webp";

// Hook: registers a document-level paste listener so images pasted into the
// chat (e.g. screenshots) are validated, added to the preview list, and
// uploaded to the current flow. The listener ignores pastes while the chat is
// locked (thinking/saving) and is re-registered when its inputs change.
const useUpload = (uploadFile, currentFlowId, setFiles, lockChat) => {
  const setErrorData = useAlertStore((state) => state.setErrorData);

  useEffect(() => {
    const handlePaste = (event: ClipboardEvent): void => {
      if (lockChat) {
        return;
      }
      const items = event.clipboardData?.items;
      if (!items) {
        return;
      }
      for (let i = 0; i < items.length; i++) {
        const blob = items[i].getAsFile();
        if (!blob) {
          continue;
        }
        const allowedExtensions = [
          "png",
          "jpg",
          "jpeg",
          "gif",
          "bmp",
          "webp",
        ];
        const fileExtension = blob.name.split(".").pop()?.toLowerCase();
        if (!fileExtension || !allowedExtensions.includes(fileExtension)) {
          setErrorData({
            title: "Error uploading file",
            list: [fsErrorText, snErrorTxt],
          });
          return;
        }
        // FIX: the type was previously read from items[0] on every loop
        // iteration; read it from the current item instead.
        const type = items[i].type.split("/")[0];
        const uid = new ShortUniqueId({ length: 3 });
        const id = uid();
        setFiles((prevFiles) => [
          ...prevFiles,
          { file: blob, loading: true, error: false, id, type },
        ]);
        useFileUpload(blob, currentFlowId, setFiles, id);
      }
    };

    document.addEventListener("paste", handlePaste);
    return () => {
      document.removeEventListener("paste", handlePaste);
    };
  }, [uploadFile, currentFlowId, lockChat]);

  return null;
};

export default useUpload;

View file

@ -1,7 +1,4 @@
import { useEffect, useState } from "react";
import ShortUniqueId from "short-unique-id";
import IconComponent from "../../../../../components/genericIconComponent";
import { Textarea } from "../../../../../components/ui/textarea";
import { useRef, useState } from "react";
import {
CHAT_INPUT_PLACEHOLDER,
CHAT_INPUT_PLACEHOLDER_SEND,
@ -9,11 +6,18 @@ import {
import { uploadFile } from "../../../../../controllers/API";
import useFlowsManagerStore from "../../../../../stores/flowsManagerStore";
import {
ChatInputType,
FilePreviewType,
chatInputType,
} from "../../../../../types/components";
import { classNames } from "../../../../../utils/utils";
import FilePreview from "../filePreviewChat";
import ButtonSendWrapper from "./components/buttonSendWrapper";
import TextAreaWrapper from "./components/textAreaWrapper";
import UploadFileButton from "./components/uploadFileButton";
import { getClassNamesFilePreview } from "./helpers/get-class-file-preview";
import useAutoResizeTextArea from "./hooks/use-auto-resize-text-area";
import useFocusOnUnlock from "./hooks/use-focus-unlock";
import useHandleFileChange from "./hooks/use-handle-file-change";
import useUpload from "./hooks/use-upload";
export default function ChatInput({
lockChat,
chatValue,
@ -21,199 +25,102 @@ export default function ChatInput({
setChatValue,
inputRef,
noInput,
}: chatInputType): JSX.Element {
files,
setFiles,
isDragging,
}: ChatInputType): JSX.Element {
const [repeat, setRepeat] = useState(1);
const saveLoading = useFlowsManagerStore((state) => state.saveLoading);
const currentFlowId = useFlowsManagerStore((state) => state.currentFlowId);
const uid = new ShortUniqueId({ length: 3 });
const [files, setFiles] = useState<FilePreviewType[]>([]);
const [inputFocus, setInputFocus] = useState<boolean>(false);
const fileInputRef = useRef<HTMLInputElement>(null);
useEffect(() => {
if (!lockChat && inputRef.current) {
inputRef.current.focus();
}
}, [lockChat, inputRef]);
useFocusOnUnlock(lockChat, inputRef);
useAutoResizeTextArea(chatValue, inputRef);
useUpload(uploadFile, currentFlowId, setFiles, lockChat || saveLoading);
const { handleFileChange } = useHandleFileChange(setFiles, currentFlowId);
useEffect(() => {
if (inputRef.current && inputRef.current.scrollHeight !== 0) {
inputRef.current.style.height = "inherit"; // Reset the height
inputRef.current.style.height = `${inputRef.current.scrollHeight}px`; // Set it to the scrollHeight
}
}, [chatValue]);
//listen to ctrl-v to paste images
useEffect(() => {
const handlePaste = (event: ClipboardEvent): void => {
const items = event.clipboardData?.items;
if (items) {
for (let i = 0; i < items.length; i++) {
if (items[i].type.indexOf("image") !== -1) {
const blob = items[i].getAsFile();
if (blob) {
const id = uid();
setFiles([
...files,
{ file: blob, loading: true, error: false, id },
]);
uploadFile(blob, currentFlowId)
.then((res) => {
setFiles((prev) => {
const newFiles = [...prev];
const updatedIndex = newFiles.findIndex(
(file) => file.id === id,
);
newFiles[updatedIndex].loading = false;
newFiles[updatedIndex].path = res.data.file_path;
return newFiles;
});
})
.catch((err) => {
setFiles((prev) => {
const newFiles = [...prev];
const updatedIndex = newFiles.findIndex(
(file) => file.id === id,
);
newFiles[updatedIndex].loading = false;
newFiles[updatedIndex].error = true;
return newFiles;
});
});
}
}
}
}
};
document.addEventListener("paste", handlePaste);
return () => {
document.removeEventListener("paste", handlePaste);
};
}, []);
function send() {
const send = () => {
sendMessage({
repeat,
files: files.map((file) => file.path ?? "").filter((file) => file !== ""),
});
setFiles([]);
}
};
const checkSendingOk = (event: React.KeyboardEvent<HTMLTextAreaElement>) => {
return (
event.key === "Enter" &&
!lockChat &&
!saveLoading &&
!event.shiftKey &&
!event.nativeEvent.isComposing
);
};
const classNameFilePreview = getClassNamesFilePreview(inputFocus);
const handleButtonClick = () => {
fileInputRef.current!.click();
};
return (
<div className="flex w-full flex-col-reverse">
<div className="relative w-full">
<Textarea
onKeyDown={(event) => {
if (
event.key === "Enter" &&
!lockChat &&
!saveLoading &&
!event.shiftKey &&
!event.nativeEvent.isComposing
) {
send();
}
}}
rows={1}
ref={inputRef}
disabled={lockChat || noInput || saveLoading}
style={{
resize: "none",
bottom: `${inputRef?.current?.scrollHeight}px`,
maxHeight: "150px",
overflow: `${
inputRef.current && inputRef.current.scrollHeight > 150
? "auto"
: "hidden"
}`,
}}
value={
lockChat ? "Thinking..." : saveLoading ? "Saving..." : chatValue
}
onChange={(event): void => {
setChatValue(event.target.value);
}}
className={classNames(
lockChat || saveLoading
? " form-modal-lock-true bg-input"
: noInput
? "form-modal-no-input bg-input"
: " form-modal-lock-false bg-background",
"form-modal-lockchat",
)}
placeholder={
noInput ? CHAT_INPUT_PLACEHOLDER : CHAT_INPUT_PLACEHOLDER_SEND
}
<TextAreaWrapper
checkSendingOk={checkSendingOk}
send={send}
lockChat={lockChat}
noInput={noInput}
saveLoading={saveLoading}
chatValue={chatValue}
setChatValue={setChatValue}
CHAT_INPUT_PLACEHOLDER={CHAT_INPUT_PLACEHOLDER}
CHAT_INPUT_PLACEHOLDER_SEND={CHAT_INPUT_PLACEHOLDER_SEND}
inputRef={inputRef}
setInputFocus={setInputFocus}
files={files}
isDragging={isDragging}
/>
<div className="form-modal-send-icon-position">
<button
className={classNames(
"form-modal-send-button",
noInput
? "bg-high-indigo text-background"
: chatValue === ""
? "text-primary"
: "bg-chat-send text-background",
)}
disabled={lockChat || saveLoading}
onClick={(): void => send()}
>
{lockChat || saveLoading ? (
<IconComponent
name="Lock"
className="form-modal-lock-icon"
aria-hidden="true"
/>
) : noInput ? (
<IconComponent
name="Zap"
className="form-modal-play-icon"
aria-hidden="true"
/>
) : (
<IconComponent
name="LucideSend"
className="form-modal-send-icon "
aria-hidden="true"
/>
)}
</button>
<ButtonSendWrapper
send={send}
lockChat={lockChat}
noInput={noInput}
saveLoading={saveLoading}
chatValue={chatValue}
/>
</div>
<div
className={`absolute bottom-2 left-4 ${lockChat || saveLoading ? "cursor-not-allowed" : ""}`}
>
<UploadFileButton
lockChat={lockChat || saveLoading}
fileInputRef={fileInputRef}
handleFileChange={handleFileChange}
handleButtonClick={handleButtonClick}
/>
</div>
</div>
<div className="flex w-full gap-2 pb-2">
{files.map((file) => (
<FilePreview
error={file.error}
file={file.file}
loading={file.loading}
key={file.id}
onDelete={() => {
setFiles((prev) => prev.filter((f) => f.id !== file.id));
// TODO: delete file on backend
}}
/>
))}
</div>
{/*
<Popover>
<PopoverTrigger asChild>
<Button variant="primary" className="h-13 px-4">
<IconComponent name="Repeat" className="" aria-hidden="true" />
</Button>
</PopoverTrigger>
<PopoverContent className="w-fit">
<div className="flex flex-col items-center justify-center gap-2">
<span className="text-sm">Repetitions: </span>
<Input
onChange={(e) => {
handleChange(parseInt(e.target.value));
{files.length > 0 && (
<div className={classNameFilePreview}>
{files.map((file) => (
<FilePreview
error={file.error}
file={file.file}
loading={file.loading}
key={file.id}
onDelete={() => {
setFiles((prev: FilePreviewType[]) =>
prev.filter((f) => f.id !== file.id),
);
// TODO: delete file on backend
}}
className="w-16"
type="number"
min={0}
/>
</div>
</PopoverContent>
</Popover> */}
))}
</div>
)}
</div>
);
}

View file

@ -0,0 +1,35 @@
import { useState } from "react";
import ForwardedIconComponent from "../../../../../../../components/genericIconComponent";
import formatFileName from "../../../filePreviewChat/utils/format-file-name";
import FileCard from "../../../fileComponent";
// Collapsible wrapper around a FileCard inside a chat message: the truncated
// file name acts as a toggle that shows/hides the card itself.
export default function FileCardWrapper({
  index,
  name,
  type,
  path,
}: {
  index: number;
  name: string;
  type: string;
  path: string;
}) {
  const [show, setShow] = useState<boolean>(true);
  const toggleShow = () => setShow((previous) => !previous);
  return (
    <div key={index} className="flex flex-col gap-2">
      <span
        onClick={toggleShow}
        className="flex cursor-pointer gap-2 text-sm text-muted-foreground"
      >
        {formatFileName(name, 50)}
        <ForwardedIconComponent name={show ? "ChevronDown" : "ChevronRight"} />
      </span>
      <FileCard
        showFile={show}
        fileName={name}
        fileType={type}
        content={path}
      />
    </div>
  );
}

View file

@ -7,13 +7,17 @@ import remarkMath from "remark-math";
import MaleTechnology from "../../../../../assets/male-technologist.png";
import Robot from "../../../../../assets/robot.png";
import CodeTabsComponent from "../../../../../components/codeTabsComponent";
import IconComponent from "../../../../../components/genericIconComponent";
import IconComponent, {
ForwardedIconComponent,
} from "../../../../../components/genericIconComponent";
import SanitizedHTMLWrapper from "../../../../../components/sanitizedHTMLWrapper";
import useAlertStore from "../../../../../stores/alertStore";
import useFlowStore from "../../../../../stores/flowStore";
import { chatMessagePropsType } from "../../../../../types/components";
import { classNames, cn } from "../../../../../utils/utils";
import FileCard from "../fileComponent";
import formatFileName from "../filePreviewChat/utils/format-file-name";
import FileCardWrapper from "./components/fileCardWrapper";
export default function ChatMessage({
chat,
@ -22,6 +26,7 @@ export default function ChatMessage({
updateChat,
setLockChat,
}: chatMessagePropsType): JSX.Element {
const [showFile, setShowFile] = useState<boolean>(true);
const convert = new Convert({ newline: true });
const [hidden, setHidden] = useState(true);
const template = chat.template;
@ -308,34 +313,35 @@ dark:prose-invert"
</span>
</>
) : (
<span
className="prose text-primary word-break-break-word dark:prose-invert"
data-testid={
"chat-message-" + chat.sender_name + "-" + chatMessage
}
>
{chatMessage}
</span>
<div className="flex flex-col">
<span
className="prose text-primary word-break-break-word dark:prose-invert"
data-testid={
"chat-message-" + chat.sender_name + "-" + chatMessage
}
>
{chatMessage}
</span>
{chat.files && (
<div className="my-2 flex flex-col gap-5">
{chat.files.map((file, index) => {
return (
<FileCardWrapper
index={index}
name={file.name}
type={file.type}
path={file.path}
/>
);
})}
</div>
)}
</div>
)}
</div>
)}
</div>
<div id={lastMessage ? "last-chat-message" : ""}></div>
{chat.files && (
<div className="my-2 w-full">
{chat.files.map((file, index) => {
return (
<div key={index} className="my-2 w-full">
<FileCard
fileName={file.name}
fileType={file.type}
content={file.path}
/>
</div>
);
})}
</div>
)}
</>
);
}

View file

@ -0,0 +1,30 @@
import ForwardedIconComponent from "../../../../../../../components/genericIconComponent";
import { Button } from "../../../../../../../components/ui/button";
// Floating download button shown over a file/image card while it is hovered.
// Renders nothing (undefined) when the card is not hovered.
export default function DownloadButton({
  isHovered,
  handleDownload,
}: {
  isHovered: boolean;
  handleDownload: () => void;
}): JSX.Element | undefined {
  if (!isHovered) {
    return undefined;
  }
  return (
    <div
      className={`absolute right-1 top-1 rounded-md bg-muted text-sm font-bold text-foreground `}
    >
      <Button
        variant={"none"}
        className="bg-transparent px-2 py-1 text-ring"
        onClick={handleDownload}
      >
        <ForwardedIconComponent
          name="DownloadCloud"
          className="h-5 w-5 bg-transparent text-current hover:scale-110"
        />
      </Button>
    </div>
  );
}

View file

@ -1,19 +1,21 @@
import { useState } from "react";
import IconComponent from "../../../../../components/genericIconComponent";
import { ForwardedIconComponent } from "../../../../../components/genericIconComponent";
import { BACKEND_URL, BASE_URL_API } from "../../../../../constants/constants";
import useFlowsManagerStore from "../../../../../stores/flowsManagerStore";
import { fileCardPropsType } from "../../../../../types/components";
import formatFileName from "../filePreviewChat/utils/format-file-name";
import DownloadButton from "./components/downloadButton/downloadButton";
import getClasses from "./utils/get-classes";
import handleDownload from "./utils/handle-download";
const imgTypes = new Set(["png", "jpg"]);
const imgTypes = new Set(["png", "jpg", "jpeg", "gif", "webp", "image"]);
export default function FileCard({
fileName,
content,
fileType,
}: fileCardPropsType): JSX.Element {
const handleDownload = (): void => {
//TODO: update download function
};
showFile = true,
}: fileCardPropsType): JSX.Element | undefined {
const [isHovered, setIsHovered] = useState(false);
const currentFlowId = useFlowsManagerStore((state) => state.currentFlowId);
function handleMouseEnter(): void {
@ -23,64 +25,57 @@ export default function FileCard({
setIsHovered(false);
}
if (imgTypes.has(fileType)) {
const fileWrapperClasses = getClasses(isHovered);
const imgSrc = `${BACKEND_URL.slice(
0,
BACKEND_URL.length - 1,
)}${BASE_URL_API}files/images/${content}`;
if (showFile) {
if (imgTypes.has(fileType)) {
return (
<div
className="inline-block w-full rounded-lg transition-all"
onMouseEnter={handleMouseEnter}
onMouseLeave={handleMouseLeave}
style={{ display: "inline-block" }}
>
<div className="relative w-[50%] rounded-lg border border-border">
<img
src={imgSrc}
alt="generated image"
className="m-0 h-auto w-auto rounded-lg border border-border p-0 transition-all"
/>
<DownloadButton
isHovered={isHovered}
handleDownload={() => handleDownload({ fileName, content })}
/>
</div>
</div>
);
}
return (
<div
className="relative h-1/4 w-1/4"
className={fileWrapperClasses}
onClick={() => handleDownload({ fileName, content })}
onMouseEnter={handleMouseEnter}
onMouseLeave={handleMouseLeave}
>
<img
src={`${BACKEND_URL.slice(
0,
BACKEND_URL.length - 1,
)}${BASE_URL_API}files/images/${content}`}
alt="generated image"
className="h-full w-full rounded-lg"
/>
{isHovered && (
<div className={`file-card-modal-image-div `}>
<button
className="file-card-modal-image-button "
onClick={handleDownload}
>
<IconComponent
name="DownloadCloud"
className="h-5 w-5 text-current hover:scale-110"
/>
</button>
<div className="ml-3 flex h-full w-full items-center gap-2 text-sm">
<ForwardedIconComponent name="File" className="h-8 w-8" />
<div className="flex flex-col">
<span className="font-bold">{formatFileName(fileName, 20)}</span>
<span>File</span>
</div>
)}
</div>
<DownloadButton
isHovered={isHovered}
handleDownload={() => handleDownload({ fileName, content })}
/>
</div>
);
}
return (
<button onClick={handleDownload} className="file-card-modal-button">
<div className="file-card-modal-div">
{" "}
{imgTypes.has(fileType) ? (
<img
src={`${BACKEND_URL.slice(
0,
BACKEND_URL.length - 1,
)}${BASE_URL_API}files/images/${content}`}
alt=""
className="h-8 w-8"
/>
) : (
<IconComponent name="File" className="h-8 w-8" />
)}
<div className="file-card-modal-footer">
{" "}
<div className="file-card-modal-name">{fileName}</div>
<div className="file-card-modal-type">{fileType}</div>
</div>
<IconComponent
name="DownloadCloud"
className="ml-auto h-6 w-6 text-current"
/>
</div>
</button>
);
return undefined;
}

View file

@ -0,0 +1,5 @@
// Tailwind classes for the non-image file card wrapper shown in chat messages.
// Hovering adds a stronger shadow on top of the base drop-shadow transition.
export default function getClasses(isHovered: boolean): string {
  // FIX: removed the dead `${false ? "h-20 w-20" : "h-20 w-80"}` ternary —
  // the literal `false` condition meant the card was always h-20 w-80.
  const hoverShadow = isHovered ? "shadow-md" : "";
  return `relative h-20 w-80 cursor-pointer rounded-lg border border-ring bg-muted shadow transition duration-300 hover:drop-shadow-lg ${hoverShadow}`;
}

View file

@ -0,0 +1,43 @@
import {
BACKEND_URL,
BASE_URL_API,
} from "../../../../../../constants/constants";
// Module-level re-entrancy guard: prevents overlapping downloads if the user
// clicks repeatedly before the first request finishes.
let isDownloading = false;

// Fetches `content` (a server-side file path) from the backend download
// endpoint and saves it locally under `fileName` by clicking a temporary
// <a download> link. Failures are logged, never thrown.
export default async function handleDownload({
  fileName,
  content,
}: {
  fileName: string;
  content: string;
}): Promise<void> {
  if (isDownloading) return;
  isDownloading = true;
  try {
    const baseUrl = BACKEND_URL.slice(0, BACKEND_URL.length - 1);
    const response = await fetch(
      `${baseUrl}${BASE_URL_API}files/download/${content}`,
    );
    if (!response.ok) {
      throw new Error("Network response was not ok");
    }
    const blob = await response.blob();
    const url = URL.createObjectURL(blob);
    const link = document.createElement("a");
    link.href = url;
    link.setAttribute("download", fileName); // Set the filename
    document.body.appendChild(link);
    link.click();
    link.remove();
    URL.revokeObjectURL(url); // Clean up the URL object
  } catch (error) {
    console.error("Failed to download file:", error);
  } finally {
    isDownloading = false;
  }
}

View file

@ -1,6 +1,11 @@
import { useState } from "react";
import IconComponent from "../../../../../components/genericIconComponent";
import LoadingComponent from "../../../../../components/loadingComponent";
import IconComponent, {
ForwardedIconComponent,
} from "../../../../../components/genericIconComponent";
import { Skeleton } from "../../../../../components/ui/skeleton";
import formatFileName from "./utils/format-file-name";
const supImgFiles = ["png", "jpg", "jpeg", "gif", "bmp", "webp", "image"];
export default function FilePreview({
error,
@ -13,35 +18,93 @@ export default function FilePreview({
error: boolean;
onDelete: () => void;
}) {
const fileType = file.type.toLowerCase();
const isImage = supImgFiles.some((type) => fileType.includes(type));
const [isHovered, setIsHovered] = useState(false);
return (
<div className="relative inline-block w-56">
{loading && <LoadingComponent remSize={5} />}
{error && <div>Error...</div>}
<div
className={`relative overflow-hidden rounded-md bg-background p-4 transition duration-300 ${
isHovered ? "shadow-md" : ""
}`}
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
>
<img
src={URL.createObjectURL(file)}
alt="file"
className="block h-auto w-full"
/>
{isHovered && (
<div className="absolute inset-0 flex items-center justify-center bg-black bg-opacity-30">
<div
className="cursor-pointer rounded-full bg-white bg-opacity-80 p-2"
onClick={onDelete}
<div className="relative inline-block">
{loading ? (
isImage ? (
<div className="flex h-20 w-20 items-center justify-center rounded-md border border-ring bg-background ">
<svg
aria-hidden="true"
className={`h-10 w-10 animate-spin fill-black text-muted`}
viewBox="0 0 100 101"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<IconComponent name="trash" className="stroke-red-500" />
<path
d="M100 50.5908C100 78.2051 77.6142 100.591 50 100.591C22.3858 100.591 0 78.2051 0 50.5908C0 22.9766 22.3858 0.59082 50 0.59082C77.6142 0.59082 100 22.9766 100 50.5908ZM9.08144 50.5908C9.08144 73.1895 27.4013 91.5094 50 91.5094C72.5987 91.5094 90.9186 73.1895 90.9186 50.5908C90.9186 27.9921 72.5987 9.67226 50 9.67226C27.4013 9.67226 9.08144 27.9921 9.08144 50.5908Z"
fill="currentColor"
/>
<path
d="M93.9676 39.0409C96.393 38.4038 97.8624 35.9116 97.0079 33.5539C95.2932 28.8227 92.871 24.3692 89.8167 20.348C85.8452 15.1192 80.8826 10.7238 75.2124 7.41289C69.5422 4.10194 63.2754 1.94025 56.7698 1.05124C51.7666 0.367541 46.6976 0.446843 41.7345 1.27873C39.2613 1.69328 37.813 4.19778 38.4501 6.62326C39.0873 9.04874 41.5694 10.4717 44.0505 10.1071C47.8511 9.54855 51.7191 9.52689 55.5402 10.0491C60.8642 10.7766 65.9928 12.5457 70.6331 15.2552C75.2735 17.9648 79.3347 21.5619 82.5849 25.841C84.9175 28.9121 86.7997 32.2913 88.1811 35.8758C89.083 38.2158 91.5421 39.6781 93.9676 39.0409Z"
fill="currentFill"
/>
</svg>
</div>
) : (
<div
className={`relative ${
isImage ? "h-20 w-20" : "h-20 w-80"
} cursor-wait rounded-lg border border-ring bg-background transition duration-300 ${
isHovered ? "shadow-md" : ""
}`}
>
<div className="ml-3 flex h-full w-full items-center gap-2 text-sm">
<Skeleton className="h-10 w-10 rounded-lg" />
<div className="flex flex-col gap-1">
<Skeleton className="h-3 w-48" />
<Skeleton className="h-3 w-10" />
</div>
</div>
</div>
)}
</div>
)
) : error ? (
<div>Error...</div>
) : (
<div
className={`relative mt-2 ${
isImage ? "h-20 w-20" : "h-20 w-80"
} cursor-pointer rounded-lg border border-ring bg-background transition duration-300 ${
isHovered ? "shadow-md" : ""
}`}
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
>
{isImage ? (
<img
src={URL.createObjectURL(file)}
alt="file"
className="block h-full w-full rounded-md border border-border"
/>
) : (
<div className="ml-3 flex h-full w-full items-center gap-2 text-sm">
<ForwardedIconComponent name="File" className="h-8 w-8" />
<div className="flex flex-col">
<span className="font-bold">{formatFileName(file.name)}</span>
<span>File</span>
</div>
</div>
)}
{isHovered && (
<div
className={`absolute ${
isImage ? "bottom-16 left-16" : "bottom-16 left-[19em]"
} flex h-5 w-5 items-center justify-center`}
>
<div
className="flex h-7 w-7 cursor-pointer items-center justify-center rounded-full bg-gray-200 p-2 transition-all"
onClick={onDelete}
>
<IconComponent name="X" className="stroke-slate-950 stroke-2" />
</div>
</div>
)}
</div>
)}
</div>
);
}

View file

@ -0,0 +1,14 @@
// Truncates a file name for display: keeps the first `numberToTruncate`
// characters of the base name, an ellipsis, and the extension. Names that are
// already short enough — or whose base name is 6 chars or fewer — are returned
// unchanged.
export default function formatFileName(
  name: string,
  numberToTruncate: number = 25,
): string {
  // Short enough: nothing to truncate.
  if (name[numberToTruncate] === undefined) {
    return name;
  }
  const dotIndex = name.lastIndexOf(".");
  // FIX: with no "." at all, the old code took slice(0, -1) as the base name
  // (dropping one char) and the whole name as the "extension", producing output
  // longer than the input. Truncate the raw name instead.
  if (dotIndex === -1) {
    return `${name.slice(0, numberToTruncate)}...`;
  }
  const fileExtension = name.slice(dotIndex + 1); // extension without the dot
  const baseName = name.slice(0, dotIndex); // base name without the extension
  if (baseName.length > 6) {
    return `${baseName.slice(0, numberToTruncate)}...${fileExtension}`;
  }
  return name;
}

View file

@ -11,9 +11,10 @@ import useFlowStore from "../../../../stores/flowStore";
import useFlowsManagerStore from "../../../../stores/flowsManagerStore";
import { VertexBuildTypeAPI, sendAllProps } from "../../../../types/api";
import { ChatMessageType, ChatOutputType } from "../../../../types/chat";
import { chatViewProps } from "../../../../types/components";
import { FilePreviewType, chatViewProps } from "../../../../types/components";
import { classNames } from "../../../../utils/utils";
import ChatInput from "./chatInput";
import useDragAndDrop from "./chatInput/hooks/use-drag-and-drop";
import ChatMessage from "./chatMessage";
export default function ChatView({
@ -61,13 +62,12 @@ export default function ChatView({
const chatMessages: ChatMessageType[] = chatOutputResponses
.sort((a, b) => Date.parse(a.timestamp) - Date.parse(b.timestamp))
//
.filter(
(output) => output.data.messages && output.data.messages.length > 0,
)
.filter((output) => output.data.message)
.map((output, index) => {
try {
const { sender, message, sender_name, stream_url, files } = output
.data.messages[0] as ChatOutputType;
.data.message as ChatOutputType;
const is_ai = sender === "Machine" || sender === null;
return {
isSend: !is_ai,
@ -154,9 +154,23 @@ export default function ChatView({
// return newChatHistory;
// });
}
const [files, setFiles] = useState<FilePreviewType[]>([]);
const [isDragging, setIsDragging] = useState(false);
const { dragOver, dragEnter, dragLeave, onDrop } = useDragAndDrop(
setIsDragging,
setFiles,
currentFlowId,
);
return (
<div className="eraser-column-arrangement">
<div
className="eraser-column-arrangement"
onDragOver={dragOver}
onDragEnter={dragEnter}
onDragLeave={dragLeave}
onDrop={onDrop}
>
<div className="eraser-size">
<div className="eraser-position">
<Button
@ -258,6 +272,9 @@ export default function ChatView({
setChatValue(value);
}}
inputRef={ref}
files={files}
setFiles={setFiles}
isDragging={isDragging}
/>
</div>
</div>

View file

@ -3,33 +3,29 @@ import AccordionComponent from "../../components/accordionComponent";
import IconComponent from "../../components/genericIconComponent";
import ShadTooltip from "../../components/shadTooltipComponent";
import { Badge } from "../../components/ui/badge";
import { Button } from "../../components/ui/button";
import {
Tabs,
TabsContent,
TabsList,
TabsTrigger,
} from "../../components/ui/tabs";
import {
CHAT_FORM_DIALOG_SUBTITLE,
OUTPUTS_MODAL_TITLE,
TEXT_INPUT_MODAL_TITLE,
} from "../../constants/constants";
import { CHAT_FORM_DIALOG_SUBTITLE } from "../../constants/constants";
import { InputOutput } from "../../constants/enums";
import { getMessagesTable } from "../../controllers/API";
import useAlertStore from "../../stores/alertStore";
import useFlowStore from "../../stores/flowStore";
import useFlowsManagerStore from "../../stores/flowsManagerStore";
import { useMessagesStore } from "../../stores/messagesStore";
import { IOModalPropsType } from "../../types/components";
import { NodeDataType, NodeType } from "../../types/flow";
import { updateVerticesOrder } from "../../utils/buildUtils";
import { cn } from "../../utils/utils";
import BaseModal from "../baseModal";
import IOFieldView from "./components/IOFieldView";
import ChatView from "./components/chatView";
import { getMessagesTable } from "../../controllers/API";
import { useMessagesStore } from "../../stores/messagesStore";
import SessionView from "./components/SessionView";
import useRemoveSession from "./components/SessionView/hooks";
import useAlertStore from "../../stores/alertStore";
import { Button } from "../../components/ui/button";
import ChatView from "./components/chatView";
export default function IOModal({
children,
@ -93,12 +89,6 @@ export default function IOModal({
return updateVerticesOrder(currentFlow!.id, null);
}
// useEffect(() => {
// if (open) {
// updateVertices();
// }
// }, [open, currentFlow]);
async function sendMessage({
repeat = 1,
files,
@ -121,10 +111,14 @@ export default function IOModal({
setLockChat(false);
});
}
const { rows, columns } = await getMessagesTable("union", currentFlow!.id);
setMessages(rows);
setColumns(columns);
setLockChat(false);
if (chatInput) {
setNode(chatInput.id, (node: NodeType) => {
const newNode = { ...node };
newNode.data.node!.template["input_value"].value = chatValue;
return newNode;
});

View file

@ -23,8 +23,6 @@ const useColumnDefs = (
: templateParam.name) ?? params.data.key
);
},
tooltipField: "display_name",
tooltipComponent: TableTooltipRender,
wrapText: true,
autoHeight: true,
flex: 1,
@ -35,7 +33,6 @@ const useColumnDefs = (
headerName: "Description",
field: "info",
tooltipField: "info",
tooltipComponent: TableTooltipRender,
wrapText: true,
autoHeight: true,
flex: 2,

View file

@ -92,6 +92,7 @@ const EditNodeModal = forwardRef(
<div className="h-full">
{nodeLength > 0 && (
<TableComponent
key={"editNode"}
onGridReady={(params) => {
setGridApi(params.api);
}}

View file

@ -86,6 +86,7 @@ export default function FlowLogsModal({
</TabsList>
</Tabs>
<TableComponent
key={activeTab}
readOnlyEdit
className="h-max-full h-full w-full"
pagination={rows.length === 0 ? false : true}

View file

@ -6,13 +6,11 @@ import { cn } from "../../../../../../utils/utils";
type ApiKeyHeaderComponentProps = {
selectedRows: string[];
handleDeleteKey: () => void;
fetchApiKeys: () => void;
userId: string;
};
const ApiKeyHeaderComponent = ({
selectedRows,
handleDeleteKey,
fetchApiKeys,
userId,
}: ApiKeyHeaderComponentProps) => {
@ -30,20 +28,6 @@ const ApiKeyHeaderComponent = ({
<p className="text-sm text-muted-foreground">{API_PAGE_PARAGRAPH}</p>
</div>
<div className="flex flex-shrink-0 items-center gap-2">
<Button
data-testid="api-key-button-store"
variant="primary"
className="group px-2"
disabled={selectedRows.length === 0}
onClick={handleDeleteKey}
>
<ForwardedIconComponent
name="Trash2"
className={cn(
"h-5 w-5 text-destructive group-disabled:text-primary",
)}
/>
</Button>
<SecretKeyModal data={userId} onCloseModal={fetchApiKeys}>
<Button data-testid="api-key-button-store" variant="primary">
<ForwardedIconComponent name="Plus" className="mr-2 w-4" />

View file

@ -45,13 +45,14 @@ export default function ApiKeysPage() {
<div className="flex h-full w-full flex-col justify-between gap-6">
<ApiKeyHeaderComponent
selectedRows={selectedRows}
handleDeleteKey={handleDeleteKey}
fetchApiKeys={fetchApiKeys}
userId={userId}
/>
<div className="flex h-full w-full flex-col justify-between">
<TableComponent
key={"apiKeys"}
onDelete={handleDeleteKey}
overlayNoRowsTemplate="No data available"
onSelectionChanged={(event: SelectionChangedEvent) => {
setSelectedRows(event.api.getSelectedRows().map((row) => row.id));

View file

@ -143,20 +143,6 @@ export default function GlobalVariablesPage() {
</p>
</div>
<div className="flex flex-shrink-0 items-center gap-2">
<Button
data-testid="api-key-button-store"
variant="primary"
className="group px-2"
disabled={selectedRows.length === 0}
onClick={removeVariables}
>
<IconComponent
name="Trash2"
className={cn(
"h-5 w-5 text-destructive group-disabled:text-primary",
)}
/>
</Button>
<AddNewVariableButton>
<Button data-testid="api-key-button-store" variant="primary">
<IconComponent name="Plus" className="mr-2 w-4" />
@ -168,6 +154,7 @@ export default function GlobalVariablesPage() {
<div className="flex h-full w-full flex-col justify-between">
<TableComponent
key={"globalVariables"}
overlayNoRowsTemplate="No data available"
onSelectionChanged={(event: SelectionChangedEvent) => {
setSelectedRows(event.api.getSelectedRows().map((row) => row.name));
@ -177,6 +164,7 @@ export default function GlobalVariablesPage() {
pagination={true}
columnDefs={colDefs}
rowData={rowData}
onDelete={removeVariables}
/>
</div>
</div>

View file

@ -113,6 +113,7 @@ export default function ShortcutsPage() {
</div>
<div className="flex h-full w-full flex-col justify-between">
<TableComponent
key={"shortcuts"}
pagination={false}
columnDefs={colDefs}
rowData={nodesRowData}

View file

@ -8,7 +8,6 @@ type HeaderMessagesComponentProps = {
};
const HeaderMessagesComponent = ({
selectedRows,
handleRemoveMessages,
}: HeaderMessagesComponentProps) => {
return (
<>
@ -26,22 +25,6 @@ const HeaderMessagesComponent = ({
behaviors.
</p>
</div>
<div className="flex flex-shrink-0 items-center gap-2">
<Button
data-testid="api-key-button-store"
variant="primary"
className="group px-2"
disabled={selectedRows.length === 0}
onClick={handleRemoveMessages}
>
<ForwardedIconComponent
name="Trash2"
className={cn(
"h-5 w-5 text-destructive group-disabled:text-primary",
)}
/>
</Button>
</div>
</div>
</>
);

View file

@ -53,6 +53,8 @@ export default function MessagesPage() {
<div className="flex h-full w-full flex-col justify-between">
<TableComponent
key={"sessionView"}
onDelete={handleRemoveMessages}
readOnlyEdit
onCellEditRequest={(event) => {
handleUpdateMessage(event);

View file

@ -16,18 +16,14 @@ import {
import { BuildStatus } from "../constants/enums";
import { getFlowPool } from "../controllers/API";
import { VertexBuildTypeAPI } from "../types/api";
import { ChatInputType, ChatOutputType } from "../types/chat";
import {
NodeDataType,
NodeType,
sourceHandleType,
targetHandleType,
} from "../types/flow";
import {
ChatOutputType,
FlowStoreType,
VertexLayerElementType,
chatInputType,
} from "../types/zustand/flow";
import { FlowStoreType, VertexLayerElementType } from "../types/zustand/flow";
import { buildVertices } from "../utils/buildUtils";
import {
checkChatInput,
@ -77,7 +73,7 @@ const useFlowStore = create<FlowStoreType>((set, get) => ({
},
updateFlowPool: (
nodeId: string,
data: VertexBuildTypeAPI | ChatOutputType | chatInputType,
data: VertexBuildTypeAPI | ChatOutputType | ChatInputType,
buildId?: string,
) => {
let newFlowPool = cloneDeep({ ...get().flowPool });
@ -94,9 +90,9 @@ const useFlowStore = create<FlowStoreType>((set, get) => ({
}
//update data results
else {
newFlowPool[nodeId][index].data.messages[0] = data as
newFlowPool[nodeId][index].data.message = data as
| ChatOutputType
| chatInputType;
| ChatInputType;
}
}
get().setFlowPool(newFlowPool);
@ -492,8 +488,7 @@ const useFlowStore = create<FlowStoreType>((set, get) => ({
(id) => !vertexBuildData.inactivated_vertices?.includes(id),
);
const top_level_vertices = vertexBuildData.top_level_vertices.filter(
(vertex) =>
!vertexBuildData.inactivated_vertices?.includes(vertex.id),
(vertex) => !vertexBuildData.inactivated_vertices?.includes(vertex),
);
const nextVertices: VertexLayerElementType[] = zip(
next_vertices_ids,
@ -518,7 +513,7 @@ const useFlowStore = create<FlowStoreType>((set, get) => ({
}
get().addDataToFlowPool(
{ ...vertexBuildData, buildId: runId },
{ ...vertexBuildData, run_id: runId },
vertexBuildData.id,
);

View file

@ -1,5 +1,5 @@
import { Edge, Node, Viewport } from "reactflow";
import { ChatOutputType, chatInputType } from "../chat";
import { ChatInputType, ChatOutputType } from "../chat";
import { FlowType } from "../flow";
//kind and class are just representative names to represent the actual structure of the object received by the API
export type APIDataType = { [key: string]: APIKindType };
@ -158,19 +158,20 @@ export type VertexBuildTypeAPI = {
data: VertexDataTypeAPI;
timestamp: string;
params: any;
messages: ChatOutputType[] | chatInputType[];
messages: ChatOutputType[] | ChatInputType[];
};
// data is the object received by the API
// it has results, artifacts, timedelta, duration
export type VertexDataTypeAPI = {
results: { [key: string]: string };
logs: any[];
messages: ChatOutputType[] | chatInputType[];
logs: { message: any; type: string }[];
messages: ChatOutputType[] | ChatInputType[];
inactive?: boolean;
timedelta?: number;
duration?: string;
artifacts?: any;
message: ChatOutputType | ChatInputType;
};
export type CodeErrorDataTypeAPI = {

View file

@ -22,15 +22,19 @@ export type ChatOutputType = {
files?: Array<{ path: string; type: string; name: string }>;
};
export type chatInputType = {
result: string;
export type ChatInputType = {
message: string;
sender: string;
sender_name: string;
stream_url?: string;
files?: Array<{ path: string; type: string; name: string }>;
};
export type FlowPoolObjectType = {
timestamp: string;
valid: boolean;
// list of chat outputs or list of chat inputs
messages: Array<ChatOutputType | chatInputType> | [];
data: { artifacts?: any; results: any | ChatOutputType | chatInputType };
messages: Array<ChatOutputType | ChatInputType> | [];
data: { artifacts: any; results: any | ChatOutputType | ChatInputType };
id: string;
};

View file

@ -487,7 +487,12 @@ export type headerFlowsType = {
style?: FlowStyleType;
};
export type chatInputType = {
export type ChatInputType = {
isDragging: boolean;
files: FilePreviewType[];
setFiles: (
files: FilePreviewType[] | ((prev: FilePreviewType[]) => FilePreviewType[]),
) => void;
chatValue: string;
inputRef: {
current: any;
@ -526,6 +531,7 @@ export type fileCardPropsType = {
fileName: string;
content: string;
fileType: string;
showFile?: boolean;
};
export type nodeToolbarPropsType = {

View file

@ -9,28 +9,16 @@ import {
} from "reactflow";
import { BuildStatus } from "../../../constants/enums";
import { VertexBuildTypeAPI } from "../../api";
import { ChatInputType, ChatOutputType } from "../../chat";
import { FlowState } from "../../tabs";
export type chatInputType = {
result: string;
files?: string[];
};
export type ChatOutputType = {
message: string;
sender: string;
sender_name: string;
stream_url?: string;
files?: string[];
};
export type FlowPoolObjectType = {
timestamp: string;
valid: boolean;
messages: Array<ChatOutputType | chatInputType> | [];
messages: Array<ChatOutputType | ChatInputType> | [];
data: {
artifacts: any | ChatOutputType | chatInputType;
results: any | ChatOutputType | chatInputType;
artifacts: any | ChatOutputType | ChatInputType;
results: any | ChatOutputType | ChatInputType;
};
duration?: string;
progress?: number;
@ -45,8 +33,8 @@ export type FlowPoolObjectTypeNew = {
timestamp: string;
valid: boolean;
data: {
logs?: any | ChatOutputType | chatInputType;
results: any | ChatOutputType | chatInputType;
logs?: any | ChatOutputType | ChatInputType;
results: any | ChatOutputType | ChatInputType;
};
duration?: string;
progress?: number;
@ -167,7 +155,7 @@ export type FlowStoreType = {
};
updateFlowPool: (
nodeId: string,
data: VertexBuildTypeAPI | ChatOutputType | chatInputType,
data: VertexBuildTypeAPI | ChatOutputType | ChatInputType,
buildId?: string,
) => void;
getNodePosition: (nodeId: string) => { x: number; y: number };

View file

@ -1,4 +1,4 @@
export type chatInputType = {
export type ChatInputType = {
result: string;
};
@ -12,7 +12,7 @@ export type FlowPoolObjectType = {
timestamp: string;
valid: boolean;
params: any;
data: { artifacts: any; results: any | ChatOutputType | chatInputType };
data: { artifacts: any; results: any | ChatOutputType | ChatInputType };
id: string;
};

View file

@ -93,6 +93,7 @@ import {
Package2,
Palette,
Paperclip,
PaperclipIcon,
Pencil,
PencilLine,
Pin,
@ -535,6 +536,7 @@ export const nodeIconsLucide: iconsType = {
FolderPlusIcon,
FolderIcon,
Discord: FaDiscord,
PaperclipIcon,
RotateCcw,
Settings,
Streamlit,

View file

@ -1,7 +1,6 @@
import { ColDef, ColGroupDef } from "ag-grid-community";
import clsx, { ClassValue } from "clsx";
import { twMerge } from "tailwind-merge";
import TableAutoCellRender from "../components/tableComponent/components/tableAutoCellRender";
import { APIDataType, TemplateVariableType } from "../types/api";
import {
groupedObjType,
@ -10,6 +9,7 @@ import {
} from "../types/components";
import { NodeType } from "../types/flow";
import { FlowState } from "../types/tabs";
import TableAutoCellRender from "../components/tableComponent/components/tableAutoCellRender";
export function classNames(...classes: Array<string>): string {
return classes.filter(Boolean).join(" ");

Binary file not shown.

After

Width:  |  Height:  |  Size: 31 KiB

View file

@ -1,4 +1,5 @@
import { expect, test } from "@playwright/test";
import { readFileSync } from "fs";
test("user must interact with chat with Input/Output", async ({ page }) => {
if (!process.env.CI) {
@ -147,3 +148,87 @@ test("user must be able to see output inspection", async ({ page }) => {
await page.getByText("Type", { exact: true }).isVisible();
await page.getByText("User", { exact: true }).last().isVisible();
});
test("user must be able to send an image on chat", async ({ page }) => {
if (!process.env.CI) {
dotenv.config();
dotenv.config({ path: path.resolve(__dirname, "../../.env") });
}
await page.goto("/");
await page.waitForTimeout(1000);
let modalCount = 0;
try {
const modalTitleElement = await page?.getByTestId("modal-title");
if (modalTitleElement) {
modalCount = await modalTitleElement.count();
}
} catch (error) {
modalCount = 0;
}
while (modalCount === 0) {
await page.getByText("New Project", { exact: true }).click();
await page.waitForTimeout(5000);
modalCount = await page.getByTestId("modal-title")?.count();
}
await page.getByRole("heading", { name: "Basic Prompting" }).click();
await page.waitForTimeout(1000);
await page.getByTitle("fit view").click();
await page.getByTitle("zoom out").click();
await page.getByTitle("zoom out").click();
await page.getByTitle("zoom out").click();
if (!process.env.OPENAI_API_KEY) {
//You must set the OPENAI_API_KEY on .env file to run this test
expect(false).toBe(true);
}
await page
.getByTestId("popover-anchor-input-openai_api_key")
.fill(process.env.OPENAI_API_KEY ?? "");
await page.getByText("Chat Input", { exact: true }).click();
await page.getByTestId("more-options-modal").click();
await page.getByTestId("edit-button-modal").click();
await page.getByTestId("toggle-edit-return_record").click();
expect(
await page.getByTestId("toggle-edit-return_record").isChecked(),
).toBeTruthy();
await page.getByText("Save Changes").click();
await page.getByText("Playground", { exact: true }).click();
const jsonContent = readFileSync(
"src/frontend/tests/end-to-end/assets/chain.png",
"utf-8",
);
// Create the DataTransfer and File
const dataTransfer = await page.evaluateHandle((data) => {
const dt = new DataTransfer();
// Convert the buffer to a hex array
const file = new File([data], "chain.png", {
type: "application/json",
});
dt.items.add(file);
return dt;
}, jsonContent);
const element = await page.getByPlaceholder("Send a message...");
await element.dispatchEvent("drop", { dataTransfer });
await page.getByText("chain.png").isVisible();
await page.getByTestId("icon-LucideSend").click();
await page.waitForTimeout(2000);
await page.getByText("chain.png").isVisible();
await page.getByText("Close", { exact: true }).click();
await page.getByTestId("icon-ScanEye").last().click();
await page.getByText("Restart").isHidden();
});

View file

@ -4,6 +4,7 @@ from uuid import UUID, uuid4
import pytest
from fastapi import status
from fastapi.testclient import TestClient
from langflow.custom.directory_reader.directory_reader import DirectoryReader
from langflow.services.deps import get_settings_service
@ -269,7 +270,9 @@ def test_get_all(client: TestClient, logged_in_headers):
all_names = [component_name for _, components in response.json().items() for component_name in components]
json_response = response.json()
# We need to test the custom nodes
assert len(all_names) == len(files)
assert len(all_names) <= len(
files
) # Less or equal because we might have some files that don't have the dependencies installed
assert "ChatInput" in json_response["inputs"]
assert "Prompt" in json_response["inputs"]
assert "ChatOutput" in json_response["outputs"]