diff --git a/docker/build_and_push.Dockerfile b/docker/build_and_push.Dockerfile index 29f9294a6..63521d06a 100644 --- a/docker/build_and_push.Dockerfile +++ b/docker/build_and_push.Dockerfile @@ -1,13 +1,6 @@ # syntax=docker/dockerfile:1 # Keep this syntax directive! It's used to enable Docker BuildKit -FROM node:20-bookworm-slim as builder-node -WORKDIR /app -COPY src/frontend/package.json src/frontend/package-lock.json ./ -RUN npm install -COPY src/frontend/ ./ -RUN npm run build - ################################ # BUILDER-BASE @@ -56,7 +49,6 @@ COPY pyproject.toml poetry.lock README.md ./ COPY src/ ./src COPY scripts/ ./scripts RUN python -m pip install requests --user && cd ./scripts && python update_dependencies.py -COPY --from=builder-node /app/build ./src/backend/base/langflow/frontend RUN $POETRY_HOME/bin/poetry lock --no-update \ && $POETRY_HOME/bin/poetry build -f wheel \ && $POETRY_HOME/bin/poetry run pip install dist/*.whl --force-reinstall diff --git a/docs/docs/deployment/backend-only.md b/docs/docs/deployment/backend-only.md index 9c408ad17..fb5efdfdb 100644 --- a/docs/docs/deployment/backend-only.md +++ b/docs/docs/deployment/backend-only.md @@ -4,7 +4,7 @@ You can run Langflow in `--backend-only` mode to expose your Langflow app as an Start langflow in backend-only mode with `python3 -m langflow run --backend-only`. -The terminal prints ` Welcome to ⛓ Langflow `, and a blank window opens at `http://127.0.0.1:7864/all`. +The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`. Langflow will now serve requests to its API without the frontend running. ## Prerequisites @@ -42,7 +42,7 @@ Note the flow ID of `ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef`. You can find this ID 1. Stop Langflow with Ctrl+C. 2. Start langflow in backend-only mode with `python3 -m langflow run --backend-only`. - The terminal prints ` Welcome to ⛓ Langflow `, and a blank window opens at `http://127.0.0.1:7864/all`. + The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`. Langflow will now serve requests to its API. 3. Run the curl code you copied from the UI. 
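If the snippet you copied from the UI isn't at hand, a minimal Python equivalent of that curl call looks like the sketch below. This is hedged: the flow ID and port are the ones used earlier in this guide, and the payload your UI generated may differ.

```python
# Minimal sketch of the backend-only API call (assumes the flow ID and port
# shown in this guide; the UI-generated snippet is authoritative).
import requests

url = "http://127.0.0.1:7864/api/v1/run/ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef"
payload = {"input_value": "Hello!", "output_type": "chat", "input_type": "chat"}

# stream=false returns the complete result in a single JSON response.
response = requests.post(url, json=payload, params={"stream": "false"}, timeout=60)
response.raise_for_status()
print(response.json())
```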
You should get a result like this: diff --git a/poetry.lock b/poetry.lock index 4bf30b352..71be043b6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -367,13 +367,13 @@ files = [ [[package]] name = "bce-python-sdk" -version = "0.9.11" +version = "0.9.14" description = "BCE SDK for python" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,<4,>=2.7" files = [ - {file = "bce_python_sdk-0.9.11-py3-none-any.whl", hash = "sha256:3afb9717f6c0c5f5fe3104a8bea4c111bf2ab3fe87ae73b05492566bc2b5d11a"}, - {file = "bce_python_sdk-0.9.11.tar.gz", hash = "sha256:d9e977f059fef6466eebdbb34ad1e27b6f76ef90338807ab959693a78a761e7d"}, + {file = "bce_python_sdk-0.9.14-py3-none-any.whl", hash = "sha256:5704aa454151ee608b01ddda7531457433f9b4bb8afbd00706dd368f3b4339a1"}, + {file = "bce_python_sdk-0.9.14.tar.gz", hash = "sha256:7cbd182ec1e21034f10d3cdb812f3171d31908f1a783d6cf643039272942d8e8"}, ] [package.dependencies] @@ -2556,13 +2556,13 @@ uritemplate = ">=3.0.1,<5" [[package]] name = "google-auth" -version = "2.29.0" +version = "2.30.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google-auth-2.29.0.tar.gz", hash = "sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360"}, - {file = "google_auth-2.29.0-py2.py3-none-any.whl", hash = "sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415"}, + {file = "google-auth-2.30.0.tar.gz", hash = "sha256:ab630a1320f6720909ad76a7dbdb6841cdf5c66b328d690027e4867bdfb16688"}, + {file = "google_auth-2.30.0-py2.py3-none-any.whl", hash = "sha256:8df7da660f62757388b8a7f249df13549b3373f24388cb5d2f1dd91cc18180b5"}, ] [package.dependencies] @@ -2594,13 +2594,13 @@ httplib2 = ">=0.19.0" [[package]] name = "google-cloud-aiplatform" -version = "1.53.0" +version = "1.54.0" description = "Vertex AI API client library" optional = false python-versions = ">=3.8" files = [ - {file = "google-cloud-aiplatform-1.53.0.tar.gz", hash = "sha256:574cfad8ac5fa5d57ef717f5335ce05636a5fa9b8aeea0f5c325b46b9448e6b1"}, - {file = "google_cloud_aiplatform-1.53.0-py2.py3-none-any.whl", hash = "sha256:9dfb1f110e6d4795b45afcfab79108fc5c8ed9aa4eaf899e433bc2ca1b76c778"}, + {file = "google-cloud-aiplatform-1.54.0.tar.gz", hash = "sha256:6f5187d35a32951028465804fbb42b478362bf41e2b634ddd22b150299f6e1d8"}, + {file = "google_cloud_aiplatform-1.54.0-py2.py3-none-any.whl", hash = "sha256:7b3ed849b9fb59a01bd6f44444ccbb7d18495b867a26f913542f6b2d4c3de252"}, ] [package.dependencies] @@ -2621,7 +2621,7 @@ autologging = ["mlflow (>=1.27.0,<=2.1.1)"] cloud-profiler = ["tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "werkzeug (>=2.0.0,<2.1.0dev)"] datasets = ["pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)"] endpoint = ["requests (>=2.28.1)"] -full = ["cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nest-asyncio (>=1.0.0,<1.6.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "starlette (>=0.17.1)", "tensorflow (>=2.3.0,<3.0.0dev)", 
"tensorflow (>=2.3.0,<3.0.0dev)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)"] +full = ["cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nest-asyncio (>=1.0.0,<1.6.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "setuptools (<70.0.0)", "starlette (>=0.17.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)"] langchain = ["langchain (>=0.1.16,<0.2)", "langchain-core (<0.2)", "langchain-google-vertexai (<2)"] langchain-testing = ["absl-py", "cloudpickle (>=2.2.1,<4.0)", "langchain (>=0.1.16,<0.2)", "langchain-core (<0.2)", "langchain-google-vertexai (<2)", "pydantic (>=2.6.3,<3)", "pytest-xdist"] lit = ["explainable-ai-sdk (>=1.0.0)", "lit-nlp (==0.4.0)", "pandas (>=1.0.0)", "tensorflow (>=2.3.0,<3.0.0dev)"] @@ -2631,11 +2631,11 @@ prediction = ["docker (>=5.0.3)", "fastapi (>=0.71.0,<=0.109.1)", "httpx (>=0.23 preview = ["cloudpickle (<3.0)", "google-cloud-logging (<4.0)"] private-endpoints = ["requests (>=2.28.1)", "urllib3 (>=1.21.1,<1.27)"] rapid-evaluation = ["nest-asyncio (>=1.0.0,<1.6.0)", "pandas (>=1.0.0,<2.2.0)"] -ray = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)"] -ray-testing = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pytest-xdist", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "ray[train] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "scikit-learn", "tensorflow", "torch (>=2.0.0,<2.1.0)", "xgboost", "xgboost-ray"] +ray = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "setuptools (<70.0.0)"] +ray-testing = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pytest-xdist", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "ray[train] (==2.9.3)", "scikit-learn", "setuptools (<70.0.0)", "tensorflow", "torch (>=2.0.0,<2.1.0)", "xgboost", "xgboost-ray"] reasoningengine = ["cloudpickle (>=2.2.1,<4.0)", "pydantic (>=2.6.3,<3)"] tensorboard = ["tensorflow (>=2.3.0,<3.0.0dev)"] -testing = ["bigframes", "cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-api-core (>=2.11,<3.0.0)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "grpcio-testing", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "ipython", "kfp (>=2.6.0,<3.0.0)", "lit-nlp (==0.4.0)", "mlflow 
(>=1.27.0,<=2.1.1)", "nest-asyncio (>=1.0.0,<1.6.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyfakefs", "pytest-asyncio", "pytest-xdist", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "requests-toolbelt (<1.0.0)", "scikit-learn", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (==2.13.0)", "tensorflow (==2.16.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "torch (>=2.0.0,<2.1.0)", "torch (>=2.2.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)", "xgboost"] +testing = ["bigframes", "cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-api-core (>=2.11,<3.0.0)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "grpcio-testing", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "ipython", "kfp (>=2.6.0,<3.0.0)", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nest-asyncio (>=1.0.0,<1.6.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyfakefs", "pytest-asyncio", "pytest-xdist", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "requests-toolbelt (<1.0.0)", "scikit-learn", "setuptools (<70.0.0)", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (==2.13.0)", "tensorflow (==2.16.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "torch (>=2.0.0,<2.1.0)", "torch (>=2.2.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)", "xgboost"] vizier = ["google-vizier (>=0.1.6)"] xai = ["tensorflow (>=2.3.0,<3.0.0dev)"] @@ -4025,13 +4025,13 @@ adal = ["adal (>=1.0.2)"] [[package]] name = "langchain" -version = "0.2.2" +version = "0.2.3" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain-0.2.2-py3-none-any.whl", hash = "sha256:58ca0c47bcdd156da66f50a0a4fcedc49bf6950827f4a6b06c8c4842d55805f3"}, - {file = "langchain-0.2.2.tar.gz", hash = "sha256:9d61e50e9cdc2bea659bc5e6c03650ba048fda63a307490ae368e539f61a0d3a"}, + {file = "langchain-0.2.3-py3-none-any.whl", hash = "sha256:5dc33cd9c8008693d328b7cb698df69073acecc89ad9c2a95f243b3314f8d834"}, + {file = "langchain-0.2.3.tar.gz", hash = "sha256:81962cc72cce6515f7bd71e01542727870789bf8b666c6913d85559080c1a201"}, ] [package.dependencies] @@ -4047,20 +4047,6 @@ requests = ">=2,<3" SQLAlchemy = ">=1.4,<3" tenacity = ">=8.1.0,<9.0.0" -[package.extras] -azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"] -clarifai = ["clarifai (>=9.1.0)"] -cli = ["typer (>=0.9.0,<0.10.0)"] -cohere = ["cohere (>=4,<6)"] -docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"] -embeddings = 
["sentence-transformers (>=2,<3)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.1,<0.2)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] -javascript = ["esprima (>=4.0.1,<5.0.0)"] -llms = ["clarifai (>=9.1.0)", "cohere (>=4,<6)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"] -openai = ["openai (<2)", "tiktoken (>=0.7,<1.0)"] -qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"] -text-helpers = ["chardet (>=5.1.0,<6.0.0)"] - [[package]] name = "langchain-anthropic" version = "0.1.15" @@ -4127,13 +4113,13 @@ langchain-core = ">=0.1.42,<0.3" [[package]] name = "langchain-community" -version = "0.2.3" +version = "0.2.4" description = "Community contributed LangChain integrations." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_community-0.2.3-py3-none-any.whl", hash = "sha256:aa895545be2f3f4aa2fea36f6da2e3b4ec50ce61ec986e8f146901a1e9138138"}, - {file = "langchain_community-0.2.3.tar.gz", hash = "sha256:a3c35af215e47b700e7cb4e548fa8b45c6d46d52b5a5a65af2577c5a0104fc9f"}, + {file = "langchain_community-0.2.4-py3-none-any.whl", hash = "sha256:8582e9800f4837660dc297cccd2ee1ddc1d8c440d0fe8b64edb07620f0373b0e"}, + {file = "langchain_community-0.2.4.tar.gz", hash = "sha256:2bb6a1a36b8500a564d25d76469c02457b1a7c3afea6d4a609a47c06b993e3e4"}, ] [package.dependencies] @@ -4148,19 +4134,15 @@ requests = ">=2,<3" SQLAlchemy = ">=1.4,<3" tenacity = ">=8.1.0,<9.0.0" -[package.extras] -cli = ["typer (>=0.9.0,<0.10.0)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-search-documents (==11.4.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.6,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cloudpathlib (>=0.18,<0.19)", "cloudpickle (>=2.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "friendli-client (>=1.2.4,<2.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "httpx-sse (>=0.4.0,<0.5.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "oracledb (>=2.2.0,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "premai (>=0.3.25,<0.4.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pyjwt (>=2.8.0,<3.0.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "simsimd (>=4.3.1,<5.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tidb-vector (>=0.0.3,<1.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "vdms (>=0.0.20,<0.0.21)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] - [[package]] name = "langchain-core" -version = "0.2.4" +version = "0.2.5" description = "Building applications with LLMs 
through composability" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_core-0.2.4-py3-none-any.whl", hash = "sha256:5212f7ec78a525e88a178ed3aefe2fd7134b03fb92573dfbab9914f1d92d6ec5"}, - {file = "langchain_core-0.2.4.tar.gz", hash = "sha256:82bdcc546eb0341cefcf1f4ecb3e49836fff003903afddda2d1312bb8491ef81"}, + {file = "langchain_core-0.2.5-py3-none-any.whl", hash = "sha256:abe5138f22acff23a079ec538be5268bbf97cf023d51987a0dd474d2a16cae3e"}, + {file = "langchain_core-0.2.5.tar.gz", hash = "sha256:4a5c2f56b22396a63ef4790043660e393adbfa6832b978f023ca996a04b8e752"}, ] [package.dependencies] @@ -4171,9 +4153,6 @@ pydantic = ">=1,<3" PyYAML = ">=5.3" tenacity = ">=8.1.0,<9.0.0" -[package.extras] -extended-testing = ["jinja2 (>=3,<4)"] - [[package]] name = "langchain-experimental" version = "0.0.60" @@ -4327,7 +4306,7 @@ types-requests = ">=2.31.0.2,<3.0.0.0" [[package]] name = "langflow-base" -version = "0.0.59" +version = "0.0.60" description = "A Python package with a built-in web application" optional = false python-versions = ">=3.10,<3.13" diff --git a/pyproject.toml b/pyproject.toml index 7e29d83c8..84ddf1144 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langflow" -version = "1.0.0a48" +version = "1.0.0a49" description = "A Python package with a built-in web application" authors = ["Langflow "] maintainers = [ diff --git a/src/backend/base/langflow/api/utils.py b/src/backend/base/langflow/api/utils.py index 1bfc58cdd..1dbd68d8f 100644 --- a/src/backend/base/langflow/api/utils.py +++ b/src/backend/base/langflow/api/utils.py @@ -86,9 +86,9 @@ def update_frontend_node_with_template_values(frontend_node, raw_frontend_node): update_template_values(frontend_node["template"], raw_frontend_node["template"]) - old_code = raw_frontend_node['template']['code']['value'] - new_code = frontend_node['template']['code']['value'] - frontend_node['edited'] = old_code != new_code + old_code = raw_frontend_node["template"]["code"]["value"] + new_code = frontend_node["template"]["code"]["value"] + frontend_node["edited"] = old_code != new_code return frontend_node @@ -208,16 +208,18 @@ def format_elapsed_time(elapsed_time: float) -> str: return f"{minutes} {minutes_unit}, {seconds} {seconds_unit}" -async def build_and_cache_graph_from_db( - flow_id: str, - session: Session, - chat_service: "ChatService", -): +async def build_and_cache_graph_from_db(flow_id: str, session: Session, chat_service: "ChatService"): """Build and cache the graph.""" flow: Optional[Flow] = session.get(Flow, flow_id) if not flow or not flow.data: raise ValueError("Invalid flow ID") graph = Graph.from_payload(flow.data, flow_id) + for vertex_id in graph._has_session_id_vertices: + vertex = graph.get_vertex(vertex_id) + if vertex is None: + raise ValueError(f"Vertex {vertex_id} not found") + if not vertex._raw_params.get("session_id"): + vertex.update_raw_params({"session_id": flow_id}) await chat_service.set_cache(flow_id, graph) return graph @@ -321,3 +323,4 @@ def parse_exception(exc): if hasattr(exc, "body"): return exc.body["message"] return str(exc) + return str(exc) diff --git a/src/backend/base/langflow/api/v1/chat.py b/src/backend/base/langflow/api/v1/chat.py index 6e2a4dd35..0ca6b4944 100644 --- a/src/backend/base/langflow/api/v1/chat.py +++ b/src/backend/base/langflow/api/v1/chat.py @@ -168,9 +168,9 @@ async def build_vertex( next_runnable_vertices, top_level_vertices, result_dict, - log_message, + params, valid, - log_type, + artifacts, vertex, ) = await 
graph.build_vertex( lock=lock, @@ -180,22 +180,22 @@ async def build_vertex( inputs_dict=inputs.model_dump() if inputs else {}, files=files, ) - + log_obj = Log(message=vertex.artifacts_raw, type=vertex.artifacts_type) result_data_response = ResultDataResponse(**result_dict.model_dump()) except Exception as exc: logger.exception(f"Error building vertex: {exc}") - log_message = format_exception_message(exc) - log_type = type(exc).__name__ + params = format_exception_message(exc) valid = False + log_obj = Log(message=params, type="error") result_data_response = ResultDataResponse(results={}) - log_object = Log(message=log_message, type=log_type) - + artifacts = {} # If there's an error building the vertex # we need to clear the cache await chat_service.clear_cache(flow_id_str) - result_data_response.logs.append(log_object) + result_data_response.message = artifacts + result_data_response.logs.append(log_obj) # Log the vertex build if not vertex.will_stream: @@ -204,8 +204,9 @@ async def build_vertex( flow_id=flow_id_str, vertex_id=vertex_id, valid=valid, - logs=result_data_response.logs, + params=params, data=result_data_response, + artifacts=artifacts, ) timedelta = time.perf_counter() - start_time @@ -231,6 +232,7 @@ async def build_vertex( next_vertices_ids=next_runnable_vertices, top_level_vertices=top_level_vertices, valid=valid, + params=params, id=vertex.id, data=result_data_response, ) diff --git a/src/backend/base/langflow/api/v1/monitor.py b/src/backend/base/langflow/api/v1/monitor.py index ffd01b470..b748e4955 100644 --- a/src/backend/base/langflow/api/v1/monitor.py +++ b/src/backend/base/langflow/api/v1/monitor.py @@ -117,6 +117,21 @@ async def get_transactions( dicts = monitor_service.get_transactions( source=source, target=target, status=status, order_by=order_by, flow_id=flow_id ) - return [TransactionModelResponse(**d) for d in dicts] + result = [] + for d in dicts: + d = TransactionModelResponse( + index=d["index"], + timestamp=d["timestamp"], + vertex_id=d["vertex_id"], + inputs=d["inputs"], + outputs=d["outputs"], + status=d["status"], + error=d["error"], + flow_id=d["flow_id"], + source=d["vertex_id"], + target=d["target_id"], + ) + result.append(d) + return result except Exception as e: raise HTTPException(status_code=500, detail=str(e)) diff --git a/src/backend/base/langflow/api/v1/schemas.py b/src/backend/base/langflow/api/v1/schemas.py index 4e8915842..cb230c9f5 100644 --- a/src/backend/base/langflow/api/v1/schemas.py +++ b/src/backend/base/langflow/api/v1/schemas.py @@ -2,6 +2,7 @@ from datetime import datetime, timezone from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional, Union +from typing_extensions import TypedDict from uuid import UUID from pydantic import BaseModel, ConfigDict, Field, field_validator, model_serializer @@ -243,11 +244,11 @@ class VerticesOrderResponse(BaseModel): run_id: UUID vertices_to_run: List[str] - class ResultDataResponse(BaseModel): results: Optional[Any] = Field(default_factory=dict) logs: List[Log | None] = Field(default_factory=list) - messages: List[ChatOutputResponse | None] = Field(default_factory=list) + message: Optional[Any] = Field(default_factory=dict) + artifacts: Optional[Any] = Field(default_factory=dict) timedelta: Optional[float] = None duration: Optional[str] = None used_frozen_result: Optional[bool] = False @@ -259,6 +260,8 @@ class VertexBuildResponse(BaseModel): next_vertices_ids: Optional[List[str]] = None top_level_vertices: Optional[List[str]] = None valid: bool + params: 
Optional[Any] = Field(default_factory=dict) + """JSON string of the params.""" data: ResultDataResponse """Mapping of vertex ids to result dict containing the param name and result value.""" timestamp: Optional[datetime] = Field(default_factory=lambda: datetime.now(timezone.utc)) diff --git a/src/backend/base/langflow/base/models/model.py b/src/backend/base/langflow/base/models/model.py index 690dc01ba..72f7ca0a7 100644 --- a/src/backend/base/langflow/base/models/model.py +++ b/src/backend/base/langflow/base/models/model.py @@ -1,10 +1,13 @@ +import warnings from typing import Optional, Union from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.language_models.llms import LLM +from langchain_core.load import load from langchain_core.messages import AIMessage, HumanMessage, SystemMessage from langflow.custom import CustomComponent +from langflow.schema.schema import Record class LCModelComponent(CustomComponent): @@ -82,7 +85,7 @@ class LCModelComponent(CustomComponent): return status_message def get_chat_result( - self, runnable: BaseChatModel, stream: bool, input_value: str, system_message: Optional[str] = None + self, runnable: BaseChatModel, stream: bool, input_value: str | Record, system_message: Optional[str] = None ): messages: list[Union[HumanMessage, SystemMessage]] = [] if not input_value and not system_message: @@ -90,7 +93,16 @@ class LCModelComponent(CustomComponent): if system_message: messages.append(SystemMessage(content=system_message)) if input_value: - messages.append(HumanMessage(content=input_value)) + if isinstance(input_value, Record): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + if "prompt" in input_value: + prompt = load(input_value.prompt) + runnable = prompt | runnable + else: + messages.append(input_value.to_lc_message()) + else: + messages.append(HumanMessage(content=input_value)) if stream: return runnable.stream(messages) else: diff --git a/src/backend/base/langflow/base/prompts/utils.py b/src/backend/base/langflow/base/prompts/utils.py index 2270035af..c4b4e08c2 100644 --- a/src/backend/base/langflow/base/prompts/utils.py +++ b/src/backend/base/langflow/base/prompts/utils.py @@ -1,9 +1,10 @@ +import base64 from copy import deepcopy - from langchain_core.documents import Document from langflow.schema import Record +from langflow.services.deps import get_storage_service def record_to_string(record: Record) -> str: @@ -19,7 +20,7 @@ def record_to_string(record: Record) -> str: return record.get_text() -def dict_values_to_string(d: dict) -> dict: +async def dict_values_to_string(d: dict) -> dict: """ Converts the values of a dictionary to strings. 
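Because `dict_values_to_string` is now a coroutine — it may call into the storage service to resolve attached file paths — every caller has to await it, which is why `PromptComponent.build` later in this diff becomes `async`. A minimal sketch of the new call shape follows; the Record payload is a hypothetical example, since the exact fields `to_lc_message()` expects are an assumption here.

```python
# Minimal sketch of awaiting the now-async helper; the Record data
# ({"text": ..., "sender": ...}) is a hypothetical payload.
import asyncio

from langflow.base.prompts.utils import dict_values_to_string
from langflow.schema import Record


async def main():
    kwargs = {"user_input": Record(data={"text": "hello", "sender": "User"})}
    # Must be awaited now: Records with files get their storage paths resolved.
    converted = await dict_values_to_string(kwargs)
    print(converted)


asyncio.run(main())
```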
@@ -36,16 +37,43 @@ def dict_values_to_string(d: dict) -> dict: if isinstance(value, list): for i, item in enumerate(value): if isinstance(item, Record): - d_copy[key][i] = record_to_string(item) + d_copy[key][i] = item.to_lc_message() elif isinstance(item, Document): d_copy[key][i] = document_to_string(item) elif isinstance(value, Record): - d_copy[key] = record_to_string(value) + if "files" in value and value.files: + files = await get_file_paths(value.files) + value.files = files + d_copy[key] = value.to_lc_message() elif isinstance(value, Document): d_copy[key] = document_to_string(value) return d_copy +async def get_file_paths(files: list[str]): + storage_service = get_storage_service() + file_paths = [] + for file in files: + flow_id, file_name = file.split("/") + file_paths.append(storage_service.build_full_path(flow_id=flow_id, file_name=file_name)) + return file_paths + + +async def get_files( + file_paths: str, + convert_to_base64: bool = False, +): + storage_service = get_storage_service() + file_objects = [] + for file_path in file_paths: + flow_id, file_name = file_path.split("/") + file_object = await storage_service.get_file(flow_id=flow_id, file_name=file_name) + if convert_to_base64: + file_object = base64.b64encode(file_object).decode("utf-8") + file_objects.append(file_object) + return file_objects + + def document_to_string(document: Document) -> str: """ Convert a document to a string. diff --git a/src/backend/base/langflow/components/inputs/Prompt.py b/src/backend/base/langflow/components/inputs/Prompt.py index 2c76e6132..f51c1aaab 100644 --- a/src/backend/base/langflow/components/inputs/Prompt.py +++ b/src/backend/base/langflow/components/inputs/Prompt.py @@ -1,7 +1,9 @@ -from langchain_core.prompts import PromptTemplate +from langchain_core.prompts import ChatPromptTemplate +from langflow.base.prompts.utils import dict_values_to_string from langflow.custom import CustomComponent from langflow.field_typing import Prompt, TemplateField, Text +from langflow.schema.schema import Record class PromptComponent(CustomComponent): @@ -15,19 +17,14 @@ class PromptComponent(CustomComponent): "code": TemplateField(advanced=True), } - def build( + async def build( self, template: Prompt, **kwargs, - ) -> Text: - from langflow.base.prompts.utils import dict_values_to_string - - prompt_template = PromptTemplate.from_template(Text(template)) - kwargs = dict_values_to_string(kwargs) - kwargs = {k: "\n".join(v) if isinstance(v, list) else v for k, v in kwargs.items()} - try: - formated_prompt = prompt_template.format(**kwargs) - except Exception as exc: - raise ValueError(f"Error formatting prompt: {exc}") from exc - self.status = f'Prompt:\n"{formated_prompt}"' - return formated_prompt + ) -> Record: + prompt_template = ChatPromptTemplate.from_template(Text(template)) + kwargs = await dict_values_to_string(kwargs) + messages = list(kwargs.values()) + prompt = prompt_template + messages + self.status = f'Prompt:\n"{template}"' + return Record(data={"prompt": prompt.to_json()}) diff --git a/src/backend/base/langflow/components/models/AmazonBedrockModel.py b/src/backend/base/langflow/components/models/AmazonBedrockModel.py index 1015f1684..2ab1f426e 100644 --- a/src/backend/base/langflow/components/models/AmazonBedrockModel.py +++ b/src/backend/base/langflow/components/models/AmazonBedrockModel.py @@ -58,7 +58,7 @@ class AmazonBedrockComponent(LCModelComponent): "advanced": True, }, "cache": {"display_name": "Cache"}, - "input_value": {"display_name": "Input"}, + "input_value": 
{"display_name": "Input", "input_types": ["Text", "Record"]}, "system_message": { "display_name": "System Message", "info": "System message to pass to the model.", diff --git a/src/backend/base/langflow/components/models/AnthropicModel.py b/src/backend/base/langflow/components/models/AnthropicModel.py index cfe9ed900..8ea796f0d 100644 --- a/src/backend/base/langflow/components/models/AnthropicModel.py +++ b/src/backend/base/langflow/components/models/AnthropicModel.py @@ -63,7 +63,7 @@ class AnthropicLLM(LCModelComponent): "info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.", }, "code": {"show": False}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "advanced": True, diff --git a/src/backend/base/langflow/components/models/AzureOpenAIModel.py b/src/backend/base/langflow/components/models/AzureOpenAIModel.py index c296a8fae..54ac1ec84 100644 --- a/src/backend/base/langflow/components/models/AzureOpenAIModel.py +++ b/src/backend/base/langflow/components/models/AzureOpenAIModel.py @@ -78,7 +78,7 @@ class AzureChatOpenAIComponent(LCModelComponent): "info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.", }, "code": {"show": False}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py b/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py index f5e6497d0..8ff9a424f 100644 --- a/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py +++ b/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py @@ -81,7 +81,7 @@ class QianfanChatEndpointComponent(LCModelComponent): "info": "Endpoint of the Qianfan LLM, required if custom model used.", }, "code": {"show": False}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/ChatLiteLLMModel.py b/src/backend/base/langflow/components/models/ChatLiteLLMModel.py index 054b59d12..6d03f613e 100644 --- a/src/backend/base/langflow/components/models/ChatLiteLLMModel.py +++ b/src/backend/base/langflow/components/models/ChatLiteLLMModel.py @@ -111,7 +111,7 @@ class ChatLiteLLMModelComponent(LCModelComponent): "required": False, "default": False, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/CohereModel.py b/src/backend/base/langflow/components/models/CohereModel.py index 3bd12c095..a223a1455 100644 --- a/src/backend/base/langflow/components/models/CohereModel.py +++ b/src/backend/base/langflow/components/models/CohereModel.py @@ -1,10 +1,11 @@ from typing import Optional +from langchain_cohere import ChatCohere from pydantic.v1 import SecretStr -from langflow.field_typing import Text + from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent -from langchain_cohere import ChatCohere +from langflow.field_typing import Text class CohereComponent(LCModelComponent): @@ -42,7 +43,7 @@ class 
CohereComponent(LCModelComponent): "type": "float", "show": True, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, @@ -69,3 +70,4 @@ class CohereComponent(LCModelComponent): temperature=temperature, ) return self.get_chat_result(output, stream, input_value, system_message) + return self.get_chat_result(output, stream, input_value, system_message) diff --git a/src/backend/base/langflow/components/models/HuggingFaceModel.py b/src/backend/base/langflow/components/models/HuggingFaceModel.py index 19750ef9f..9b7949bc7 100644 --- a/src/backend/base/langflow/components/models/HuggingFaceModel.py +++ b/src/backend/base/langflow/components/models/HuggingFaceModel.py @@ -2,9 +2,10 @@ from typing import Optional from langchain_community.chat_models.huggingface import ChatHuggingFace from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint -from langflow.field_typing import Text + from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent +from langflow.field_typing import Text class HuggingFaceEndpointsComponent(LCModelComponent): @@ -36,7 +37,7 @@ class HuggingFaceEndpointsComponent(LCModelComponent): "advanced": True, }, "code": {"show": False}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, @@ -72,3 +73,4 @@ class HuggingFaceEndpointsComponent(LCModelComponent): raise ValueError("Could not connect to HuggingFace Endpoints API.") from e output = ChatHuggingFace(llm=llm) return self.get_chat_result(output, stream, input_value, system_message) + return self.get_chat_result(output, stream, input_value, system_message) diff --git a/src/backend/base/langflow/components/models/MistralModel.py b/src/backend/base/langflow/components/models/MistralModel.py index 305a45e4b..dc77655b0 100644 --- a/src/backend/base/langflow/components/models/MistralModel.py +++ b/src/backend/base/langflow/components/models/MistralModel.py @@ -27,7 +27,7 @@ class MistralAIModelComponent(LCModelComponent): def build_config(self): return { - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "max_tokens": { "display_name": "Max Tokens", "advanced": True, diff --git a/src/backend/base/langflow/components/models/OllamaModel.py b/src/backend/base/langflow/components/models/OllamaModel.py index f591e4a5c..a68fff9e3 100644 --- a/src/backend/base/langflow/components/models/OllamaModel.py +++ b/src/backend/base/langflow/components/models/OllamaModel.py @@ -194,7 +194,7 @@ class ChatOllamaComponent(LCModelComponent): "info": "Template to use for generating text.", "advanced": True, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/OpenAIModel.py b/src/backend/base/langflow/components/models/OpenAIModel.py index 0aedce495..94e0a78ee 100644 --- a/src/backend/base/langflow/components/models/OpenAIModel.py +++ b/src/backend/base/langflow/components/models/OpenAIModel.py @@ -28,7 +28,7 @@ class OpenAIModelComponent(LCModelComponent): def build_config(self): return { - "input_value": {"display_name": "Input"}, + "input_value": 
{"display_name": "Input", "input_types": ["Text", "Record"]}, "max_tokens": { "display_name": "Max Tokens", "advanced": True, diff --git a/src/backend/base/langflow/components/models/VertexAiModel.py b/src/backend/base/langflow/components/models/VertexAiModel.py index a992447f4..b5af1fac2 100644 --- a/src/backend/base/langflow/components/models/VertexAiModel.py +++ b/src/backend/base/langflow/components/models/VertexAiModel.py @@ -1,6 +1,5 @@ from typing import Optional - from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import Text @@ -74,7 +73,7 @@ class ChatVertexAIComponent(LCModelComponent): "value": False, "advanced": True, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/graph/graph/base.py b/src/backend/base/langflow/graph/graph/base.py index b0e43557d..06074ee1d 100644 --- a/src/backend/base/langflow/graph/graph/base.py +++ b/src/backend/base/langflow/graph/graph/base.py @@ -738,7 +738,9 @@ class Graph: # Check the cache for the vertex cached_result = await chat_service.get_cache(key=vertex.id) if isinstance(cached_result, CacheMiss): - await vertex.build(user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars) + await vertex.build( + user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars, files=files + ) await chat_service.set_cache(key=vertex.id, data=vertex) else: cached_vertex = cached_result["result"] @@ -752,7 +754,9 @@ class Graph: vertex.result.used_frozen_result = True else: - await vertex.build(user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars) + await vertex.build( + user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars, files=files + ) if vertex.result is not None: params = f"{vertex._built_object_repr()}{params}" @@ -765,11 +769,13 @@ class Graph: next_runnable_vertices, top_level_vertices = await self.get_next_and_top_level_vertices( lock, set_cache_coro, vertex ) - log_transaction(vertex, status="success") + flow_id = self.flow_id + log_transaction(flow_id, vertex, status="success") return next_runnable_vertices, top_level_vertices, result_dict, params, valid, artifacts, vertex except Exception as exc: logger.exception(f"Error building vertex: {exc}") - log_transaction(vertex, status="failure", error=str(exc)) + flow_id = self.flow_id + log_transaction(flow_id, vertex, status="failure", error=str(exc)) raise exc async def get_next_and_top_level_vertices( diff --git a/src/backend/base/langflow/graph/schema.py b/src/backend/base/langflow/graph/schema.py index 82cfa2930..72b810555 100644 --- a/src/backend/base/langflow/graph/schema.py +++ b/src/backend/base/langflow/graph/schema.py @@ -31,10 +31,10 @@ class ResultData(BaseModel): if not values.get("logs") and values.get("artifacts"): # Build the log from the artifacts message = values["artifacts"] - if "stream_url" in message: + if "stream_url" in message and "type" in message: stream_url = StreamURL(location=message["stream_url"]) values["logs"] = [Log(message=stream_url, type=message["type"])] - else: + elif "type" in message: values["logs"] = [Log(message=message, type=message["type"])] return values diff --git a/src/backend/base/langflow/graph/utils.py b/src/backend/base/langflow/graph/utils.py index 066d7511a..fd7f1e122 100644 --- 
a/src/backend/base/langflow/graph/utils.py +++ b/src/backend/base/langflow/graph/utils.py @@ -2,6 +2,7 @@ from enum import Enum from typing import Any, Generator, Union from langchain_core.documents import Document +from langflow.schema.schema import Record from pydantic import BaseModel from langflow.interface.utils import extract_input_variables_from_prompt diff --git a/src/backend/base/langflow/graph/vertex/base.py b/src/backend/base/langflow/graph/vertex/base.py index be5c85861..a94bd7a46 100644 --- a/src/backend/base/langflow/graph/vertex/base.py +++ b/src/backend/base/langflow/graph/vertex/base.py @@ -10,11 +10,11 @@ from loguru import logger from langflow.graph.schema import INPUT_COMPONENTS, OUTPUT_COMPONENTS, InterfaceComponentTypes, ResultData from langflow.graph.utils import ArtifactType, UnbuiltObject, UnbuiltResult -from langflow.graph.vertex.utils import log_transaction from langflow.interface.initialize import loading from langflow.interface.listing import lazy_load_dict from langflow.schema.schema import INPUT_FIELD_NAME from langflow.services.deps import get_storage_service +from langflow.services.monitor.utils import log_transaction from langflow.utils.constants import DIRECT_TYPES from langflow.utils.schemas import ChatOutputResponse from langflow.utils.util import sync_to_async, unescape_string @@ -529,12 +529,13 @@ class Vertex: Returns: The built result if use_result is True, else the built object. """ + flow_id = self.graph.flow_id if not self._built: - log_transaction(source=self, target=requester, flow_id=self.graph.flow_id, status="error") + log_transaction(flow_id, vertex=self, target=requester, status="error") raise ValueError(f"Component {self.display_name} has not been built yet") result = self._built_result if self.use_result else self._built_object - log_transaction(source=self, target=requester, flow_id=self.graph.flow_id, status="success") + log_transaction(flow_id, vertex=self, target=requester, status="success") return result async def _build_vertex_and_update_params(self, key, vertex: "Vertex"): @@ -628,9 +629,8 @@ class Vertex: self._built_object, self.artifacts = result elif len(result) == 3: self._custom_component, self._built_object, self.artifacts = result - self.artifacts_raw = self.artifacts.get("raw") - self.artifacts_type = self.artifacts.get("type") or ArtifactType.UNKNOWN.value - + self.artifacts_raw = self.artifacts.get("raw", None) + self.artifacts_type = self.artifacts.get("type", None) or ArtifactType.UNKNOWN.value else: self._built_object = result diff --git a/src/backend/base/langflow/graph/vertex/types.py b/src/backend/base/langflow/graph/vertex/types.py index 16a2a0f0e..fa527f75e 100644 --- a/src/backend/base/langflow/graph/vertex/types.py +++ b/src/backend/base/langflow/graph/vertex/types.py @@ -116,7 +116,7 @@ class InterfaceVertex(Vertex): sender_name=sender_name, stream_url=stream_url, files=files, - type=artifact_type.value, + type=artifact_type, ) self.will_stream = stream_url is not None @@ -213,9 +213,9 @@ class InterfaceVertex(Vertex): flow_id=self.graph.flow_id, vertex_id=self.id, valid=True, - logs=self._built_object_repr(), + params=self._built_object_repr(), data=self.result, - messages=self.artifacts, + artifacts=self.artifacts, ) self._validate_built_object() diff --git a/src/backend/base/langflow/graph/vertex/utils.py b/src/backend/base/langflow/graph/vertex/utils.py index 59a1c1949..0f69e4b2d 100644 --- a/src/backend/base/langflow/graph/vertex/utils.py +++ b/src/backend/base/langflow/graph/vertex/utils.py @@ -1,9 +1,5 
@@ from typing import TYPE_CHECKING -from loguru import logger - -from langflow.services.deps import get_monitor_service - if TYPE_CHECKING: from langflow.graph.vertex.base import Vertex @@ -21,34 +17,3 @@ def build_clean_params(target: "Vertex") -> dict: if isinstance(value, list): params[key] = [item for item in value if isinstance(item, (str, int, bool, float, list, dict))] return params - - -def log_transaction(source: "Vertex", target: "Vertex", flow_id, status, error=None): - """ - Logs a transaction between two vertices. - - Args: - source (Vertex): The source vertex of the transaction. - target (Vertex): The target vertex of the transaction. - status: The status of the transaction. - error (Optional): Any error associated with the transaction. - - Raises: - Exception: If there is an error while logging the transaction. - - """ - try: - monitor_service = get_monitor_service() - clean_params = build_clean_params(target) - data = { - "source": source.vertex_type, - "target": target.vertex_type, - "target_args": clean_params, - "timestamp": monitor_service.get_timestamp(), - "status": status, - "error": error, - "flow_id": flow_id, - } - monitor_service.add_row(table_name="transactions", data=data) - except Exception as e: - logger.error(f"Error logging transaction: {e}") diff --git a/src/backend/base/langflow/helpers/flow.py b/src/backend/base/langflow/helpers/flow.py index 6ce3b2048..16db82939 100644 --- a/src/backend/base/langflow/helpers/flow.py +++ b/src/backend/base/langflow/helpers/flow.py @@ -251,7 +251,7 @@ def get_flow_by_id_or_endpoint_name( flow = db.get(Flow, flow_id) except ValueError: endpoint_name = flow_id_or_name - stmt = select(Flow).where(Flow.name == endpoint_name) + stmt = select(Flow).where(Flow.endpoint_name == endpoint_name) if user_id: stmt = stmt.where(Flow.user_id == user_id) flow = db.exec(stmt).first() diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, world!).json b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, world!).json index 9bab93817..52bc386c8 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, world!).json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, world!).json @@ -20,7 +20,7 @@ "list": false, "show": true, "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", + "value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import 
dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n", "fileTypes": [], "file_path": "", "password": false, @@ -140,7 +140,7 @@ "info": "", "load_from_db": false, "title_case": false, - "input_types": ["Text"] + "input_types": ["Text", "Record"] }, "code": { "type": "code", @@ -149,7 +149,7 @@ "list": false, "show": true, "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", "fileTypes": [], "file_path": "", "password": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Blog Writter.json b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Blog Writter.json index 49fa386e6..ede2f4692 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Blog Writter.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Blog Writter.json @@ -20,7 +20,7 @@ "list": false, "show": true, "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", + "value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": 
TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n", "fileTypes": [], "file_path": "", "password": false, @@ -444,7 +444,7 @@ "info": "", "load_from_db": false, "title_case": false, - "input_types": ["Text"] + "input_types": ["Text", "Record"] }, "code": { "type": "code", @@ -453,7 +453,7 @@ "list": false, "show": true, "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", "fileTypes": [], "file_path": "", "password": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Document QA.json b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Document QA.json index 361676b66..c2e2c1d70 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Document QA.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Document QA.json @@ -20,7 +20,7 @@ "list": false, "show": true, "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", + "value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": 
TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n", "fileTypes": [], "file_path": "", "password": false, @@ -589,7 +589,7 @@ "info": "", "load_from_db": false, "title_case": false, - "input_types": ["Text"] + "input_types": ["Text", "Record"] }, "code": { "type": "code", @@ -598,7 +598,7 @@ "list": false, "show": true, "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", "fileTypes": [], "file_path": "", "password": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Memory Conversation.json b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Memory Conversation.json index 1083e08cb..5d9004316 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Memory Conversation.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Memory Conversation.json @@ -524,7 +524,7 @@ "list": false, "show": true, "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", + "value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": 
TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n", "fileTypes": [], "file_path": "", "password": false, @@ -670,7 +670,7 @@ "info": "", "load_from_db": false, "title_case": false, - "input_types": ["Text"] + "input_types": ["Text", "Record"] }, "code": { "type": "code", @@ -679,7 +679,7 @@ "list": false, "show": true, "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", "fileTypes": [], "file_path": "", "password": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Prompt Chaining.json b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Prompt Chaining.json index 7182b97a1..62bcf2c70 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Prompt Chaining.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Prompt Chaining.json @@ -20,7 +20,7 @@ "list": false, "show": true, "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", + "value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": 
TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n", "fileTypes": [], "file_path": "", "password": false, @@ -130,7 +130,7 @@ "list": false, "show": true, "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", + "value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n", "fileTypes": [], "file_path": "", "password": false, @@ -789,7 +789,7 @@ "info": "", "load_from_db": false, "title_case": false, - "input_types": ["Text"] + "input_types": ["Text", "Record"] }, "code": { "type": "code", @@ -798,7 +798,7 @@ "list": false, "show": true, "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n 
return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", "fileTypes": [], "file_path": "", "password": false, @@ -1146,7 +1146,7 @@ "info": "", "load_from_db": false, "title_case": false, - "input_types": ["Text"] + "input_types": ["Text", "Record"] }, "code": { "type": "code", @@ -1155,7 +1155,7 @@ "list": false, "show": true, "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", "fileTypes": [], "file_path": "", "password": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json b/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json index 4bd5931f1..ed9188788 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json @@ -784,7 +784,7 @@ "info": "", "load_from_db": false, "title_case": false, - "input_types": ["Text"] + "input_types": ["Text", "Record"] }, "code": { "type": "code", @@ -793,7 +793,7 @@ "list": false, "show": true, "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", "fileTypes": [], "file_path": "", "password": false, @@ -1034,7 +1034,7 @@ "list": false, "show": true, "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", + "value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n", 
"fileTypes": [], "file_path": "", "password": false, diff --git a/src/backend/base/langflow/interface/initialize/loading.py b/src/backend/base/langflow/interface/initialize/loading.py index 163587fa0..42fa4e7a1 100644 --- a/src/backend/base/langflow/interface/initialize/loading.py +++ b/src/backend/base/langflow/interface/initialize/loading.py @@ -125,7 +125,6 @@ async def instantiate_custom_component(params, user_id, vertex, fallback_to_env_ custom_repr = build_result if not isinstance(custom_repr, str): custom_repr = str(custom_repr) - raw = custom_component.repr_value if hasattr(raw, "data"): raw = raw.data diff --git a/src/backend/base/langflow/schema/schema.py b/src/backend/base/langflow/schema/schema.py index 749180755..209cb51c8 100644 --- a/src/backend/base/langflow/schema/schema.py +++ b/src/backend/base/langflow/schema/schema.py @@ -1,11 +1,12 @@ import copy import json from typing import Literal, Optional, cast +from typing_extensions import TypedDict from langchain_core.documents import Document from langchain_core.messages import AIMessage, BaseMessage, HumanMessage -from pydantic import BaseModel, model_validator -from typing_extensions import TypedDict +from langchain_core.prompts.image import ImagePromptTemplate +from pydantic import BaseModel, model_serializer, model_validator class Record(BaseModel): @@ -30,6 +31,11 @@ class Record(BaseModel): values["data"][key] = values[key] return values + @model_serializer(mode="plain", when_used="json") + def serialize_model(self): + data = {k: v.to_json() if hasattr(v, "to_json") else v for k, v in self.data.items()} + return data + def get_text(self): """ Retrieves the text value from the data dictionary. @@ -103,7 +109,9 @@ class Record(BaseModel): text = self.data.pop(self.text_key, self.default_value) return Document(page_content=text, metadata=self.data) - def to_lc_message(self) -> BaseMessage: + def to_lc_message( + self, + ) -> BaseMessage: """ Converts the Record to a BaseMessage. 
@@ -119,8 +127,22 @@ class Record(BaseModel): raise ValueError(f"Missing required keys ('text', 'sender') in Record: {self.data}") sender = self.data.get("sender", "Machine") text = self.data.get("text", "") + files = self.data.get("files", []) if sender == "User": - return HumanMessage(content=text) + if files: + contents = [{"type": "text", "text": text}] + for file_path in files: + image_template = ImagePromptTemplate() + image_prompt_value = image_template.invoke(input={"path": file_path}) + contents.append({"type": "image_url", "image_url": image_prompt_value.image_url}) + human_message = HumanMessage(content=contents) + else: + human_message = HumanMessage( + content=[{"type": "text", "text": text}], + ) + + return human_message + return AIMessage(content=text) def __getattr__(self, key): @@ -170,8 +192,14 @@ class Record(BaseModel): def __str__(self) -> str: # return a JSON string representation of the Record atributes + try: + data = {k: v.to_json() if hasattr(v, "to_json") else v for k, v in self.data.items()} + return json.dumps(data, indent=4) + except Exception: + return str(self.data) - return json.dumps(self.data) + def __contains__(self, key): + return key in self.data INPUT_FIELD_NAME = "input_value" diff --git a/src/backend/base/langflow/services/cache/utils.py b/src/backend/base/langflow/services/cache/utils.py index ff19836ef..a89963f56 100644 --- a/src/backend/base/langflow/services/cache/utils.py +++ b/src/backend/base/langflow/services/cache/utils.py @@ -23,6 +23,9 @@ class CacheMiss: def __repr__(self): return "" + def __bool__(self): + return False + def create_cache_folder(func): def wrapper(*args, **kwargs): diff --git a/src/backend/base/langflow/services/monitor/schema.py b/src/backend/base/langflow/services/monitor/schema.py index 349d5fc2a..4cb057ccc 100644 --- a/src/backend/base/langflow/services/monitor/schema.py +++ b/src/backend/base/langflow/services/monitor/schema.py @@ -11,25 +11,26 @@ if TYPE_CHECKING: class TransactionModel(BaseModel): index: Optional[int] = Field(default=None) timestamp: Optional[datetime] = Field(default_factory=datetime.now, alias="timestamp") - flow_id: str - source: str - target: str - target_args: dict + vertex_id: str + target_id: str | None = None + inputs: dict + outputs: Optional[dict] = None status: str error: Optional[str] = None + flow_id: Optional[str] = Field(default=None, alias="flow_id") class Config: from_attributes = True populate_by_name = True # validate target_args in case it is a JSON - @field_validator("target_args", mode="before") + @field_validator("outputs", "inputs", mode="before") def validate_target_args(cls, v): if isinstance(v, str): return json.loads(v) return v - @field_serializer("target_args") + @field_serializer("outputs", "inputs") def serialize_target_args(v): if isinstance(v, dict): return json.dumps(v) @@ -39,19 +40,21 @@ class TransactionModel(BaseModel): class TransactionModelResponse(BaseModel): index: Optional[int] = Field(default=None) timestamp: Optional[datetime] = Field(default_factory=datetime.now, alias="timestamp") - flow_id: str - source: str - target: str - target_args: dict + vertex_id: str + inputs: dict + outputs: Optional[dict] = None status: str error: Optional[str] = None + flow_id: Optional[str] = Field(default=None, alias="flow_id") + source: Optional[str] = None + target: Optional[str] = None class Config: from_attributes = True populate_by_name = True # validate target_args in case it is a JSON - @field_validator("target_args", mode="before") + @field_validator("outputs", 
"inputs", mode="before") def validate_target_args(cls, v): if isinstance(v, str): return json.loads(v) @@ -129,15 +132,16 @@ class VertexBuildModel(BaseModel): id: Optional[str] = Field(default=None, alias="id") flow_id: str valid: bool - logs: Any + params: Any data: dict + artifacts: dict timestamp: datetime = Field(default_factory=datetime.now) class Config: from_attributes = True populate_by_name = True - @field_serializer("data") + @field_serializer("data", "artifacts") def serialize_dict(v): if isinstance(v, dict): # check if the value of each key is a BaseModel or a list of BaseModels @@ -151,8 +155,8 @@ class VertexBuildModel(BaseModel): return v.model_dump_json() return v - @field_validator("logs", mode="before") - def validate_logs(cls, v): + @field_validator("params", mode="before") + def validate_params(cls, v): if isinstance(v, str): try: return json.loads(v) @@ -160,7 +164,7 @@ class VertexBuildModel(BaseModel): return v return v - @field_serializer("logs") + @field_serializer("params") def serialize_params(v): if isinstance(v, list) and all(isinstance(i, BaseModel) for i in v): return json.dumps([i.model_dump() for i in v]) @@ -172,11 +176,17 @@ class VertexBuildModel(BaseModel): return json.loads(v) return v + @field_validator("artifacts", mode="before") + def validate_artifacts(cls, v): + if isinstance(v, str): + return json.loads(v) + elif isinstance(v, BaseModel): + return v.model_dump() + return v + class VertexBuildResponseModel(VertexBuildModel): - messages: list[MessageModel] = [] - - @field_serializer("data") + @field_serializer("data", "artifacts") def serialize_dict(v): return v diff --git a/src/backend/base/langflow/services/monitor/service.py b/src/backend/base/langflow/services/monitor/service.py index e15cb39dd..6c37672af 100644 --- a/src/backend/base/langflow/services/monitor/service.py +++ b/src/backend/base/langflow/services/monitor/service.py @@ -168,7 +168,9 @@ class MonitorService(Service): order_by: Optional[str] = "timestamp", flow_id: Optional[str] = None, ): - query = "SELECT index,flow_id, source, target, target_args, status, error, timestamp FROM transactions" + query = ( + "SELECT index,flow_id, status, error, timestamp, vertex_id, inputs, outputs, target_id FROM transactions" + ) conditions = [] if source: conditions.append(f"source = '{source}'") @@ -183,7 +185,7 @@ class MonitorService(Service): query += " WHERE " + " AND ".join(conditions) if order_by: - query += f" ORDER BY {order_by}" + query += f" ORDER BY {order_by} DESC" with duckdb.connect(str(self.db_path)) as conn: df = conn.execute(query).df() diff --git a/src/backend/base/langflow/services/monitor/utils.py b/src/backend/base/langflow/services/monitor/utils.py index 447a58b92..706d62348 100644 --- a/src/backend/base/langflow/services/monitor/utils.py +++ b/src/backend/base/langflow/services/monitor/utils.py @@ -119,21 +119,16 @@ async def log_message( sender_name: str, message: str, session_id: str, - artifacts: Optional[dict] = None, + files: Optional[list] = None, flow_id: Optional[str] = None, ): try: - from langflow.graph.vertex.base import Vertex - - if isinstance(session_id, Vertex): - session_id = await session_id.build() # type: ignore - monitor_service = get_monitor_service() row = { "sender": sender, "sender_name": sender_name, "message": message, - "artifacts": artifacts or {}, + "files": files or [], "session_id": session_id, "timestamp": monitor_service.get_timestamp(), "flow_id": flow_id, @@ -147,9 +142,9 @@ async def log_vertex_build( flow_id: str, vertex_id: str, 
valid: bool, - logs: Any, + params: Any, data: "ResultDataResponse", - messages: Optional[dict] = None, + artifacts: Optional[dict] = None, ): try: monitor_service = get_monitor_service() @@ -158,9 +153,9 @@ async def log_vertex_build( "flow_id": flow_id, "id": vertex_id, "valid": valid, - "logs": logs, + "params": params, "data": data.model_dump(), - "messages": messages or {}, + "artifacts": artifacts or {}, "timestamp": monitor_service.get_timestamp(), } monitor_service.add_row(table_name="vertex_builds", data=row) @@ -183,17 +178,19 @@ def build_clean_params(target: "Vertex") -> dict: return params -def log_transaction(vertex: "Vertex", status, error=None): +def log_transaction(flow_id, vertex: "Vertex", status, target: Optional["Vertex"] = None, error=None): try: monitor_service = get_monitor_service() clean_params = build_clean_params(vertex) data = { - "vertex_id": vertex.id, + "vertex_id": str(vertex.id), + "target_id": str(target.id) if target else None, "inputs": clean_params, - "output": str(vertex.result), + "outputs": vertex.result.model_dump_json() if vertex.result else None, "timestamp": monitor_service.get_timestamp(), "status": status, "error": error, + "flow_id": flow_id, } monitor_service.add_row(table_name="transactions", data=data) except Exception as e: diff --git a/src/backend/base/langflow/services/socket/utils.py b/src/backend/base/langflow/services/socket/utils.py index bed00e28f..c1f012e18 100644 --- a/src/backend/base/langflow/services/socket/utils.py +++ b/src/backend/base/langflow/services/socket/utils.py @@ -90,9 +90,9 @@ async def build_vertex( flow_id=flow_id, vertex_id=vertex_id, valid=valid, - logs=params, + params=params, data=result_dict, - messages=artifacts, + artifacts=artifacts, ) # Emit the vertex build response diff --git a/src/backend/base/langflow/utils/validate.py b/src/backend/base/langflow/utils/validate.py index 0871dbd82..c3bbc9df8 100644 --- a/src/backend/base/langflow/utils/validate.py +++ b/src/backend/base/langflow/utils/validate.py @@ -159,7 +159,10 @@ def create_class(code, class_name): # Replace from langflow import CustomComponent with from langflow.custom import CustomComponent code = code.replace("from langflow import CustomComponent", "from langflow.custom import CustomComponent") - + code = code.replace( + "from langflow.interface.custom.custom_component import CustomComponent", + "from langflow.custom import CustomComponent", + ) module = ast.parse(code) exec_globals = prepare_global_scope(code, module) diff --git a/src/backend/base/poetry.lock b/src/backend/base/poetry.lock index 81f8a9668..75fbdcd6a 100644 --- a/src/backend/base/poetry.lock +++ b/src/backend/base/poetry.lock @@ -1159,13 +1159,13 @@ files = [ [[package]] name = "langchain" -version = "0.2.2" +version = "0.2.3" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain-0.2.2-py3-none-any.whl", hash = "sha256:58ca0c47bcdd156da66f50a0a4fcedc49bf6950827f4a6b06c8c4842d55805f3"}, - {file = "langchain-0.2.2.tar.gz", hash = "sha256:9d61e50e9cdc2bea659bc5e6c03650ba048fda63a307490ae368e539f61a0d3a"}, + {file = "langchain-0.2.3-py3-none-any.whl", hash = "sha256:5dc33cd9c8008693d328b7cb698df69073acecc89ad9c2a95f243b3314f8d834"}, + {file = "langchain-0.2.3.tar.gz", hash = "sha256:81962cc72cce6515f7bd71e01542727870789bf8b666c6913d85559080c1a201"}, ] [package.dependencies] @@ -1181,29 +1181,15 @@ requests = ">=2,<3" SQLAlchemy = ">=1.4,<3" tenacity = ">=8.1.0,<9.0.0" -[package.extras] 
-azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"] -clarifai = ["clarifai (>=9.1.0)"] -cli = ["typer (>=0.9.0,<0.10.0)"] -cohere = ["cohere (>=4,<6)"] -docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"] -embeddings = ["sentence-transformers (>=2,<3)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.1,<0.2)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] -javascript = ["esprima (>=4.0.1,<5.0.0)"] -llms = ["clarifai (>=9.1.0)", "cohere (>=4,<6)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"] -openai = ["openai (<2)", "tiktoken (>=0.7,<1.0)"] -qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"] -text-helpers = ["chardet (>=5.1.0,<6.0.0)"] - [[package]] name = "langchain-community" -version = "0.2.3" +version = "0.2.4" description = "Community contributed LangChain integrations." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_community-0.2.3-py3-none-any.whl", hash = "sha256:aa895545be2f3f4aa2fea36f6da2e3b4ec50ce61ec986e8f146901a1e9138138"}, - {file = "langchain_community-0.2.3.tar.gz", hash = "sha256:a3c35af215e47b700e7cb4e548fa8b45c6d46d52b5a5a65af2577c5a0104fc9f"}, + {file = "langchain_community-0.2.4-py3-none-any.whl", hash = "sha256:8582e9800f4837660dc297cccd2ee1ddc1d8c440d0fe8b64edb07620f0373b0e"}, + {file = "langchain_community-0.2.4.tar.gz", hash = "sha256:2bb6a1a36b8500a564d25d76469c02457b1a7c3afea6d4a609a47c06b993e3e4"}, ] [package.dependencies] @@ -1218,19 +1204,15 @@ requests = ">=2,<3" SQLAlchemy = ">=1.4,<3" tenacity = ">=8.1.0,<9.0.0" -[package.extras] -cli = ["typer (>=0.9.0,<0.10.0)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-search-documents (==11.4.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.6,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cloudpathlib (>=0.18,<0.19)", "cloudpickle (>=2.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "friendli-client (>=1.2.4,<2.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "httpx-sse (>=0.4.0,<0.5.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "oracledb (>=2.2.0,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "premai (>=0.3.25,<0.4.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pyjwt (>=2.8.0,<3.0.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "simsimd (>=4.3.1,<5.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tidb-vector (>=0.0.3,<1.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "vdms (>=0.0.20,<0.0.21)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] - [[package]] name = "langchain-core" -version = "0.2.4" +version = "0.2.5" description = "Building applications with LLMs 
through composability" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_core-0.2.4-py3-none-any.whl", hash = "sha256:5212f7ec78a525e88a178ed3aefe2fd7134b03fb92573dfbab9914f1d92d6ec5"}, - {file = "langchain_core-0.2.4.tar.gz", hash = "sha256:82bdcc546eb0341cefcf1f4ecb3e49836fff003903afddda2d1312bb8491ef81"}, + {file = "langchain_core-0.2.5-py3-none-any.whl", hash = "sha256:abe5138f22acff23a079ec538be5268bbf97cf023d51987a0dd474d2a16cae3e"}, + {file = "langchain_core-0.2.5.tar.gz", hash = "sha256:4a5c2f56b22396a63ef4790043660e393adbfa6832b978f023ca996a04b8e752"}, ] [package.dependencies] @@ -1241,9 +1223,6 @@ pydantic = ">=1,<3" PyYAML = ">=5.3" tenacity = ">=8.1.0,<9.0.0" -[package.extras] -extended-testing = ["jinja2 (>=3,<4)"] - [[package]] name = "langchain-experimental" version = "0.0.60" diff --git a/src/backend/base/pyproject.toml b/src/backend/base/pyproject.toml index f2df7364f..b175fbcf5 100644 --- a/src/backend/base/pyproject.toml +++ b/src/backend/base/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langflow-base" -version = "0.0.59" +version = "0.0.60" description = "A Python package with a built-in web application" authors = ["Langflow "] maintainers = [ diff --git a/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx b/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx index 437e5a1b1..941c86271 100644 --- a/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx +++ b/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx @@ -44,9 +44,9 @@ import useFetchDataOnMount from "../../../hooks/use-fetch-data-on-mount"; import useHandleOnNewValue from "../../../hooks/use-handle-new-value"; import useHandleNodeClass from "../../../hooks/use-handle-node-class"; import useHandleRefreshButtonPress from "../../../hooks/use-handle-refresh-buttons"; +import OutputModal from "../outputModal"; import TooltipRenderComponent from "../tooltipRenderComponent"; import { TEXT_FIELD_TYPES } from "./constants"; -import OutputModal from "../outputModal"; export default function ParameterComponent({ left, @@ -274,7 +274,9 @@ export default function ParameterComponent({ : "Please build the component first" } > - + )} diff --git a/src/frontend/src/components/arrayReaderComponent/index.tsx b/src/frontend/src/components/arrayReaderComponent/index.tsx index bcbfde010..6e151d426 100644 --- a/src/frontend/src/components/arrayReaderComponent/index.tsx +++ b/src/frontend/src/components/arrayReaderComponent/index.tsx @@ -1,10 +1,12 @@ +import TableAutoCellRender from "../tableComponent/components/tableAutoCellRender"; + export default function ArrayReader({ array }: { array: any[] }): JSX.Element { //TODO check array type return (
    <ul>
      {array.map((item, index) => (
-        <li>{item}</li>
+        <li>
+          {<TableAutoCellRender value={item} />}
+        </li>
      ))}
    </ul>
  );
}
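For context, a minimal usage sketch of the updated `ArrayReader` (the values and the relative import path are illustrative assumptions, not taken from this diff):

```tsx
// Illustrative only: ArrayReader now delegates each item to
// TableAutoCellRender, which picks a renderer based on typeof value
// (strings, timestamps, arrays, and other objects are handled differently).
import ArrayReader from "../arrayReaderComponent";

export default function ExampleList(): JSX.Element {
  // Mixed item types; each entry is rendered by TableAutoCellRender
  // rather than being coerced into a plain string as before.
  const items: any[] = ["plain text", 42, { nested: true }, ["a", "b"]];
  return <ArrayReader array={items} />;
}
```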
diff --git a/src/frontend/src/components/csvOutputComponent/index.tsx b/src/frontend/src/components/csvOutputComponent/index.tsx
index a98d9c028..b6f5be515 100644
--- a/src/frontend/src/components/csvOutputComponent/index.tsx
+++ b/src/frontend/src/components/csvOutputComponent/index.tsx
@@ -115,6 +115,7 @@ function CsvOutputComponent({
         style={{ height: "100%", width: "100%" }}
       >
         <AgGridReact
+          pagination
           ({ ...col,
diff --git a/src/frontend/src/components/recordsOutputComponent/index.tsx b/src/frontend/src/components/recordsOutputComponent/index.tsx
--- a/src/frontend/src/components/recordsOutputComponent/index.tsx
+++ b/src/frontend/src/components/recordsOutputComponent/index.tsx
@@ -23,6 +25,7 @@ function RecordsOutputComponent({
   return (
     <TableComponent
+      pagination
diff --git a/src/frontend/src/components/tableComponent/components/TableOptions/index.tsx b/src/frontend/src/components/tableComponent/components/TableOptions/index.tsx
new file mode 100644
--- /dev/null
+++ b/src/frontend/src/components/tableComponent/components/TableOptions/index.tsx
+import ForwardedIconComponent from "../../../genericIconComponent";
+import ShadTooltip from "../../../ShadTooltipComponent";
+
+export default function TableOptions({
+  resetGrid,
+  duplicateRow,
+  deleteRow,
+  hasSelection,
+  stateChange,
+}: {
+  resetGrid: () => void;
+  duplicateRow?: () => void;
+  deleteRow?: () => void;
+  hasSelection: boolean;
+  stateChange: boolean;
+}): JSX.Element {
+  return (
+    <div className="absolute bottom-4 left-6">
+      <div className="flex items-center gap-3">
+        <ShadTooltip content="Reset columns">
+          <button onClick={resetGrid} disabled={!stateChange}>
+            <ForwardedIconComponent name="RotateCcw" className="h-5 w-5" />
+          </button>
+        </ShadTooltip>
+        {duplicateRow && (
+          <ShadTooltip
+            content={
+              !hasSelection ? (
+                <span>Select items to duplicate</span>
+              ) : (
+                <span>Duplicate selected items</span>
+              )
+            }
+          >
+            <button onClick={duplicateRow} disabled={!hasSelection}>
+              <ForwardedIconComponent name="Copy" className="h-5 w-5" />
+            </button>
+          </ShadTooltip>
+        )}
+        {deleteRow && (
+          <ShadTooltip
+            content={
+              !hasSelection ? (
+                <span>Select items to delete</span>
+              ) : (
+                <span>Delete selected items</span>
+              )
+            }
+          >
+            <button onClick={deleteRow} disabled={!hasSelection}>
+              <ForwardedIconComponent name="Trash2" className="h-5 w-5" />
+            </button>
+          </ShadTooltip>
+        )}{" "}
+      </div>
+    </div>
+  );
+}
diff --git a/src/frontend/src/components/tableComponent/components/tableAutoCellRender/index.tsx b/src/frontend/src/components/tableComponent/components/tableAutoCellRender/index.tsx
index 0c5c00ac9..4a6020f78 100644
--- a/src/frontend/src/components/tableComponent/components/tableAutoCellRender/index.tsx
+++ b/src/frontend/src/components/tableComponent/components/tableAutoCellRender/index.tsx
@@ -9,20 +9,17 @@ import { Badge } from "../../../ui/badge";

 export default function TableAutoCellRender({
   value,
-}: CustomCellRendererProps) {
+}: CustomCellRendererProps | { value: any }) {
   function getCellType() {
     switch (typeof value) {
       case "object":
         if (value === null) {
           return String(value);
         } else if (Array.isArray(value)) {
-          return <ArrayReader array={value} />;
-        } else if (value.definitions) {
-          // use a custom render defined by the sender
+          return <ArrayReader array={value} />;
         } else {
           return <ObjectRender object={value} />;
         }
-        break;
       case "string":
         if (isTimeStampString(value)) {
           return <DateReader date={value} />;
diff --git a/src/frontend/src/components/tableComponent/index.tsx b/src/frontend/src/components/tableComponent/index.tsx
index 139392922..cc43a4a47 100644
--- a/src/frontend/src/components/tableComponent/index.tsx
+++ b/src/frontend/src/components/tableComponent/index.tsx
@@ -1,7 +1,7 @@
 import "ag-grid-community/styles/ag-grid.css"; // Mandatory CSS required by the grid
 import "ag-grid-community/styles/ag-theme-quartz.css"; // Optional Theme applied to the grid
 import { AgGridReact, AgGridReactProps } from "ag-grid-react";
-import { ElementRef, forwardRef, useRef } from "react";
+import { ElementRef, forwardRef, useRef, useState } from "react";
 import {
   DEFAULT_TABLE_ALERT_MSG,
   DEFAULT_TABLE_ALERT_TITLE,
@@ -11,7 +11,8 @@
 import "../../style/ag-theme-shadcn.css"; // Custom CSS applied to the grid
 import { cn, toTitleCase } from "../../utils/utils";
 import ForwardedIconComponent from "../genericIconComponent";
 import { Alert, AlertDescription, AlertTitle } from "../ui/alert";
-import ResetColumns from "./components/ResetColumns";
+import TableOptions from "./components/TableOptions";
+import { useParams } from "react-router-dom";
 import resetGrid from "./utils/reset-grid-columns";

 interface TableComponentProps extends AgGridReactProps {
@@ -20,8 +21,8 @@ interface TableComponentProps extends AgGridReactProps {
   alertTitle?: string;
   alertDescription?: string;
   editable?: boolean | string[];
-  onDelete?: (selectedRows: any) => void;
-  onDuplicate?: (selectedRows: any) => void;
+  onDelete?: () => void;
+  onDuplicate?: () => void;
 }

 const TableComponent = forwardRef<
@@ -69,9 +70,12 @@
   });
   const gridRef = useRef(null);
   // @ts-ignore
-  const realRef = ref?.current ? ref : gridRef;
+  const realRef: React.MutableRefObject<any> = ref?.current
+    ? ref
+    : gridRef;
   const dark = useDarkStore((state) => state.dark);
   const initialColumnDefs = useRef(colDef);
+  const [columnStateChange, setColumnStateChange] = useState(false);

   const makeLastColumnNonResizable = (columnDefs) => {
     columnDefs.forEach((colDef, index) => {
@@ -87,6 +91,9 @@
     params.api.setGridOption("columnDefs", updatedColumnDefs);
     initialColumnDefs.current = params.api.getColumnDefs();
     if (props.onGridReady) props.onGridReady(params);
+    setTimeout(() => {
+      setColumnStateChange(false);
+    }, 50);
   };

   const onColumnMoved = (params) => {
@@ -121,7 +128,7 @@
       >
         <AgGridReact
           {...props}
           ref={realRef}
-        />
+          onStateUpdated={(e) => {
+            if (e.sources.some((source) => source.includes("column"))) {
+              setColumnStateChange(true);
+            }
+          }}
+        />
+        <TableOptions
+          stateChange={columnStateChange}
+          hasSelection={selectedRows.length > 0}
+          duplicateRow={props.onDuplicate ? props.onDuplicate : undefined}
+          deleteRow={props.onDelete ? props.onDelete : undefined}
+          resetGrid={() => {
+            resetGrid(realRef, initialColumnDefs);
+            setTimeout(() => {
+              setColumnStateChange(false);
+            }, 100);
+          }}
+        />
-        <ResetColumns resetGrid={() => resetGrid(realRef, initialColumnDefs)} />
     );
   },
diff --git a/src/frontend/src/controllers/API/index.ts b/src/frontend/src/controllers/API/index.ts
index f59f37410..fd3847e97 100644
--- a/src/frontend/src/controllers/API/index.ts
+++ b/src/frontend/src/controllers/API/index.ts
@@ -982,9 +982,7 @@ export async function postBuildVertex(
   files?: string[],
 ): Promise<AxiosResponse<any>> {
   // input_value is optional and is a query parameter
-  const data = input_value
-    ? { inputs: { input_value: input_value } }
-    : undefined;
+  const data = { inputs: { input_value: input_value ?? "" } };
   if (data && files) {
     data["files"] = files;
   }
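A small sketch of what the `postBuildVertex` change above does to the request body (the helper function and its types are hypothetical; only the object shape comes from the hunk):

```ts
// Hypothetical helper mirroring the new logic: `inputs` is now always present,
// with input_value defaulting to "". Previously `data` was undefined whenever
// no input_value was given, so `files` were silently dropped in that case.
function buildVertexRequestBody(
  input_value?: string,
  files?: string[],
): { inputs: { input_value: string }; files?: string[] } {
  const data: { inputs: { input_value: string }; files?: string[] } = {
    inputs: { input_value: input_value ?? "" },
  };
  if (files) {
    data.files = files;
  }
  return data;
}

// buildVertexRequestBody()            -> { inputs: { input_value: "" } }
// buildVertexRequestBody("hi", ["a"]) -> { inputs: { input_value: "hi" }, files: ["a"] }
```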
diff --git a/src/frontend/src/modals/IOModal/components/SessionView/index.tsx b/src/frontend/src/modals/IOModal/components/SessionView/index.tsx
index b21a786a1..b20ed6c57 100644
--- a/src/frontend/src/modals/IOModal/components/SessionView/index.tsx
+++ b/src/frontend/src/modals/IOModal/components/SessionView/index.tsx
@@ -44,43 +44,23 @@ export default function SessionView({ rows }: { rows: Array<any> }) {
   }

   return (
-    <>
-      <div>
-        <div>
-        </div>
-      </div>
-      <TableComponent
-        onCellValueChanged={(event) => {
-          handleUpdateMessage(event);
-        }}
-        editable={["Sender Name", "Message"]}
-        overlayNoRowsTemplate="No data available"
-        onSelectionChanged={(event: SelectionChangedEvent) => {
-          setSelectedRows(event.api.getSelectedRows().map((row) => row.index));
-        }}
-        rowSelection="multiple"
-        suppressRowClickSelection={true}
-        pagination={true}
-        columnDefs={columns}
-        rowData={rows}
-      />
-    </>
+    <TableComponent
+      onCellValueChanged={(event) => {
+        handleUpdateMessage(event);
+      }}
+      editable={["Sender Name", "Message"]}
+      overlayNoRowsTemplate="No data available"
+      onSelectionChanged={(event: SelectionChangedEvent) => {
+        setSelectedRows(event.api.getSelectedRows().map((row) => row.index));
+      }}
+      rowSelection="multiple"
+      suppressRowClickSelection={true}
+      pagination={true}
+      columnDefs={columns}
+      rowData={rows}
+    />
   );
 }
diff --git a/src/frontend/src/modals/IOModal/components/chatView/chatInput/components/buttonSendWrapper/index.tsx b/src/frontend/src/modals/IOModal/components/chatView/chatInput/components/buttonSendWrapper/index.tsx
new file mode 100644
index 000000000..ff93a9f7b
--- /dev/null
+++ b/src/frontend/src/modals/IOModal/components/chatView/chatInput/components/buttonSendWrapper/index.tsx
@@ -0,0 +1,52 @@
+import IconComponent from "../../../../../../../components/genericIconComponent";
+import { Case } from "../../../../../../../shared/components/caseComponent";
+import { classNames } from "../../../../../../../utils/utils";
+
+const ButtonSendWrapper = ({
+  send,
+  lockChat,
+  noInput,
+  saveLoading,
+  chatValue,
+}) => {
+  return (
+  );
+};
+
+export default ButtonSendWrapper;
diff --git a/src/frontend/src/modals/IOModal/components/chatView/chatInput/components/textAreaWrapper/index.tsx b/src/frontend/src/modals/IOModal/components/chatView/chatInput/components/textAreaWrapper/index.tsx
new file mode 100644
index 000000000..29f88453a
--- /dev/null
+++ b/src/frontend/src/modals/IOModal/components/chatView/chatInput/components/textAreaWrapper/index.tsx
@@ -0,0 +1,81 @@
+import { Textarea } from "../../../../../../../components/ui/textarea";
+import { classNames } from "../../../../../../../utils/utils";
+
+const TextAreaWrapper = ({
+  checkSendingOk,
+  send,
+  lockChat,
+  noInput,
+  saveLoading,
+  chatValue,
+  setChatValue,
+  CHAT_INPUT_PLACEHOLDER,
+  CHAT_INPUT_PLACEHOLDER_SEND,
+  inputRef,
+  setInputFocus,
+  files,
+  isDragging,
+}) => {
+  const getPlaceholderText = (
+    isDragging: boolean,
+    noInput: boolean,
+  ): string => {
+    if (isDragging) {
+      return "Drop here";
+    } else if (noInput) {
+      return CHAT_INPUT_PLACEHOLDER;
+    } else {
+      return CHAT_INPUT_PLACEHOLDER_SEND;
+    }
+  };
+
+  const lockClass =
+    lockChat || saveLoading
+      ? "form-modal-lock-true bg-input"
+      : noInput
+        ? "form-modal-no-input bg-input"
+        : "form-modal-lock-false bg-background";
+
+  const fileClass =
+    files.length > 0
+      ? "rounded-b-lg ring-0 focus:ring-0 focus:border-2 rounded-t-none border-t-0 border-border focus:border-t-0 focus:border-ring"
+      : "rounded-md border-t border-border focus:ring-0 focus:border-2 focus:border-ring";
+
+  const additionalClassNames = "form-modal-lockchat pl-14";
+
+  return (
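The JSX bodies of the two new chat-input wrapper files are cut off above, so here is a hedged sketch of how a parent component might wire them together; every prop value, placeholder string, and the container markup are assumptions made for illustration only:

```tsx
// Hypothetical parent wiring: ButtonSendWrapper and TextAreaWrapper are the
// real files added above, but their render bodies are truncated in this diff,
// so the values passed here are illustrative, not taken from the codebase.
import { useRef, useState } from "react";
import ButtonSendWrapper from "./components/buttonSendWrapper";
import TextAreaWrapper from "./components/textAreaWrapper";

export default function ChatInputSketch({
  send,
  lockChat,
  noInput,
}: {
  send: () => void;
  lockChat: boolean;
  noInput: boolean;
}): JSX.Element {
  const inputRef = useRef<HTMLTextAreaElement>(null);
  const [chatValue, setChatValue] = useState("");
  const [, setInputFocus] = useState(false);

  return (
    <div className="flex w-full items-end gap-2">
      <TextAreaWrapper
        checkSendingOk={() => !lockChat}
        send={send}
        lockChat={lockChat}
        noInput={noInput}
        saveLoading={false}
        chatValue={chatValue}
        setChatValue={setChatValue}
        // Both placeholder strings are assumed copy, passed through as props.
        CHAT_INPUT_PLACEHOLDER="No chat input found. Run your flow to chat."
        CHAT_INPUT_PLACEHOLDER_SEND="Send a message..."
        inputRef={inputRef}
        setInputFocus={setInputFocus}
        files={[]}
        isDragging={false}
      />
      <ButtonSendWrapper
        send={send}
        lockChat={lockChat}
        noInput={noInput}
        saveLoading={false}
        chatValue={chatValue}
      />
    </div>
  );
}
```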