Release 0.6.4 (#1268)

This PR:
- Updates Vectara component by @JAtharva22
- Adds Ollama components by @yamonkjd 
- Fixes bugs in the canvas by @Cristhianzl
- Adds Docker image builds on release
This commit is contained in:
Gabriel Luiz Freitas Almeida 2023-12-29 11:02:32 -03:00 committed by GitHub
commit 57c6dad343
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
17 changed files with 1288 additions and 793 deletions

View file

@ -1,2 +1,7 @@
.venv/
**/aws
**/aws
# node_modules
**/node_modules/
dist/
**/build/
src/backend/langflow/frontend

View file

@ -47,3 +47,19 @@ jobs:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
poetry publish
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
push: true
file: ./build_and_push.Dockerfile
tags: logspace/langflow:${{ steps.check-version.outputs.version }}

View file

@ -45,3 +45,21 @@ jobs:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
poetry publish
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
push: true
file: ./build_and_push.Dockerfile
tags: |
logspace/langflow:${{ steps.check-version.outputs.version }}
logspace/langflow:latest

79
build_and_push.Dockerfile Normal file
View file

@ -0,0 +1,79 @@
# syntax=docker/dockerfile:1
# Keep this syntax directive! It's used to enable Docker BuildKit
# Based on https://github.com/python-poetry/poetry/discussions/1879?sort=top#discussioncomment-216865
# but I try to keep it updated (see history)

################################
# PYTHON-BASE
# Sets up all our shared environment variables
################################
FROM python:3.10-slim as python-base

# python
ENV PYTHONUNBUFFERED=1 \
    # prevents python creating .pyc files
    PYTHONDONTWRITEBYTECODE=1 \
    \
    # pip
    PIP_DISABLE_PIP_VERSION_CHECK=on \
    PIP_DEFAULT_TIMEOUT=100 \
    \
    # poetry
    # https://python-poetry.org/docs/configuration/#using-environment-variables
    POETRY_VERSION=1.7.1 \
    # make poetry install to this location
    POETRY_HOME="/opt/poetry" \
    # make poetry create the virtual environment in the project's root
    # it gets named `.venv`
    POETRY_VIRTUALENVS_IN_PROJECT=true \
    # do not ask any interactive question
    POETRY_NO_INTERACTION=1 \
    \
    # paths
    # this is where our requirements + virtual environment will live
    PYSETUP_PATH="/opt/pysetup" \
    VENV_PATH="/opt/pysetup/.venv"

# prepend poetry and venv to path
ENV PATH="$POETRY_HOME/bin:$VENV_PATH/bin:$PATH"

################################
# BUILDER-BASE
# Used to build deps + create our virtual environment
################################
FROM python-base as builder-base

# NOTE: the original file had a bare `RUN` instruction here, which is
# invalid Dockerfile syntax — removed.
# Clean the apt lists afterwards to keep the layer small.
RUN apt-get update \
    && apt-get install --no-install-recommends -y \
        # deps for installing poetry
        curl \
        # deps for building python deps
        build-essential \
        # npm
        npm \
    && rm -rf /var/lib/apt/lists/*

# Now we need to copy the entire project into the image
WORKDIR /app
COPY pyproject.toml poetry.lock ./
COPY src ./src
COPY Makefile ./
COPY README.md ./

# Install poetry, then build the sdist/wheel (make build drives poetry)
RUN curl -sSL https://install.python-poetry.org | python3 - && make build

# Final stage for the application
FROM python-base as final

# Copy virtual environment and built .tar.gz from builder base
COPY --from=builder-base /app/dist/*.tar.gz ./

# Install the package from the .tar.gz
RUN pip install *.tar.gz

WORKDIR /app
CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860"]

991
poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.6.3"
version = "0.6.4"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [
@ -28,15 +28,15 @@ langflow = "langflow.__main__:main"
python = ">=3.9,<3.11"
fastapi = "^0.104.0"
uvicorn = "^0.23.0"
fastapi = "^0.108.0"
uvicorn = "^0.25.0"
beautifulsoup4 = "^4.12.2"
google-search-results = "^2.4.1"
google-api-python-client = "^2.79.0"
typer = "^0.9.0"
gunicorn = "^21.2.0"
langchain = "~0.0.345"
openai = "^1.3.6"
openai = "^1.6.1"
pandas = "2.0.3"
chromadb = "^0.4.0"
huggingface-hub = { version = "^0.19.0", extras = ["inference"] }
@ -47,31 +47,31 @@ unstructured = "^0.11.0"
pypdf = "^3.17.0"
lxml = "^4.9.2"
pysrt = "^1.1.2"
fake-useragent = "^1.3.0"
fake-useragent = "^1.4.0"
docstring-parser = "^0.15"
psycopg2-binary = "^2.9.6"
pyarrow = "^14.0.0"
tiktoken = "~0.5.0"
wikipedia = "^1.4.0"
qdrant-client = "^1.4.0"
qdrant-client = "^1.7.0"
websockets = "^10.3"
weaviate-client = "^3.23.0"
weaviate-client = "^3.26.0"
jina = "*"
sentence-transformers = { version = "^2.2.2", optional = true }
ctransformers = { version = "^0.2.10", optional = true }
cohere = "^4.37.0"
cohere = "^4.39.0"
python-multipart = "^0.0.6"
sqlmodel = "^0.0.14"
faiss-cpu = "^1.7.4"
anthropic = "^0.7.0"
anthropic = "^0.8.0"
orjson = "3.9.3"
multiprocess = "^0.70.14"
cachetools = "^5.3.1"
types-cachetools = "^5.3.0.5"
platformdirs = "^4.1.0"
pinecone-client = "^2.2.2"
pymongo = "^4.5.0"
supabase = "^2.0.3"
pymongo = "^4.6.0"
supabase = "^2.3.0"
certifi = "^2023.11.17"
google-cloud-aiplatform = "^1.36.0"
psycopg = "^3.1.9"
@ -81,13 +81,13 @@ langchain-experimental = "*"
celery = { extras = ["redis"], version = "^5.3.6", optional = true }
redis = { version = "^4.6.0", optional = true }
flower = { version = "^2.0.0", optional = true }
alembic = "^1.12.0"
alembic = "^1.13.0"
passlib = "^1.7.4"
bcrypt = "4.0.1"
python-jose = "^3.3.0"
metaphor-python = "^0.1.11"
pydantic = "^2.0.0"
pydantic-settings = "^2.0.3"
pydantic = "^2.5.0"
pydantic-settings = "^2.1.0"
zep-python = "*"
pywin32 = { version = "^306", markers = "sys_platform == 'win32'" }
loguru = "^0.7.1"
@ -100,10 +100,11 @@ extract-msg = "^0.45.0"
jq = { version = "^1.6.0", markers = "sys_platform != 'win32'" }
boto3 = "^1.28.63"
numexpr = "^2.8.6"
qianfan = "0.0.5"
qianfan = "0.2.0"
pgvector = "^0.2.3"
pyautogen = "^0.2.0"
langchain-google-genai = "^0.0.2"
pytube = "^15.0.0"
[tool.poetry.group.dev.dependencies]
pytest-asyncio = "^0.23.1"

View file

@ -0,0 +1,252 @@
from typing import Any, Dict, List, Optional
# from langchain_community.chat_models import ChatOllama
from langchain.chat_models import ChatOllama
from langchain.chat_models.base import BaseChatModel
# from langchain.chat_models import ChatOllama
from langflow import CustomComponent
# When a callback component is added to Langflow, uncomment the import below.
# from langchain.callbacks.manager import CallbackManager
class ChatOllamaComponent(CustomComponent):
    """Langflow component wrapping LangChain's ``ChatOllama`` chat model.

    Exposes the Ollama chat parameters as configurable UI fields and builds a
    ``ChatOllama`` instance from the values the user supplied (``None`` values
    are dropped so Ollama's own defaults apply).
    """

    display_name = "ChatOllama"
    description = "Local LLM for chat with Ollama."

    def build_config(self) -> dict:
        """Return the field configuration rendered by the Langflow UI."""
        return {
            "base_url": {
                "display_name": "Base URL",
                "info": "Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.",
            },
            "model": {
                "display_name": "Model Name",
                "value": "llama2",
                "info": "Refer to https://ollama.ai/library for more models.",
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
                "value": 0.8,
                "info": "Controls the creativity of model responses.",
            },
            "cache": {
                "display_name": "Cache",
                "field_type": "bool",
                "info": "Enable or disable caching.",
                "advanced": True,
                "value": False,
            },
            ### When a callback component is added to Langflow, the comment must be uncommented. ###
            # "callback_manager": {
            #     "display_name": "Callback Manager",
            #     "info": "Optional callback manager for additional functionality.",
            #     "advanced": True,
            # },
            # "callbacks": {
            #     "display_name": "Callbacks",
            #     "info": "Callbacks to execute during model runtime.",
            #     "advanced": True,
            # },
            ########################################################################################
            "format": {
                "display_name": "Format",
                "field_type": "str",
                "info": "Specify the format of the output (e.g., json).",
                "advanced": True,
            },
            "metadata": {
                "display_name": "Metadata",
                "info": "Metadata to add to the run trace.",
                "advanced": True,
            },
            "mirostat": {
                "display_name": "Mirostat",
                "options": ["Disabled", "Mirostat", "Mirostat 2.0"],
                "info": "Enable/disable Mirostat sampling for controlling perplexity.",
                "value": "Disabled",
                "advanced": True,
            },
            "mirostat_eta": {
                "display_name": "Mirostat Eta",
                "field_type": "float",
                "info": "Learning rate for Mirostat algorithm. (Default: 0.1)",
                "advanced": True,
            },
            "mirostat_tau": {
                "display_name": "Mirostat Tau",
                "field_type": "float",
                "info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)",
                "advanced": True,
            },
            "num_ctx": {
                "display_name": "Context Window Size",
                "field_type": "int",
                "info": "Size of the context window for generating tokens. (Default: 2048)",
                "advanced": True,
            },
            "num_gpu": {
                "display_name": "Number of GPUs",
                "field_type": "int",
                "info": "Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)",
                "advanced": True,
            },
            "num_thread": {
                "display_name": "Number of Threads",
                "field_type": "int",
                "info": "Number of threads to use during computation. (Default: detected for optimal performance)",
                "advanced": True,
            },
            "repeat_last_n": {
                "display_name": "Repeat Last N",
                "field_type": "int",
                "info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
                "advanced": True,
            },
            "repeat_penalty": {
                "display_name": "Repeat Penalty",
                "field_type": "float",
                "info": "Penalty for repetitions in generated text. (Default: 1.1)",
                "advanced": True,
            },
            "tfs_z": {
                "display_name": "TFS Z",
                "field_type": "float",
                "info": "Tail free sampling value. (Default: 1)",
                "advanced": True,
            },
            "timeout": {
                "display_name": "Timeout",
                "field_type": "int",
                "info": "Timeout for the request stream.",
                "advanced": True,
            },
            "top_k": {
                "display_name": "Top K",
                "field_type": "int",
                "info": "Limits token selection to top K. (Default: 40)",
                "advanced": True,
            },
            "top_p": {
                "display_name": "Top P",
                "field_type": "float",
                "info": "Works together with top-k. (Default: 0.9)",
                "advanced": True,
            },
            "verbose": {
                "display_name": "Verbose",
                "field_type": "bool",
                "info": "Whether to print out response text.",
            },
            "tags": {
                "display_name": "Tags",
                "field_type": "list",
                "info": "Tags to add to the run trace.",
                "advanced": True,
            },
            "stop": {
                "display_name": "Stop Tokens",
                "field_type": "list",
                "info": "List of tokens to signal the model to stop generating text.",
                "advanced": True,
            },
            "system": {
                "display_name": "System",
                "field_type": "str",
                "info": "System to use for generating text.",
                "advanced": True,
            },
            "template": {
                "display_name": "Template",
                "field_type": "str",
                "info": "Template to use for generating text.",
                "advanced": True,
            },
        }

    def build(
        self,
        base_url: Optional[str],
        model: str,
        mirostat: Optional[str],
        mirostat_eta: Optional[float] = None,
        mirostat_tau: Optional[float] = None,
        ### When a callback component is added to Langflow, the comment must be uncommented. ###
        # callback_manager: Optional[CallbackManager] = None,
        # callbacks: Optional[List[Callbacks]] = None,
        #######################################################################################
        repeat_last_n: Optional[int] = None,
        verbose: Optional[bool] = None,
        cache: Optional[bool] = None,
        num_ctx: Optional[int] = None,
        num_gpu: Optional[int] = None,
        format: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
        num_thread: Optional[int] = None,
        repeat_penalty: Optional[float] = None,
        stop: Optional[List[str]] = None,
        system: Optional[str] = None,
        tags: Optional[List[str]] = None,
        temperature: Optional[float] = None,
        template: Optional[str] = None,
        tfs_z: Optional[float] = None,
        timeout: Optional[int] = None,
        top_k: Optional[int] = None,
        # FIX: was Optional[int]; top-p is a probability (default 0.9) and the
        # config above already declares field_type "float".
        top_p: Optional[float] = None,
    ) -> BaseChatModel:
        """Build a ``ChatOllama`` model from the configured parameters.

        Raises:
            ValueError: if the ``ChatOllama`` constructor fails (e.g. the
                Ollama server is unreachable or a parameter is invalid).
        """
        if not base_url:
            base_url = "http://localhost:11434"

        # Mapping mirostat settings to their corresponding values
        mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}

        # Default to 0 for 'Disabled'
        mirostat_value = mirostat_options.get(mirostat, 0)  # type: ignore

        # Set mirostat_eta and mirostat_tau to None if mirostat is disabled
        if mirostat_value == 0:
            mirostat_eta = None
            mirostat_tau = None

        llm_params = {
            "base_url": base_url,
            "cache": cache,
            "model": model,
            "mirostat": mirostat_value,
            "format": format,
            "metadata": metadata,
            "tags": tags,
            ## When a callback component is added to Langflow, the comment must be uncommented.##
            # "callback_manager": callback_manager,
            # "callbacks": callbacks,
            #####################################################################################
            "mirostat_eta": mirostat_eta,
            "mirostat_tau": mirostat_tau,
            "num_ctx": num_ctx,
            "num_gpu": num_gpu,
            "num_thread": num_thread,
            "repeat_last_n": repeat_last_n,
            "repeat_penalty": repeat_penalty,
            "temperature": temperature,
            "stop": stop,
            "system": system,
            "template": template,
            "tfs_z": tfs_z,
            "timeout": timeout,
            "top_k": top_k,
            "top_p": top_p,
            "verbose": verbose,
        }

        # Drop None values so Ollama's own defaults apply.
        llm_params = {k: v for k, v in llm_params.items() if v is not None}

        try:
            output = ChatOllama(**llm_params)  # type: ignore
        except Exception as e:
            raise ValueError("Could not initialize Ollama LLM.") from e

        return output  # type: ignore

View file

@ -0,0 +1,163 @@
from typing import Optional, List
from langchain.llms import Ollama
from langchain.llms.base import BaseLLM
from langflow import CustomComponent
class OllamaLLM(CustomComponent):
    """Langflow component wrapping LangChain's ``Ollama`` completion LLM.

    Exposes the Ollama sampling parameters as configurable UI fields and
    builds an ``Ollama`` instance from the values the user supplied
    (``None`` values are dropped so Ollama's own defaults apply).
    """

    display_name = "Ollama"
    description = "Local LLM with Ollama."

    def build_config(self) -> dict:
        """Return the field configuration rendered by the Langflow UI."""
        return {
            "base_url": {
                "display_name": "Base URL",
                "info": "Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.",
            },
            "model": {
                "display_name": "Model Name",
                "value": "llama2",
                "info": "Refer to https://ollama.ai/library for more models.",
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
                "value": 0.8,
                "info": "Controls the creativity of model responses.",
            },
            "mirostat": {
                "display_name": "Mirostat",
                "options": ["Disabled", "Mirostat", "Mirostat 2.0"],
                "info": "Enable/disable Mirostat sampling for controlling perplexity.",
                "value": "Disabled",
                "advanced": True,
            },
            "mirostat_eta": {
                "display_name": "Mirostat Eta",
                "field_type": "float",
                "info": "Learning rate influencing the algorithm's response to feedback.",
                "advanced": True,
            },
            "mirostat_tau": {
                "display_name": "Mirostat Tau",
                "field_type": "float",
                "info": "Controls balance between coherence and diversity.",
                "advanced": True,
            },
            "num_ctx": {
                "display_name": "Context Window Size",
                "field_type": "int",
                "info": "Size of the context window for generating the next token.",
                "advanced": True,
            },
            "num_gpu": {
                "display_name": "Number of GPUs",
                "field_type": "int",
                "info": "Number of GPUs to use for computation.",
                "advanced": True,
            },
            "num_thread": {
                "display_name": "Number of Threads",
                "field_type": "int",
                "info": "Number of threads to use during computation.",
                "advanced": True,
            },
            "repeat_last_n": {
                "display_name": "Repeat Last N",
                "field_type": "int",
                "info": "Sets how far back the model looks to prevent repetition.",
                "advanced": True,
            },
            "repeat_penalty": {
                "display_name": "Repeat Penalty",
                "field_type": "float",
                "info": "Penalty for repetitions in generated text.",
                "advanced": True,
            },
            "stop": {
                "display_name": "Stop Tokens",
                "info": "List of tokens to signal the model to stop generating text.",
                "advanced": True,
            },
            "tfs_z": {
                "display_name": "TFS Z",
                "field_type": "float",
                "info": "Tail free sampling to reduce impact of less probable tokens.",
                "advanced": True,
            },
            "top_k": {
                "display_name": "Top K",
                "field_type": "int",
                "info": "Limits token selection to top K for reducing nonsense generation.",
                "advanced": True,
            },
            "top_p": {
                "display_name": "Top P",
                # FIX: top-p is a probability in [0, 1]; an int field would only
                # allow 0 or 1. Matches the float annotation in build().
                "field_type": "float",
                "info": "Works with top-k to control diversity of generated text.",
                "advanced": True,
            },
        }

    def build(
        self,
        base_url: Optional[str],
        model: str,
        temperature: Optional[float],
        mirostat: Optional[str],
        mirostat_eta: Optional[float] = None,
        mirostat_tau: Optional[float] = None,
        num_ctx: Optional[int] = None,
        num_gpu: Optional[int] = None,
        num_thread: Optional[int] = None,
        repeat_last_n: Optional[int] = None,
        repeat_penalty: Optional[float] = None,
        stop: Optional[List[str]] = None,
        tfs_z: Optional[float] = None,
        top_k: Optional[int] = None,
        # FIX: was Optional[int]; top-p is a probability (typical default 0.9).
        top_p: Optional[float] = None,
    ) -> BaseLLM:
        """Build an ``Ollama`` LLM from the configured parameters.

        Raises:
            ValueError: if the ``Ollama`` constructor fails (e.g. the Ollama
                server is unreachable or a parameter is invalid).
        """
        if not base_url:
            base_url = "http://localhost:11434"

        # Mapping mirostat settings to their corresponding values
        mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}

        # Default to 0 for 'Disabled'
        mirostat_value = mirostat_options.get(mirostat, 0)  # type: ignore

        # Set mirostat_eta and mirostat_tau to None if mirostat is disabled
        if mirostat_value == 0:
            mirostat_eta = None
            mirostat_tau = None

        llm_params = {
            "base_url": base_url,
            "model": model,
            "mirostat": mirostat_value,
            "mirostat_eta": mirostat_eta,
            "mirostat_tau": mirostat_tau,
            "num_ctx": num_ctx,
            "num_gpu": num_gpu,
            "num_thread": num_thread,
            "repeat_last_n": repeat_last_n,
            "repeat_penalty": repeat_penalty,
            "temperature": temperature,
            "stop": stop,
            "tfs_z": tfs_z,
            "top_k": top_k,
            "top_p": top_p,
        }

        # Drop None values so Ollama's own defaults apply.
        llm_params = {k: v for k, v in llm_params.items() if v is not None}

        try:
            llm = Ollama(**llm_params)
        except Exception as e:
            raise ValueError("Could not connect to Ollama.") from e

        return llm

View file

@ -0,0 +1,70 @@
from typing import List
from langflow import CustomComponent
import json
from langchain.schema import BaseRetriever
from langchain.schema.vectorstore import VectorStore
from langchain.base_language import BaseLanguageModel
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
class VectaraSelfQueryRetriverComponent(CustomComponent):
    """
    A custom component for implementing Vectara Self Query Retriever using a vector store.
    """

    display_name: str = "Vectara Self Query Retriever for Vectara Vector Store"
    description: str = "Implementation of Vectara Self Query Retriever"
    documentation = (
        "https://python.langchain.com/docs/integrations/retrievers/self_query/vectara_self_query"
    )
    beta = True

    field_config = {
        "code": {"show": True},
        "vectorstore": {
            "display_name": "Vector Store",
            "info": "Input Vectara Vectore Store",
        },
        "llm": {
            "display_name": "LLM",
            "info": "For self query retriever",
        },
        "document_content_description": {
            "display_name": "Document Content Description",
            "info": "For self query retriever",
        },
        "metadata_field_info": {
            "display_name": "Metadata Field Info",
            "info": "Each metadata field info is a string in the form of key value pair dictionary containing additional search metadata.\nExample input: {\"name\":\"speech\",\"description\":\"what name of the speech\",\"type\":\"string or list[string]\"}.\nThe keys should remain constant(name, description, type)",
        },
    }

    def build(
        self,
        vectorstore: VectorStore,
        document_content_description: str,
        llm: BaseLanguageModel,
        metadata_field_info: List[str],
    ) -> BaseRetriever:
        """Build a ``SelfQueryRetriever`` over the given Vectara vector store.

        Each entry of ``metadata_field_info`` is a JSON string describing one
        metadata attribute with exactly the keys ``name``, ``description`` and
        ``type``.

        Raises:
            ValueError: if an entry is not valid JSON or is missing a
                required key. (ValueError subclasses Exception, so existing
                callers catching Exception still work.)
        """
        metadata_field_obj = []
        for meta in metadata_field_info:
            # Surface a clear error for malformed JSON instead of a raw
            # JSONDecodeError with no context.
            try:
                meta_obj = json.loads(meta)
            except json.JSONDecodeError as e:
                raise ValueError("Incorrect metadata field info format.") from e
            if "name" not in meta_obj or "description" not in meta_obj or "type" not in meta_obj:
                raise ValueError("Incorrect metadata field info format.")
            attribute_info = AttributeInfo(
                name=meta_obj["name"],
                description=meta_obj["description"],
                type=meta_obj["type"],
            )
            metadata_field_obj.append(attribute_info)

        return SelfQueryRetriever.from_llm(
            llm,
            vectorstore,
            document_content_description,
            metadata_field_obj,
            verbose=True,
        )

View file

@ -1,10 +1,14 @@
from typing import Optional, Union
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores import Vectara
from langchain.vectorstores.base import VectorStore
from typing import Optional, Union, List
from langflow import CustomComponent
import tempfile
import urllib.request
import urllib
from langchain.vectorstores import Vectara
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.schema import BaseRetriever
from langchain.embeddings import FakeEmbeddings
class VectaraComponent(CustomComponent):
@ -12,13 +16,29 @@ class VectaraComponent(CustomComponent):
description: str = "Implementation of Vector Store using Vectara"
documentation = "https://python.langchain.com/docs/integrations/vectorstores/vectara"
beta = True
# api key should be password = True
field_config = {
"vectara_customer_id": {"display_name": "Vectara Customer ID"},
"vectara_corpus_id": {"display_name": "Vectara Corpus ID"},
"vectara_api_key": {"display_name": "Vectara API Key", "password": True},
"vectara_customer_id": {
"display_name": "Vectara Customer ID",
"required": True,
},
"vectara_corpus_id": {
"display_name": "Vectara Corpus ID",
"required": True,
},
"vectara_api_key": {
"display_name": "Vectara API Key",
"password": True,
"required": True,
},
"code": {"show": False},
"documents": {"display_name": "Documents"},
"documents": {
"display_name": "Documents",
"info": "Pass in either for Self Query Retriever or for making a Vectara Object",
},
"files_url": {
"display_name": "Files Url",
"info": "Make vectara object using url of files(documents not needed)",
},
}
def build(
@ -26,21 +46,35 @@ class VectaraComponent(CustomComponent):
vectara_customer_id: str,
vectara_corpus_id: str,
vectara_api_key: str,
files_url: Optional[List[str]] = None,
documents: Optional[Document] = None,
) -> Union[VectorStore, BaseRetriever]:
# If documents, then we need to create a Vectara instance using .from_documents
if documents is not None:
return Vectara.from_documents(
documents=documents, # type: ignore
documents=documents,
embedding=FakeEmbeddings(size=768),
vectara_customer_id=vectara_customer_id,
vectara_corpus_id=vectara_corpus_id,
vectara_api_key=vectara_api_key,
)
if files_url is not None:
files_list = []
for url in files_url:
name = tempfile.NamedTemporaryFile().name
urllib.request.urlretrieve(url, name)
files_list.append(name)
return Vectara.from_files(
files=files_list,
embedding=FakeEmbeddings(size=768),
vectara_customer_id=vectara_customer_id,
vectara_corpus_id=vectara_corpus_id,
vectara_api_key=vectara_api_key,
source="langflow",
)
return Vectara(
vectara_customer_id=vectara_customer_id,
vectara_corpus_id=vectara_corpus_id,
vectara_api_key=vectara_api_key,
source="langflow",
)

View file

@ -3,13 +3,12 @@ import inspect
import types
from typing import TYPE_CHECKING, Any, Coroutine, Dict, List, Optional
from loguru import logger
from langflow.graph.utils import UnbuiltObject
from langflow.interface.initialize import loading
from langflow.interface.listing import lazy_load_dict
from langflow.utils.constants import DIRECT_TYPES
from langflow.utils.util import sync_to_async
from loguru import logger
if TYPE_CHECKING:
from langflow.graph.edge.base import Edge
@ -140,7 +139,11 @@ class Vertex:
if not hasattr(edge, "target_param"):
continue
param_key = edge.target_param
if param_key in template_dict:
# If the param_key is in the template_dict and the edge.target_id is the current node
# We check this to make sure params with the same name but different target_id
# don't get overwritten
if param_key in template_dict and edge.target_id == self.id:
if template_dict[param_key]["list"]:
if param_key not in params:
params[param_key] = []

View file

@ -23,107 +23,119 @@ export default function Dropdown({
return (
<>
<Listbox
value={internalValue}
onChange={(value) => {
setInternalValue(value);
onSelect(value);
}}
>
{({ open }) => (
<>
<div className={"relative mt-1"}>
<Listbox.Button
data-test={`${id ?? ""}`}
className={
editNode
? "dropdown-component-outline"
: "dropdown-component-false-outline"
}
>
<span
className="dropdown-component-display"
data-testid={`${id ?? ""}-display`}
>
{internalValue}
</span>
<span className={"dropdown-component-arrow"}>
<IconComponent
name="ChevronsUpDown"
className="dropdown-component-arrow-color"
aria-hidden="true"
/>
</span>
</Listbox.Button>
<Transition
show={open}
as={Fragment}
leave="transition ease-in duration-100"
leaveFrom="opacity-100"
leaveTo="opacity-0"
>
<Listbox.Options
className={classNames(
editNode
? "dropdown-component-true-options nowheel custom-scroll"
: "dropdown-component-false-options nowheel custom-scroll",
apiModal ? "mb-2 w-[250px]" : "absolute w-full"
)}
>
{options.map((option, id) => (
<Listbox.Option
key={id}
className={({ active }) =>
classNames(
active ? " bg-accent" : "",
editNode
? "dropdown-component-false-option"
: "dropdown-component-true-option"
)
}
value={option}
{Object.keys(options)?.length > 0 ? (
<>
<Listbox
value={internalValue}
onChange={(value) => {
setInternalValue(value);
onSelect(value);
}}
>
{({ open }) => (
<>
<div className={"relative mt-1"}>
<Listbox.Button
data-test={`${id ?? ""}`}
className={
editNode
? "dropdown-component-outline"
: "dropdown-component-false-outline"
}
>
<span
className="dropdown-component-display"
data-testid={`${id ?? ""}-display`}
>
{({ selected, active }) => (
<>
<span
className={classNames(
selected ? "font-semibold" : "font-normal",
"block truncate "
)}
data-testid={`${option}-${id ?? ""}-option`}
>
{option}
</span>
{internalValue}
</span>
<span className={"dropdown-component-arrow"}>
<IconComponent
name="ChevronsUpDown"
className="dropdown-component-arrow-color"
aria-hidden="true"
/>
</span>
</Listbox.Button>
{selected ? (
<span
className={classNames(
active ? "text-background " : "",
"dropdown-component-choosal"
)}
>
<IconComponent
name="Check"
className={
active
? "dropdown-component-check-icon"
: "dropdown-component-check-icon"
}
aria-hidden="true"
/>
</span>
) : null}
</>
<Transition
show={open}
as={Fragment}
leave="transition ease-in duration-100"
leaveFrom="opacity-100"
leaveTo="opacity-0"
>
<Listbox.Options
className={classNames(
editNode
? "dropdown-component-true-options nowheel custom-scroll"
: "dropdown-component-false-options nowheel custom-scroll",
apiModal ? "mb-2 w-[250px]" : "absolute w-full"
)}
</Listbox.Option>
))}
</Listbox.Options>
</Transition>
</div>
</>
)}
</Listbox>
>
{options?.map((option, id) => (
<Listbox.Option
key={id}
className={({ active }) =>
classNames(
active ? " bg-accent" : "",
editNode
? "dropdown-component-false-option"
: "dropdown-component-true-option"
)
}
value={option}
>
{({ selected, active }) => (
<>
<span
className={classNames(
selected ? "font-semibold" : "font-normal",
"block truncate "
)}
data-testid={`${option}-${id ?? ""}-option`}
>
{option}
</span>
{selected ? (
<span
className={classNames(
active ? "text-background " : "",
"dropdown-component-choosal"
)}
>
<IconComponent
name="Check"
className={
active
? "dropdown-component-check-icon"
: "dropdown-component-check-icon"
}
aria-hidden="true"
/>
</span>
) : null}
</>
)}
</Listbox.Option>
))}
</Listbox.Options>
</Transition>
</div>
</>
)}
</Listbox>
</>
) : (
<>
<div>
<span className="text-sm italic">
No parameters are available for display.
</span>
</div>
</>
)}
</>
);
}

View file

@ -46,7 +46,14 @@ export default function ChatInput({
: "hidden"
}`,
}}
value={lockChat ? "Thinking..." : chatValue}
value={
lockChat
? "Thinking..."
: typeof chatValue === "object" &&
Object.keys(chatValue)?.length === 0
? "No chat input variables found. Click to run your flow."
: chatValue
}
onChange={(event): void => {
setChatValue(event.target.value);
}}

View file

@ -1,5 +1,5 @@
import Convert from "ansi-to-html";
import { useMemo, useState } from "react";
import { useState } from "react";
import ReactMarkdown from "react-markdown";
import rehypeMathjax from "rehype-mathjax";
import remarkGfm from "remark-gfm";
@ -74,77 +74,71 @@ export default function ChatMessage({
<div className="w-full">
<div className="w-full dark:text-white">
<div className="w-full">
{useMemo(
() =>
chat.message.toString() === "" && lockChat ? (
<IconComponent
name="MoreHorizontal"
className="h-8 w-8 animate-pulse"
/>
) : (
<ReactMarkdown
remarkPlugins={[remarkGfm, remarkMath]}
rehypePlugins={[rehypeMathjax]}
className="markdown prose min-w-full text-primary word-break-break-word
{chat.message.toString() === "" && lockChat ? (
<IconComponent
name="MoreHorizontal"
className="h-8 w-8 animate-pulse"
/>
) : (
<ReactMarkdown
remarkPlugins={[remarkGfm, remarkMath]}
rehypePlugins={[rehypeMathjax]}
className="markdown prose min-w-full text-primary word-break-break-word
dark:prose-invert"
components={{
pre({ node, ...props }) {
return <>{props.children}</>;
},
code: ({
node,
inline,
className,
children,
...props
}) => {
if (children.length) {
if (children[0] === "▍") {
return (
<span className="form-modal-markdown-span">
</span>
);
}
children[0] = (children[0] as string).replace(
"`▍`",
"▍"
);
}
const match = /language-(\w+)/.exec(
className || ""
components={{
pre({ node, ...props }) {
return <>{props.children}</>;
},
code: ({
node,
inline,
className,
children,
...props
}) => {
if (children.length) {
if (children[0] === "▍") {
return (
<span className="form-modal-markdown-span">
</span>
);
}
return !inline ? (
<CodeTabsComponent
isMessage
tabs={[
{
name: (match && match[1]) || "",
mode: (match && match[1]) || "",
image:
"https://curl.se/logo/curl-symbol-transparent.png",
language: (match && match[1]) || "",
code: String(children).replace(/\n$/, ""),
},
]}
activeTab={"0"}
setActiveTab={() => {}}
/>
) : (
<code className={className} {...props}>
{children}
</code>
);
},
}}
>
{chat.message.toString()}
</ReactMarkdown>
),
[chat.message, chat.message.toString()]
children[0] = (children[0] as string).replace(
"`▍`",
"▍"
);
}
const match = /language-(\w+)/.exec(className || "");
return !inline ? (
<CodeTabsComponent
isMessage
tabs={[
{
name: (match && match[1]) || "",
mode: (match && match[1]) || "",
image:
"https://curl.se/logo/curl-symbol-transparent.png",
language: (match && match[1]) || "",
code: String(children).replace(/\n$/, ""),
},
]}
activeTab={"0"}
setActiveTab={() => {}}
/>
) : (
<code className={className} {...props}>
{children}
</code>
);
},
}}
>
{chat.message.toString()}
</ReactMarkdown>
)}
</div>
{chat.files && (

View file

@ -146,13 +146,14 @@ export default function FormModal({
newChat[newChat.length - 1].message + str;
}
}
if (thought) {
if (thought && newChat[newChat.length - 1]?.thought) {
newChat[newChat.length - 1].thought = thought;
}
if (files) {
if (files && newChat[newChat.length - 1]?.files) {
newChat[newChat.length - 1].files = files;
}
if (prompt) {
if (prompt && newChat[newChat.length - 2]?.template) {
newChat[newChat.length - 2].template = prompt;
}
return newChat;

View file

@ -122,24 +122,21 @@ export default function GenericModal({
}
if (apiReturn.data) {
let inputVariables = apiReturn.data.input_variables ?? [];
if (
JSON.stringify(apiReturn.data?.frontend_node) !== JSON.stringify({})
) {
setNodeClass!(apiReturn.data?.frontend_node, inputValue);
setModalOpen(closeModal);
setIsEdit(false);
}
if (!inputVariables || inputVariables.length === 0) {
setIsEdit(true);
setNoticeData({
title: "Your template does not have any variables.",
});
setModalOpen(false);
} else {
if (
JSON.stringify(apiReturn.data?.frontend_node) !==
JSON.stringify({})
) {
setNodeClass!(apiReturn.data?.frontend_node, inputValue);
setModalOpen(closeModal);
setIsEdit(false);
setSuccessData({
title: "Prompt is ready",
});
}
setSuccessData({
title: "Prompt is ready",
});
}
} else {
setIsEdit(true);

View file

@ -578,4 +578,4 @@ def test_async_task_processing_vector_store(client, added_vector_store, created_
# Validate that the task completed successfully and the result is as expected
assert "result" in task_status_json, task_status_json
assert "output" in task_status_json["result"], task_status_json["result"]
assert "Langflow" in task_status_json["result"]["output"], task_status_json["result"]
assert "Langflow" in task_status_json["result"]["output"], task_status_json["result"]