Merge branch 'dev' into vectorstores/elasticsearch

Gabriel Luiz Freitas Almeida 2024-01-06 16:23:42 -03:00 committed by GitHub
commit 0c592c8f06
23 changed files with 7574 additions and 14831 deletions


@@ -1,2 +1,7 @@
.venv/
**/aws
# node_modules
**/node_modules/
dist/
**/build/
src/backend/langflow/frontend


@@ -47,3 +47,19 @@ jobs:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
poetry publish
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
push: true
file: ./build_and_push.Dockerfile
tags: logspace/langflow:${{ steps.check-version.outputs.version }}


@@ -45,3 +45,21 @@ jobs:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
poetry publish
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
push: true
file: ./build_and_push.Dockerfile
tags: |
logspace/langflow:${{ steps.check-version.outputs.version }}
logspace/langflow:latest

.gitignore (vendored): 5 changes

@@ -17,6 +17,9 @@ qdrant_storage
.chroma
.ruff_cache
# PyCharm
.idea/
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
@@ -256,4 +259,4 @@ langflow.db
/tmp/*
src/backend/langflow/frontend/
.docker
scratchpad*


@@ -133,7 +133,7 @@ Follow our step-by-step guide to deploy Langflow on Google Cloud Platform (GCP)
Alternatively, click the **"Open in Cloud Shell"** button below to launch Google Cloud Shell, clone the Langflow repository, and start an **interactive tutorial** that will guide you through the process of setting up the necessary resources and deploying Langflow on your GCP project.
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/logspace-ai/langflow&working_dir=scripts&shellonly=true&tutorial=walkthroughtutorial_spot.md)
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/logspace-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
## Deploy on Railway

build_and_push.Dockerfile (new file): 79 lines

@@ -0,0 +1,79 @@
# syntax=docker/dockerfile:1
# Keep this syntax directive! It's used to enable Docker BuildKit
# Based on https://github.com/python-poetry/poetry/discussions/1879?sort=top#discussioncomment-216865
# but I try to keep it updated (see history)
################################
# PYTHON-BASE
# Sets up all our shared environment variables
################################
FROM python:3.10-slim as python-base
# python
ENV PYTHONUNBUFFERED=1 \
# prevents python creating .pyc files
PYTHONDONTWRITEBYTECODE=1 \
\
# pip
PIP_DISABLE_PIP_VERSION_CHECK=on \
PIP_DEFAULT_TIMEOUT=100 \
\
# poetry
# https://python-poetry.org/docs/configuration/#using-environment-variables
POETRY_VERSION=1.7.1 \
# make poetry install to this location
POETRY_HOME="/opt/poetry" \
# make poetry create the virtual environment in the project's root
# it gets named `.venv`
POETRY_VIRTUALENVS_IN_PROJECT=true \
# do not ask any interactive question
POETRY_NO_INTERACTION=1 \
\
# paths
# this is where our requirements + virtual environment will live
PYSETUP_PATH="/opt/pysetup" \
VENV_PATH="/opt/pysetup/.venv"
# prepend poetry and venv to path
ENV PATH="$POETRY_HOME/bin:$VENV_PATH/bin:$PATH"
################################
# BUILDER-BASE
# Used to build deps + create our virtual environment
################################
FROM python-base as builder-base
RUN apt-get update \
&& apt-get install --no-install-recommends -y \
# deps for installing poetry
curl \
# deps for building python deps
build-essential \
# npm
npm
# Now we need to copy the entire project into the image
WORKDIR /app
COPY pyproject.toml poetry.lock ./
COPY src ./src
COPY Makefile ./
COPY README.md ./
RUN curl -sSL https://install.python-poetry.org | python3 - && make build
# Final stage for the application
FROM python-base as final
# Copy the built .tar.gz from the builder stage
COPY --from=builder-base /app/dist/*.tar.gz ./
# Install the package from the .tar.gz
RUN pip install *.tar.gz
WORKDIR /app
CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860"]

docs/package-lock.json (generated): 20173 changes; file diff suppressed because it is too large


@@ -1,5 +1,5 @@
{
"name": "docusaurus",
"name": "langflow-docs",
"version": "0.0.0",
"private": true,
"scripts": {
@@ -36,8 +36,8 @@
"path-browserify": "^1.0.1",
"postcss": "^8.4.31",
"prism-react-renderer": "^1.3.5",
"react": "^17.0.2",
"react-dom": "^17.0.2",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-images": "^0.6.7",
"react-medium-image-zoom": "^5.1.6",
"react-player": "^2.12.0",

poetry.lock (generated): 1028 changes; file diff suppressed because it is too large


@@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.6.3"
version = "0.6.4"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [
@@ -28,15 +28,15 @@ langflow = "langflow.__main__:main"
python = ">=3.9,<3.11"
fastapi = "^0.104.0"
uvicorn = "^0.23.0"
fastapi = "^0.108.0"
uvicorn = "^0.25.0"
beautifulsoup4 = "^4.12.2"
google-search-results = "^2.4.1"
google-api-python-client = "^2.79.0"
typer = "^0.9.0"
gunicorn = "^21.2.0"
langchain = "~0.0.345"
openai = "^1.3.6"
openai = "^1.6.1"
pandas = "2.0.3"
chromadb = "^0.4.0"
huggingface-hub = { version = "^0.19.0", extras = ["inference"] }
@@ -47,31 +47,31 @@ unstructured = "^0.11.0"
pypdf = "^3.17.0"
lxml = "^4.9.2"
pysrt = "^1.1.2"
fake-useragent = "^1.3.0"
fake-useragent = "^1.4.0"
docstring-parser = "^0.15"
psycopg2-binary = "^2.9.6"
pyarrow = "^14.0.0"
tiktoken = "~0.5.0"
wikipedia = "^1.4.0"
qdrant-client = "^1.4.0"
qdrant-client = "^1.7.0"
websockets = "^10.3"
weaviate-client = "^3.23.0"
weaviate-client = "^3.26.0"
jina = "*"
sentence-transformers = { version = "^2.2.2", optional = true }
ctransformers = { version = "^0.2.10", optional = true }
cohere = "^4.37.0"
cohere = "^4.39.0"
python-multipart = "^0.0.6"
sqlmodel = "^0.0.14"
faiss-cpu = "^1.7.4"
anthropic = "^0.7.0"
anthropic = "^0.8.0"
orjson = "3.9.3"
multiprocess = "^0.70.14"
cachetools = "^5.3.1"
types-cachetools = "^5.3.0.5"
platformdirs = "^4.1.0"
pinecone-client = "^2.2.2"
pymongo = "^4.5.0"
supabase = "^2.0.3"
pymongo = "^4.6.0"
supabase = "^2.3.0"
certifi = "^2023.11.17"
google-cloud-aiplatform = "^1.36.0"
psycopg = "^3.1.9"
@@ -81,13 +81,13 @@ langchain-experimental = "*"
celery = { extras = ["redis"], version = "^5.3.6", optional = true }
redis = { version = "^4.6.0", optional = true }
flower = { version = "^2.0.0", optional = true }
alembic = "^1.12.0"
alembic = "^1.13.0"
passlib = "^1.7.4"
bcrypt = "4.0.1"
python-jose = "^3.3.0"
metaphor-python = "^0.1.11"
pydantic = "^2.0.0"
pydantic-settings = "^2.0.3"
pydantic = "^2.5.0"
pydantic-settings = "^2.1.0"
zep-python = "*"
pywin32 = { version = "^306", markers = "sys_platform == 'win32'" }
loguru = "^0.7.1"
@@ -100,11 +100,13 @@ extract-msg = "^0.45.0"
jq = { version = "^1.6.0", markers = "sys_platform != 'win32'" }
boto3 = "^1.28.63"
numexpr = "^2.8.6"
qianfan = "0.0.5"
qianfan = "0.2.0"
pgvector = "^0.2.3"
pyautogen = "^0.2.0"
langchain-google-genai = "^0.0.2"
elasticsearch = "^8.11.1"
pytube = "^15.0.0"
llama-index = "^0.9.24"
[tool.poetry.group.dev.dependencies]
pytest-asyncio = "^0.23.1"


@@ -1,7 +1,8 @@
from typing import Optional
from langchain.llms.base import BaseLLM
from langchain.llms.bedrock import Bedrock
from langflow import CustomComponent
class AmazonBedrockComponent(CustomComponent):
@@ -27,18 +28,32 @@ class AmazonBedrockComponent(CustomComponent):
},
"credentials_profile_name": {"display_name": "Credentials Profile Name"},
"streaming": {"display_name": "Streaming", "field_type": "bool"},
"code": {"show": False},
"endpoint_url": {"display_name": "Endpoint URL"},
"region_name": {"display_name": "Region Name"},
"model_kwargs": {"display_name": "Model Kwargs"},
"cache": {"display_name": "Cache"},
"code": {"advanced": True},
}
def build(
self,
model_id: str = "anthropic.claude-instant-v1",
credentials_profile_name: Optional[str] = None,
region_name: Optional[str] = None,
model_kwargs: Optional[dict] = None,
endpoint_url: Optional[str] = None,
streaming: bool = False,
cache: Optional[bool] = None,
) -> BaseLLM:
try:
output = Bedrock(
credentials_profile_name=credentials_profile_name,
model_id=model_id,
region_name=region_name,
model_kwargs=model_kwargs,
endpoint_url=endpoint_url,
streaming=streaming,
cache=cache,
) # type: ignore
except Exception as e:
raise ValueError("Could not connect to AmazonBedrock API.") from e
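For orientation, a minimal usage sketch of the component above (hypothetical values, not part of this commit; the profile and region names are placeholders, and build is assumed to return the constructed Bedrock LLM per its BaseLLM annotation):

# Hypothetical usage sketch; credentials_profile_name and region_name are placeholders.
component = AmazonBedrockComponent()
llm = component.build(
    model_id="anthropic.claude-instant-v1",
    credentials_profile_name="default",
    region_name="us-east-1",
    streaming=False,
)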


@@ -0,0 +1,252 @@
from typing import Any, Dict, List, Optional
# from langchain_community.chat_models import ChatOllama
from langchain.chat_models import ChatOllama
from langchain.chat_models.base import BaseChatModel
from langflow import CustomComponent
# When a callback component is added to Langflow, uncomment the import below.
# from langchain.callbacks.manager import CallbackManager
class ChatOllamaComponent(CustomComponent):
display_name = "ChatOllama"
description = "Local LLM for chat with Ollama."
def build_config(self) -> dict:
return {
"base_url": {
"display_name": "Base URL",
"info": "Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.",
},
"model": {
"display_name": "Model Name",
"value": "llama2",
"info": "Refer to https://ollama.ai/library for more models.",
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"value": 0.8,
"info": "Controls the creativity of model responses.",
},
"cache": {
"display_name": "Cache",
"field_type": "bool",
"info": "Enable or disable caching.",
"advanced": True,
"value": False,
},
### When a callback component is added to Langflow, uncomment the fields below. ###
# "callback_manager": {
# "display_name": "Callback Manager",
# "info": "Optional callback manager for additional functionality.",
# "advanced": True,
# },
# "callbacks": {
# "display_name": "Callbacks",
# "info": "Callbacks to execute during model runtime.",
# "advanced": True,
# },
########################################################################################
"format": {
"display_name": "Format",
"field_type": "str",
"info": "Specify the format of the output (e.g., json).",
"advanced": True,
},
"metadata": {
"display_name": "Metadata",
"info": "Metadata to add to the run trace.",
"advanced": True,
},
"mirostat": {
"display_name": "Mirostat",
"options": ["Disabled", "Mirostat", "Mirostat 2.0"],
"info": "Enable/disable Mirostat sampling for controlling perplexity.",
"value": "Disabled",
"advanced": True,
},
"mirostat_eta": {
"display_name": "Mirostat Eta",
"field_type": "float",
"info": "Learning rate for Mirostat algorithm. (Default: 0.1)",
"advanced": True,
},
"mirostat_tau": {
"display_name": "Mirostat Tau",
"field_type": "float",
"info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)",
"advanced": True,
},
"num_ctx": {
"display_name": "Context Window Size",
"field_type": "int",
"info": "Size of the context window for generating tokens. (Default: 2048)",
"advanced": True,
},
"num_gpu": {
"display_name": "Number of GPUs",
"field_type": "int",
"info": "Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)",
"advanced": True,
},
"num_thread": {
"display_name": "Number of Threads",
"field_type": "int",
"info": "Number of threads to use during computation. (Default: detected for optimal performance)",
"advanced": True,
},
"repeat_last_n": {
"display_name": "Repeat Last N",
"field_type": "int",
"info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
"advanced": True,
},
"repeat_penalty": {
"display_name": "Repeat Penalty",
"field_type": "float",
"info": "Penalty for repetitions in generated text. (Default: 1.1)",
"advanced": True,
},
"tfs_z": {
"display_name": "TFS Z",
"field_type": "float",
"info": "Tail free sampling value. (Default: 1)",
"advanced": True,
},
"timeout": {
"display_name": "Timeout",
"field_type": "int",
"info": "Timeout for the request stream.",
"advanced": True,
},
"top_k": {
"display_name": "Top K",
"field_type": "int",
"info": "Limits token selection to top K. (Default: 40)",
"advanced": True,
},
"top_p": {
"display_name": "Top P",
"field_type": "float",
"info": "Works together with top-k. (Default: 0.9)",
"advanced": True,
},
"verbose": {
"display_name": "Verbose",
"field_type": "bool",
"info": "Whether to print out response text.",
},
"tags": {
"display_name": "Tags",
"field_type": "list",
"info": "Tags to add to the run trace.",
"advanced": True,
},
"stop": {
"display_name": "Stop Tokens",
"field_type": "list",
"info": "List of tokens to signal the model to stop generating text.",
"advanced": True,
},
"system": {
"display_name": "System",
"field_type": "str",
"info": "System to use for generating text.",
"advanced": True,
},
"template": {
"display_name": "Template",
"field_type": "str",
"info": "Template to use for generating text.",
"advanced": True,
},
}
def build(
self,
base_url: Optional[str],
model: str,
mirostat: Optional[str],
mirostat_eta: Optional[float] = None,
mirostat_tau: Optional[float] = None,
### When a callback component is added to Langflow, uncomment the parameters below. ###
# callback_manager: Optional[CallbackManager] = None,
# callbacks: Optional[List[Callbacks]] = None,
#######################################################################################
repeat_last_n: Optional[int] = None,
verbose: Optional[bool] = None,
cache: Optional[bool] = None,
num_ctx: Optional[int] = None,
num_gpu: Optional[int] = None,
format: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
num_thread: Optional[int] = None,
repeat_penalty: Optional[float] = None,
stop: Optional[List[str]] = None,
system: Optional[str] = None,
tags: Optional[List[str]] = None,
temperature: Optional[float] = None,
template: Optional[str] = None,
tfs_z: Optional[float] = None,
timeout: Optional[int] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
) -> BaseChatModel:
if not base_url:
base_url = "http://localhost:11434"
# Mapping mirostat settings to their corresponding values
mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}
# Default to 0 for 'Disabled'
mirostat_value = mirostat_options.get(mirostat, 0) # type: ignore
# Set mirostat_eta and mirostat_tau to None if mirostat is disabled
if mirostat_value == 0:
mirostat_eta = None
mirostat_tau = None
# Collect the model parameters
llm_params = {
"base_url": base_url,
"cache": cache,
"model": model,
"mirostat": mirostat_value,
"format": format,
"metadata": metadata,
"tags": tags,
## When a callback component is added to Langflow, uncomment the entries below. ##
# "callback_manager": callback_manager,
# "callbacks": callbacks,
#####################################################################################
"mirostat_eta": mirostat_eta,
"mirostat_tau": mirostat_tau,
"num_ctx": num_ctx,
"num_gpu": num_gpu,
"num_thread": num_thread,
"repeat_last_n": repeat_last_n,
"repeat_penalty": repeat_penalty,
"temperature": temperature,
"stop": stop,
"system": system,
"template": template,
"tfs_z": tfs_z,
"timeout": timeout,
"top_k": top_k,
"top_p": top_p,
"verbose": verbose,
}
# Drop parameters that were left as None
llm_params = {k: v for k, v in llm_params.items() if v is not None}
try:
output = ChatOllama(**llm_params) # type: ignore
except Exception as e:
raise ValueError("Could not initialize Ollama LLM.") from e
return output # type: ignore
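A minimal usage sketch of the component above (hypothetical values, not part of this commit; assumes an Ollama server is reachable at the default endpoint):

# Hypothetical usage sketch; model and temperature are placeholders.
component = ChatOllamaComponent()
chat_model = component.build(
    base_url=None,  # falls back to "http://localhost:11434"
    model="llama2",
    mirostat="Disabled",  # mapped to 0, so mirostat_eta/mirostat_tau are dropped
    temperature=0.8,
)
# Parameters left as None are stripped before ChatOllama(**llm_params) is called.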


@@ -0,0 +1,163 @@
from typing import Optional, List
from langchain.llms import Ollama
from langchain.llms.base import BaseLLM
from langflow import CustomComponent
class OllamaLLM(CustomComponent):
display_name = "Ollama"
description = "Local LLM with Ollama."
def build_config(self) -> dict:
return {
"base_url": {
"display_name": "Base URL",
"info": "Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.",
},
"model": {
"display_name": "Model Name",
"value": "llama2",
"info": "Refer to https://ollama.ai/library for more models.",
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"value": 0.8,
"info": "Controls the creativity of model responses.",
},
"mirostat": {
"display_name": "Mirostat",
"options": ["Disabled", "Mirostat", "Mirostat 2.0"],
"info": "Enable/disable Mirostat sampling for controlling perplexity.",
"value": "Disabled",
"advanced": True,
},
"mirostat_eta": {
"display_name": "Mirostat Eta",
"field_type": "float",
"info": "Learning rate influencing the algorithm's response to feedback.",
"advanced": True,
},
"mirostat_tau": {
"display_name": "Mirostat Tau",
"field_type": "float",
"info": "Controls balance between coherence and diversity.",
"advanced": True,
},
"num_ctx": {
"display_name": "Context Window Size",
"field_type": "int",
"info": "Size of the context window for generating the next token.",
"advanced": True,
},
"num_gpu": {
"display_name": "Number of GPUs",
"field_type": "int",
"info": "Number of GPUs to use for computation.",
"advanced": True,
},
"num_thread": {
"display_name": "Number of Threads",
"field_type": "int",
"info": "Number of threads to use during computation.",
"advanced": True,
},
"repeat_last_n": {
"display_name": "Repeat Last N",
"field_type": "int",
"info": "Sets how far back the model looks to prevent repetition.",
"advanced": True,
},
"repeat_penalty": {
"display_name": "Repeat Penalty",
"field_type": "float",
"info": "Penalty for repetitions in generated text.",
"advanced": True,
},
"stop": {
"display_name": "Stop Tokens",
"info": "List of tokens to signal the model to stop generating text.",
"advanced": True,
},
"tfs_z": {
"display_name": "TFS Z",
"field_type": "float",
"info": "Tail free sampling to reduce impact of less probable tokens.",
"advanced": True,
},
"top_k": {
"display_name": "Top K",
"field_type": "int",
"info": "Limits token selection to top K for reducing nonsense generation.",
"advanced": True,
},
"top_p": {
"display_name": "Top P",
"field_type": "float",
"info": "Works with top-k to control diversity of generated text.",
"advanced": True,
},
}
def build(
self,
base_url: Optional[str],
model: str,
temperature: Optional[float],
mirostat: Optional[str],
mirostat_eta: Optional[float] = None,
mirostat_tau: Optional[float] = None,
num_ctx: Optional[int] = None,
num_gpu: Optional[int] = None,
num_thread: Optional[int] = None,
repeat_last_n: Optional[int] = None,
repeat_penalty: Optional[float] = None,
stop: Optional[List[str]] = None,
tfs_z: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
) -> BaseLLM:
if not base_url:
base_url = "http://localhost:11434"
# Mapping mirostat settings to their corresponding values
mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}
# Default to 0 for 'Disabled'
mirostat_value = mirostat_options.get(mirostat, 0) # type: ignore
# Set mirostat_eta and mirostat_tau to None if mirostat is disabled
if mirostat_value == 0:
mirostat_eta = None
mirostat_tau = None
llm_params = {
"base_url": base_url,
"model": model,
"mirostat": mirostat_value,
"mirostat_eta": mirostat_eta,
"mirostat_tau": mirostat_tau,
"num_ctx": num_ctx,
"num_gpu": num_gpu,
"num_thread": num_thread,
"repeat_last_n": repeat_last_n,
"repeat_penalty": repeat_penalty,
"temperature": temperature,
"stop": stop,
"tfs_z": tfs_z,
"top_k": top_k,
"top_p": top_p,
}
# Drop parameters that were left as None
llm_params = {k: v for k, v in llm_params.items() if v is not None}
try:
llm = Ollama(**llm_params)
except Exception as e:
raise ValueError("Could not connect to Ollama.") from e
return llm


@@ -0,0 +1,70 @@
from typing import List
from langflow import CustomComponent
import json
from langchain.schema import BaseRetriever
from langchain.schema.vectorstore import VectorStore
from langchain.base_language import BaseLanguageModel
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
class VectaraSelfQueryRetrieverComponent(CustomComponent):
"""
A custom component for implementing Vectara Self Query Retriever using a vector store.
"""
display_name: str = "Vectara Self Query Retriever for Vectara Vector Store"
description: str = "Implementation of Vectara Self Query Retriever"
documentation = (
"https://python.langchain.com/docs/integrations/retrievers/self_query/vectara_self_query"
)
beta = True
field_config = {
"code": {"show": True},
"vectorstore": {
"display_name": "Vector Store",
"info": "Input Vectara Vector Store"
},
"llm": {
"display_name": "LLM",
"info": "For self query retriever"
},
"document_content_description": {
"display_name": "Document Content Description",
"info": "For self query retriever",
},
"metadata_field_info": {
"display_name": "Metadata Field Info",
"info": "Each metadata field info is a string in the form of a key-value pair dictionary containing additional search metadata.\nExample input: {\"name\":\"speech\",\"description\":\"the name of the speech\",\"type\":\"string or list[string]\"}.\nThe keys should remain constant (name, description, type)",
},
}
def build(
self,
vectorstore: VectorStore,
document_content_description: str,
llm: BaseLanguageModel,
metadata_field_info: List[str],
) -> BaseRetriever:
metadata_field_obj = []
for meta in metadata_field_info:
meta_obj = json.loads(meta)
if 'name' not in meta_obj or 'description' not in meta_obj or 'type' not in meta_obj:
raise ValueError('Incorrect metadata field info format.')
attribute_info = AttributeInfo(
name=meta_obj['name'],
description=meta_obj['description'],
type=meta_obj['type'],
)
metadata_field_obj.append(attribute_info)
return SelfQueryRetriever.from_llm(
llm,
vectorstore,
document_content_description,
metadata_field_obj,
verbose=True
)
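To illustrate the metadata_field_info input this component expects, a hypothetical example (not part of this commit) following the format described in the field_config above:

# Each entry is a JSON string with the constant keys "name", "description", and "type".
metadata_field_info = [
    '{"name": "speech", "description": "name of the speech", "type": "string or list[string]"}',
    '{"name": "year", "description": "year the speech was given", "type": "integer"}',
]
# build() parses each string with json.loads and wraps it in an AttributeInfo.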


@@ -5,14 +5,15 @@ from typing import Any, Dict, List
import orjson
from fastapi import WebSocket, status
from loguru import logger
from starlette.websockets import WebSocketState
from langflow.api.v1.schemas import ChatMessage, ChatResponse, FileResponse
from langflow.interface.utils import pil_to_base64
from langflow.services import ServiceType, service_manager
from langflow.services.base import Service
from langflow.services.chat.cache import Subject
from langflow.services.chat.utils import process_graph
from .cache import cache_service
@@ -117,7 +118,7 @@ class ChatService(Service):
if "after sending" in str(exc):
logger.error(f"Error closing connection: {exc}")
async def process_message(self, client_id: str, payload: Dict, langchain_object: Any):
async def process_message(self, client_id: str, payload: Dict, build_result: Any):
# Process the graph data and chat message
chat_inputs = payload.pop("inputs", {})
chatkey = payload.pop("chatKey", None)
@@ -134,12 +135,12 @@
logger.debug("Generating result and thought")
result, intermediate_steps, raw_output = await process_graph(
langchain_object=langchain_object,
build_result=build_result,
chat_inputs=chat_inputs,
client_id=client_id,
session_id=self.connection_ids[client_id],
)
self.set_cache(client_id, langchain_object)
self.set_cache(client_id, build_result)
except Exception as e:
# Log stack trace
logger.exception(e)
@@ -205,8 +206,8 @@
continue
with self.chat_cache.set_client_id(client_id):
if langchain_object := self.cache_service.get(client_id).get("result"):
await self.process_message(client_id, payload, langchain_object)
if build_result := self.cache_service.get(client_id).get("result"):
await self.process_message(client_id, payload, build_result)
else:
raise RuntimeError(f"Could not find a build result for client_id {client_id}")


@@ -1,20 +1,28 @@
from typing import Any
from langchain.agents import AgentExecutor
from langchain.chains.base import Chain
from langchain_core.runnables import Runnable
from loguru import logger
from langflow.api.v1.schemas import ChatMessage
from langflow.interface.utils import try_setting_streaming_options
from langflow.processing.base import get_result_and_steps
from langflow.utils.chat import ChatDefinition
LANGCHAIN_RUNNABLES = (Chain, Runnable, AgentExecutor)
async def process_graph(
langchain_object,
build_result,
chat_inputs: ChatMessage,
client_id: str,
session_id: str,
):
langchain_object = try_setting_streaming_options(langchain_object)
build_result = try_setting_streaming_options(build_result)
logger.debug("Loaded langchain object")
if langchain_object is None:
if build_result is None:
# Raise a user-facing error
raise ValueError("There was an error loading the build result. Please check all the nodes and try again.")
@@ -25,15 +33,36 @@ async def process_graph(
chat_inputs.message = {}
logger.debug("Generating result and thought")
result, intermediate_steps, raw_output = await get_result_and_steps(
langchain_object,
chat_inputs.message,
client_id=client_id,
session_id=session_id,
)
if isinstance(build_result, LANGCHAIN_RUNNABLES):
result, intermediate_steps, raw_output = await get_result_and_steps(
build_result,
chat_inputs.message,
client_id=client_id,
session_id=session_id,
)
elif isinstance(build_result, ChatDefinition):
raw_output = await run_build_result(
build_result,
chat_inputs,
client_id=client_id,
session_id=session_id,
)
if isinstance(raw_output, dict):
if not build_result.output_key:
raise ValueError("No output key provided to ChatDefinition when returning a dict.")
result = raw_output[build_result.output_key]
else:
result = raw_output
intermediate_steps = []
else:
raise TypeError(f"Unknown type {type(build_result)}")
logger.debug("Generated result and intermediate_steps")
return result, intermediate_steps, raw_output
except Exception as e:
# Log stack trace
logger.exception(e)
raise e
async def run_build_result(build_result: Any, chat_inputs: ChatMessage, client_id: str, session_id: str):
return build_result(inputs=chat_inputs.message)


@@ -0,0 +1,34 @@
from typing import Any, Callable, Optional, Union
from langchain_core.prompts import PromptTemplate as LCPromptTemplate
from langflow.utils.prompt import GenericPromptTemplate
from llama_index.prompts import PromptTemplate as LIPromptTemplate
PromptTemplate = Union[LCPromptTemplate, LIPromptTemplate]
class ChatDefinition:
def __init__(
self,
func: Callable,
inputs: list[str],
output_key: Optional[str] = None,
prompt_template: Optional[PromptTemplate] = None,
):
self.func = func
self.input_keys = inputs
self.output_key = output_key
self.prompt_template = prompt_template
@classmethod
def from_prompt_template(cls, prompt_template: PromptTemplate, func: Callable, output_key: Optional[str] = None):
prompt = GenericPromptTemplate(prompt_template)
return cls(
func=func,
inputs=prompt.input_keys,
output_key=output_key,
prompt_template=prompt_template,
)
def __call__(self, inputs: dict, callbacks: Optional[Any] = None) -> dict:
return self.func(inputs, callbacks)
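A brief sketch of how a ChatDefinition might be constructed and invoked (hypothetical callable, not part of this commit; assumes a LangChain PromptTemplate as the template):

# Hypothetical usage sketch.
template = LCPromptTemplate.from_template("Answer the question: {question}")

def echo_func(inputs: dict, callbacks=None) -> dict:
    # Stand-in callable; a real one would run a model over the inputs.
    return {"answer": f"echo: {inputs.get('question')}"}

definition = ChatDefinition.from_prompt_template(template, echo_func, output_key="answer")
result = definition({"question": "What is Langflow?"})  # {"answer": "echo: What is Langflow?"}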


@@ -0,0 +1,58 @@
from typing import Any, Union
from langchain_core.prompts import PromptTemplate as LCPromptTemplate
from llama_index.prompts import PromptTemplate as LIPromptTemplate
PromptTemplateTypes = Union[LCPromptTemplate, LIPromptTemplate]
class GenericPromptTemplate:
def __init__(self, prompt_template: PromptTemplateTypes):
object.__setattr__(self, "prompt_template", prompt_template)
@property
def input_keys(self):
prompt_template = object.__getattribute__(self, "prompt_template")
if isinstance(prompt_template, LCPromptTemplate):
return prompt_template.input_variables
elif isinstance(prompt_template, LIPromptTemplate):
return prompt_template.template_vars
else:
raise TypeError(f"Unknown prompt template type {type(prompt_template)}")
def to_lc_prompt(self):
prompt_template = object.__getattribute__(self, "prompt_template")
if isinstance(prompt_template, LCPromptTemplate):
return prompt_template
elif isinstance(prompt_template, LIPromptTemplate):
return LCPromptTemplate.from_template(prompt_template.get_template())
else:
raise TypeError(f"Unknown prompt template type {type(prompt_template)}")
def to_li_prompt(self):
prompt_template = object.__getattribute__(self, "prompt_template")
if isinstance(prompt_template, LIPromptTemplate):
return prompt_template
elif isinstance(prompt_template, LCPromptTemplate):
return LIPromptTemplate(template=prompt_template.template)
else:
raise TypeError(f"Unknown prompt template type {type(prompt_template)}")
def __or__(self, other):
prompt_template = object.__getattribute__(self, "prompt_template")
if isinstance(prompt_template, LCPromptTemplate):
return prompt_template | other
elif isinstance(prompt_template, LIPromptTemplate):
return self.to_lc_prompt() | other
else:
raise TypeError(f"Unknown prompt template type {type(prompt_template)}")
def __getattribute__(self, name: str) -> Any:
if name in {
"input_keys",
"to_lc_prompt",
"to_li_prompt",
"__or__",
"prompt_template",
}:
return object.__getattribute__(self, name)
prompt_template = object.__getattribute__(self, "prompt_template")
return getattr(prompt_template, name)
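A short sketch of the delegation behavior (hypothetical usage, not part of this commit; relies on the LCPromptTemplate import at the top of this file):

# Hypothetical usage sketch.
lc_prompt = LCPromptTemplate.from_template("Summarize: {text}")
generic = GenericPromptTemplate(lc_prompt)

generic.input_keys  # ["text"], normalized across both template types
li_prompt = generic.to_li_prompt()  # converted to a llama_index PromptTemplate
generic.template  # any other attribute is delegated to the wrapped template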


@@ -23,107 +23,119 @@ export default function Dropdown({
return (
<>
<Listbox
value={internalValue}
onChange={(value) => {
setInternalValue(value);
onSelect(value);
}}
>
{({ open }) => (
<>
<div className={"relative mt-1"}>
<Listbox.Button
data-test={`${id ?? ""}`}
className={
editNode
? "dropdown-component-outline"
: "dropdown-component-false-outline"
}
>
<span
className="dropdown-component-display"
data-testid={`${id ?? ""}-display`}
>
{internalValue}
</span>
<span className={"dropdown-component-arrow"}>
<IconComponent
name="ChevronsUpDown"
className="dropdown-component-arrow-color"
aria-hidden="true"
/>
</span>
</Listbox.Button>
<Transition
show={open}
as={Fragment}
leave="transition ease-in duration-100"
leaveFrom="opacity-100"
leaveTo="opacity-0"
>
<Listbox.Options
className={classNames(
editNode
? "dropdown-component-true-options nowheel custom-scroll"
: "dropdown-component-false-options nowheel custom-scroll",
apiModal ? "mb-2 w-[250px]" : "absolute w-full"
)}
>
{options.map((option, id) => (
<Listbox.Option
key={id}
className={({ active }) =>
classNames(
active ? " bg-accent" : "",
editNode
? "dropdown-component-false-option"
: "dropdown-component-true-option"
)
}
value={option}
{Object.keys(options)?.length > 0 ? (
<>
<Listbox
value={internalValue}
onChange={(value) => {
setInternalValue(value);
onSelect(value);
}}
>
{({ open }) => (
<>
<div className={"relative mt-1"}>
<Listbox.Button
data-test={`${id ?? ""}`}
className={
editNode
? "dropdown-component-outline"
: "dropdown-component-false-outline"
}
>
<span
className="dropdown-component-display"
data-testid={`${id ?? ""}-display`}
>
{({ selected, active }) => (
<>
<span
className={classNames(
selected ? "font-semibold" : "font-normal",
"block truncate "
)}
data-testid={`${option}-${id ?? ""}-option`}
>
{option}
</span>
{internalValue}
</span>
<span className={"dropdown-component-arrow"}>
<IconComponent
name="ChevronsUpDown"
className="dropdown-component-arrow-color"
aria-hidden="true"
/>
</span>
</Listbox.Button>
{selected ? (
<span
className={classNames(
active ? "text-background " : "",
"dropdown-component-choosal"
)}
>
<IconComponent
name="Check"
className={
active
? "dropdown-component-check-icon"
: "dropdown-component-check-icon"
}
aria-hidden="true"
/>
</span>
) : null}
</>
<Transition
show={open}
as={Fragment}
leave="transition ease-in duration-100"
leaveFrom="opacity-100"
leaveTo="opacity-0"
>
<Listbox.Options
className={classNames(
editNode
? "dropdown-component-true-options nowheel custom-scroll"
: "dropdown-component-false-options nowheel custom-scroll",
apiModal ? "mb-2 w-[250px]" : "absolute w-full"
)}
</Listbox.Option>
))}
</Listbox.Options>
</Transition>
</div>
</>
)}
</Listbox>
>
{options?.map((option, id) => (
<Listbox.Option
key={id}
className={({ active }) =>
classNames(
active ? " bg-accent" : "",
editNode
? "dropdown-component-false-option"
: "dropdown-component-true-option"
)
}
value={option}
>
{({ selected, active }) => (
<>
<span
className={classNames(
selected ? "font-semibold" : "font-normal",
"block truncate "
)}
data-testid={`${option}-${id ?? ""}-option`}
>
{option}
</span>
{selected ? (
<span
className={classNames(
active ? "text-background " : "",
"dropdown-component-choosal"
)}
>
<IconComponent
name="Check"
className={
active
? "dropdown-component-check-icon"
: "dropdown-component-check-icon"
}
aria-hidden="true"
/>
</span>
) : null}
</>
)}
</Listbox.Option>
))}
</Listbox.Options>
</Transition>
</div>
</>
)}
</Listbox>
</>
) : (
<>
<div>
<span className="text-sm italic">
No parameters are available for display.
</span>
</div>
</>
)}
</>
);
}


@@ -46,7 +46,14 @@ export default function ChatInput({
: "hidden"
}`,
}}
value={lockChat ? "Thinking..." : chatValue}
value={
lockChat
? "Thinking..."
: typeof chatValue === "object" &&
Object.keys(chatValue)?.length === 0
? "No chat input variables found. Click to run your flow."
: chatValue
}
onChange={(event): void => {
setChatValue(event.target.value);
}}


@@ -1,5 +1,5 @@
import Convert from "ansi-to-html";
import { useMemo, useState } from "react";
import { useState } from "react";
import ReactMarkdown from "react-markdown";
import rehypeMathjax from "rehype-mathjax";
import remarkGfm from "remark-gfm";
@@ -74,77 +74,71 @@ export default function ChatMessage({
<div className="w-full">
<div className="w-full dark:text-white">
<div className="w-full">
{useMemo(
() =>
chat.message.toString() === "" && lockChat ? (
<IconComponent
name="MoreHorizontal"
className="h-8 w-8 animate-pulse"
/>
) : (
<ReactMarkdown
remarkPlugins={[remarkGfm, remarkMath]}
rehypePlugins={[rehypeMathjax]}
className="markdown prose min-w-full text-primary word-break-break-word
{chat.message.toString() === "" && lockChat ? (
<IconComponent
name="MoreHorizontal"
className="h-8 w-8 animate-pulse"
/>
) : (
<ReactMarkdown
remarkPlugins={[remarkGfm, remarkMath]}
rehypePlugins={[rehypeMathjax]}
className="markdown prose min-w-full text-primary word-break-break-word
dark:prose-invert"
components={{
pre({ node, ...props }) {
return <>{props.children}</>;
},
code: ({
node,
inline,
className,
children,
...props
}) => {
if (children.length) {
if (children[0] === "▍") {
return (
<span className="form-modal-markdown-span">
</span>
);
}
children[0] = (children[0] as string).replace(
"`▍`",
"▍"
);
}
const match = /language-(\w+)/.exec(
className || ""
components={{
pre({ node, ...props }) {
return <>{props.children}</>;
},
code: ({
node,
inline,
className,
children,
...props
}) => {
if (children.length) {
if (children[0] === "▍") {
return (
<span className="form-modal-markdown-span">
</span>
);
}
return !inline ? (
<CodeTabsComponent
isMessage
tabs={[
{
name: (match && match[1]) || "",
mode: (match && match[1]) || "",
image:
"https://curl.se/logo/curl-symbol-transparent.png",
language: (match && match[1]) || "",
code: String(children).replace(/\n$/, ""),
},
]}
activeTab={"0"}
setActiveTab={() => {}}
/>
) : (
<code className={className} {...props}>
{children}
</code>
);
},
}}
>
{chat.message.toString()}
</ReactMarkdown>
),
[chat.message, chat.message.toString()]
children[0] = (children[0] as string).replace(
"`▍`",
"▍"
);
}
const match = /language-(\w+)/.exec(className || "");
return !inline ? (
<CodeTabsComponent
isMessage
tabs={[
{
name: (match && match[1]) || "",
mode: (match && match[1]) || "",
image:
"https://curl.se/logo/curl-symbol-transparent.png",
language: (match && match[1]) || "",
code: String(children).replace(/\n$/, ""),
},
]}
activeTab={"0"}
setActiveTab={() => {}}
/>
) : (
<code className={className} {...props}>
{children}
</code>
);
},
}}
>
{chat.message.toString()}
</ReactMarkdown>
)}
</div>
{chat.files && (


@@ -146,13 +146,14 @@ export default function FormModal({
newChat[newChat.length - 1].message + str;
}
}
if (thought) {
if (thought && newChat[newChat.length - 1]?.thought) {
newChat[newChat.length - 1].thought = thought;
}
if (files) {
if (files && newChat[newChat.length - 1]?.files) {
newChat[newChat.length - 1].files = files;
}
if (prompt) {
if (prompt && newChat[newChat.length - 2]?.template) {
newChat[newChat.length - 2].template = prompt;
}
return newChat;


@@ -122,24 +122,21 @@ export default function GenericModal({
}
if (apiReturn.data) {
let inputVariables = apiReturn.data.input_variables ?? [];
if (
JSON.stringify(apiReturn.data?.frontend_node) !== JSON.stringify({})
) {
setNodeClass!(apiReturn.data?.frontend_node, inputValue);
setModalOpen(closeModal);
setIsEdit(false);
}
if (!inputVariables || inputVariables.length === 0) {
setIsEdit(true);
setNoticeData({
title: "Your template does not have any variables.",
});
setModalOpen(false);
} else {
if (
JSON.stringify(apiReturn.data?.frontend_node) !==
JSON.stringify({})
) {
setNodeClass!(apiReturn.data?.frontend_node, inputValue);
setModalOpen(closeModal);
setIsEdit(false);
setSuccessData({
title: "Prompt is ready",
});
}
setSuccessData({
title: "Prompt is ready",
});
}
} else {
setIsEdit(true);