Merge branch 'main' into task/update_langchain_semver

This commit is contained in:
Gabriel Luiz Freitas Almeida 2023-04-27 16:57:22 -03:00 committed by GitHub
commit d78f8bea2c
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
143 changed files with 12972 additions and 1374 deletions

28
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View file

@ -0,0 +1,28 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**Browser and Version**
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.

View file

@ -6,7 +6,7 @@ on:
pull_request:
env:
POETRY_VERSION: "1.3.1"
POETRY_VERSION: "1.4.0"
jobs:
build:

View file

@ -10,7 +10,7 @@ on:
- "pyproject.toml"
env:
POETRY_VERSION: "1.3.1"
POETRY_VERSION: "1.4.0"
jobs:
if_release:

33
.github/workflows/test.yml vendored Normal file
View file

@ -0,0 +1,33 @@
# GitHub Actions workflow: run the backend unit-test suite with Poetry.
name: test
on:
  push:
    branches: [main]
  # NOTE(review): pull requests trigger only against `dev` while pushes are
  # tracked on `main` — confirm this asymmetry is intentional.
  pull_request:
    branches: [dev]
env:
  # Keep in sync with the Poetry version pinned in the other workflows.
  POETRY_VERSION: "1.4.0"
jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Test against every supported Python version.
        python-version:
          - "3.10"
          - "3.11"
    steps:
      - uses: actions/checkout@v3
      - name: Install poetry
        run: pipx install poetry==$POETRY_VERSION
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          # Cache Poetry-managed dependencies between runs.
          cache: "poetry"
      - name: Install dependencies
        run: poetry install
      - name: Run unit tests
        run: |
          make test

9
.gitignore vendored
View file

@ -9,6 +9,11 @@ lerna-debug.log*
# Mac
.DS_Store
# VSCode
.vscode
.chroma
.ruff_cache
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
@ -233,5 +238,5 @@ venv.bak/
.dmypy.json
dmypy.json
# Poetry
.testenv/*
# Poetry
.testenv/*

View file

@ -1,6 +1,6 @@
# Contributing to LangFlow
Hello there! I appreciate your interest in contributing to LangFlow.
Hello there! We appreciate your interest in contributing to LangFlow.
As an open-source project in a rapidly developing field, we are extremely open
to contributions, whether it be in the form of a new feature, improved infra, or better documentation.

28
GCP_DEPLOYMENT.md Normal file
View file

@ -0,0 +1,28 @@
# Run Langflow from a New Google Cloud Project
This guide will help you set up a Langflow development VM in a Google Cloud Platform project using Google Cloud Shell.
> **Note**: When Cloud Shell opens, be sure to select **Trust repo**. Some `gcloud` commands might not run in an ephemeral Cloud Shell environment.
## Standard VM
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/genome21/langflow&working_dir=scripts&shellonly=true&tutorial=walkthroughtutorial.md)
This script sets up a Debian-based VM with the Langflow package, Nginx, and the necessary configurations to run the Langflow Dev environment.
<hr>
## Spot/Preemptible Instance
[![Open in Cloud Shell - Spot Instance](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/genome21/langflow&working_dir=scripts&shellonly=true&tutorial=walkthroughtutorial.md)
When running as a [spot (preemptible) instance](https://cloud.google.com/compute/docs/instances/preemptible), the code and VM will behave the same way as in a regular instance, executing the startup script to configure the environment, install necessary dependencies, and run the Langflow application. However, **due to the nature of spot instances, the VM may be terminated at any time if Google Cloud needs to reclaim the resources**. This makes spot instances suitable for fault-tolerant, stateless, or interruptible workloads that can handle unexpected terminations and restarts.
## Pricing (approximate)
> For a more accurate breakdown of costs, please use the [**GCP Pricing Calculator**](https://cloud.google.com/products/calculator)
<br>
| Component | Regular Cost (Hourly) | Regular Cost (Monthly) | Spot/Preemptible Cost (Hourly) | Spot/Preemptible Cost (Monthly) | Notes |
| -------------- | --------------------- | ---------------------- | ------------------------------ | ------------------------------- | ----- |
| 100 GB Disk | - | $10/month | - | $10/month | Disk cost remains the same for both regular and Spot/Preemptible VMs |
| VM (n1-standard-4) | $0.15/hr | ~$108/month | ~$0.04/hr | ~$29/month | The VM cost can be significantly reduced using a Spot/Preemptible instance |
| **Total** | **$0.15/hr** | **~$118/month** | **~$0.04/hr** | **~$39/month** | Total costs for running the VM and disk 24/7 for an entire month |

View file

@ -42,14 +42,13 @@ build:
dev:
make install_frontend
ifeq ($(build),1)
@echo 'Running docker compose up with build'
docker compose up --build
@echo 'Running docker compose up with build'
docker compose $(if $(debug),-f docker-compose.debug.yml) up --build
else
@echo 'Running docker compose up without build'
docker compose up
@echo 'Running docker compose up without build'
docker compose $(if $(debug),-f docker-compose.debug.yml) up
endif
publish:
make build
poetry publish

View file

@ -5,6 +5,7 @@
~ A User Interface For [LangChain](https://github.com/hwchase17/langchain) ~
<p>
<a href="https://huggingface.co/spaces/Logspace/LangFlow"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
<img alt="GitHub Contributors" src="https://img.shields.io/github/contributors/logspace-ai/langflow" />
<img alt="GitHub Last Commit" src="https://img.shields.io/github/last-commit/logspace-ai/langflow" />
<img alt="" src="https://img.shields.io/github/repo-size/logspace-ai/langflow" />
@ -19,14 +20,31 @@
LangFlow is a GUI for [LangChain](https://github.com/hwchase17/langchain), designed with [react-flow](https://github.com/wbkd/react-flow) to provide an effortless way to experiment and prototype flows with drag-and-drop components and a chat box.
## 📦 Installation
### <b>Locally</b>
You can install LangFlow from pip:
`pip install langflow`
```shell
pip install langflow
```
Next, run:
`langflow`
```shell
python -m langflow
```
or
```shell
langflow
```
### Deploy Langflow on Google Cloud Platform
Follow our step-by-step guide to deploy Langflow on Google Cloud Platform (GCP) using Google Cloud Shell. The guide is available in the [**Langflow in Google Cloud Platform**](GCP_DEPLOYMENT.md) document.
Alternatively, click the **"Open in Cloud Shell"** button below to launch Google Cloud Shell, clone the Langflow repository, and start an **interactive tutorial** that will guide you through the process of setting up the necessary resources and deploying Langflow on your GCP project.
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/genome21/langflow&working_dir=scripts&shellonly=true&tutorial=walkthroughtutorial_spot.md)
## 🎨 Creating Flows
@ -49,7 +67,7 @@ flow("Hey, have you heard of LangFlow?")
## 👋 Contributing
We welcome contributions from developers of all levels to our open-source project on GitHub. If you'd like to contribute, please check our contributing guidelines and help make LangFlow more accessible.
We welcome contributions from developers of all levels to our open-source project on GitHub. If you'd like to contribute, please check our [contributing guidelines](./CONTRIBUTING.md) and help make LangFlow more accessible.
[![Star History Chart](https://api.star-history.com/svg?repos=logspace-ai/langflow&type=Timeline)](https://star-history.com/#logspace-ai/langflow&Date)

View file

@ -3,7 +3,7 @@ FROM python:3.10-slim
WORKDIR /app
# Install Poetry
RUN apt-get update && apt-get install gcc curl -y
RUN apt-get update && apt-get install gcc g++ curl build-essential postgresql-server-dev-all -y
RUN curl -sSL https://install.python-poetry.org | python3 -
# # Add Poetry to PATH
ENV PATH="${PATH}:/root/.local/bin"
@ -15,4 +15,4 @@ COPY ./ ./
# Install dependencies
RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi
CMD ["uvicorn", "langflow.main:app", "--host", "0.0.0.0", "--port", "5003", "--reload"]
CMD ["uvicorn", "langflow.main:app", "--host", "0.0.0.0", "--port", "5003", "--reload", "log-level", "debug"]

28
docker-compose.debug.yml Normal file
View file

@ -0,0 +1,28 @@
# Debug compose file: runs the backend under debugpy so an IDE can attach.
version: '3.4'
services:
  backend:
    volumes:
      # Mount the repo root for live code reload inside the container.
      - ./:/app
    build:
      context: ./
      dockerfile: ./dev.Dockerfile
    # Install debugpy into /tmp at start-up, then block until a debugger
    # client attaches on 5678 before launching uvicorn with auto-reload.
    command: ["sh", "-c", "pip install debugpy -t /tmp && python /tmp/debugpy --wait-for-client --listen 0.0.0.0:5678 -m uvicorn langflow.main:app --host 0.0.0.0 --port 7860 --reload"]
    ports:
      - 7860:7860
      # debugpy listener for the IDE to attach to.
      - 5678:5678
    restart: on-failure
  frontend:
    build:
      context: ./src/frontend
      dockerfile: ./dev.Dockerfile
      args:
        # The frontend reaches the backend via the compose service name.
        - BACKEND_URL=http://backend:7860
    ports:
      - "3000:3000"
    volumes:
      # Live-mount frontend sources for hot reload in the dev server.
      - ./src/frontend/public:/home/node/app/public
      - ./src/frontend/src:/home/node/app/src
      - ./src/frontend/package.json:/home/node/app/package.json
    restart: on-failure

2787
poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.0.46"
version = "0.0.58"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [
@ -32,12 +32,34 @@ gunicorn = "^20.1.0"
langchain = "~0.0.113"
openai = "^0.27.2"
types-pyyaml = "^6.0.12.8"
dill = "^0.3.6"
pandas = "^1.5.3"
chromadb = "^0.3.21"
huggingface-hub = "^0.13.3"
rich = "^13.3.3"
llama-cpp-python = "0.1.23"
networkx = "^3.1"
unstructured = "^0.5.11"
pypdf = "^3.7.1"
lxml = "^4.9.2"
pysrt = "^1.1.2"
fake-useragent = "^1.1.3"
docstring-parser = "^0.15"
psycopg2-binary = "^2.9.6"
pyarrow = "^11.0.0"
[tool.poetry.group.dev.dependencies]
black = "^23.1.0"
ipykernel = "^6.21.2"
mypy = "^1.1.1"
ruff = "^0.0.254"
httpx = "^0.23.3"
pytest = "^7.2.2"
types-requests = "^2.28.11"
requests = "^2.28.0"
[tool.ruff]
line-length = 120
[build-system]
requires = ["poetry-core"]

View file

@ -0,0 +1,89 @@
# Set the VM, image, and networking configuration
VM_NAME="langflow-dev"
IMAGE_FAMILY="debian-11"
IMAGE_PROJECT="debian-cloud"
BOOT_DISK_SIZE="100GB"
ZONE="us-central1-a"
REGION="us-central1"
VPC_NAME="default"
SUBNET_NAME="default"
SUBNET_RANGE="10.128.0.0/20"
NAT_GATEWAY_NAME="nat-gateway"
CLOUD_ROUTER_NAME="nat-client"
# Set the GCP project's compute region
gcloud config set compute/region $REGION
# Check if the VPC exists, and create it if not
vpc_exists=$(gcloud compute networks list --filter="name=$VPC_NAME" --format="value(name)")
if [[ -z "$vpc_exists" ]]; then
gcloud compute networks create $VPC_NAME --subnet-mode=custom
fi
# Check if the subnet exists, and create it if not
subnet_exists=$(gcloud compute networks subnets list --filter="name=$SUBNET_NAME AND region=$REGION" --format="value(name)")
if [[ -z "$subnet_exists" ]]; then
gcloud compute networks subnets create $SUBNET_NAME --network=$VPC_NAME --region=$REGION --range=$SUBNET_RANGE
fi
# Create a firewall rule to allow TCP port 8080 for all instances in the VPC
firewall_8080_exists=$(gcloud compute firewall-rules list --filter="name=allow-tcp-8080" --format="value(name)")
if [[ -z "$firewall_8080_exists" ]]; then
gcloud compute firewall-rules create allow-tcp-8080 --network $VPC_NAME --allow tcp:8080 --source-ranges 0.0.0.0/0 --direction INGRESS
fi
# Create a firewall rule to allow IAP traffic
firewall_iap_exists=$(gcloud compute firewall-rules list --filter="name=allow-iap" --format="value(name)")
if [[ -z "$firewall_iap_exists" ]]; then
gcloud compute firewall-rules create allow-iap --network $VPC_NAME --allow tcp:80,tcp:443 --source-ranges 35.235.240.0/20 --direction INGRESS
fi
# Define the startup script as a multiline Bash here-doc
STARTUP_SCRIPT=$(cat <<'EOF'
#!/bin/bash
# Update and upgrade the system
apt -y update
apt -y upgrade
# Install Python 3 pip, Langflow, and Nginx
apt -y install python3-pip
pip install langflow
apt-get -y install nginx
# Configure Nginx for Langflow
touch /etc/nginx/sites-available/langflow-app
echo "server {
listen 0.0.0.0:8080;
location / {
proxy_pass http://127.0.0.1:7860;
proxy_set_header Host "\$host";
proxy_set_header X-Real-IP "\$remote_addr";
proxy_set_header X-Forwarded-For "\$proxy_add_x_forwarded_for";
}
}" >> /etc/nginx/sites-available/langflow-app
ln -s /etc/nginx/sites-available/langflow-app /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl restart nginx
langflow
EOF
)
# Create a temporary file to store the startup script
tempfile=$(mktemp)
echo "$STARTUP_SCRIPT" > $tempfile
# Create the VM instance with the specified configuration and startup script
gcloud compute instances create $VM_NAME \
--image-family $IMAGE_FAMILY \
--image-project $IMAGE_PROJECT \
--boot-disk-size $BOOT_DISK_SIZE \
--machine-type=n1-standard-4 \
--metadata-from-file startup-script=$tempfile \
--zone $ZONE \
--network $VPC_NAME \
--subnet $SUBNET_NAME
# Remove the temporary file after the VM is created
rm $tempfile

View file

@ -0,0 +1,90 @@
# Set the VM, image, and networking configuration
VM_NAME="langflow-dev"
IMAGE_FAMILY="debian-11"
IMAGE_PROJECT="debian-cloud"
BOOT_DISK_SIZE="100GB"
ZONE="us-central1-a"
REGION="us-central1"
VPC_NAME="default"
SUBNET_NAME="default"
SUBNET_RANGE="10.128.0.0/20"
NAT_GATEWAY_NAME="nat-gateway"
CLOUD_ROUTER_NAME="nat-client"
# Set the GCP project's compute region
gcloud config set compute/region $REGION
# Check if the VPC exists, and create it if not
vpc_exists=$(gcloud compute networks list --filter="name=$VPC_NAME" --format="value(name)")
if [[ -z "$vpc_exists" ]]; then
gcloud compute networks create $VPC_NAME --subnet-mode=custom
fi
# Check if the subnet exists, and create it if not
subnet_exists=$(gcloud compute networks subnets list --filter="name=$SUBNET_NAME AND region=$REGION" --format="value(name)")
if [[ -z "$subnet_exists" ]]; then
gcloud compute networks subnets create $SUBNET_NAME --network=$VPC_NAME --region=$REGION --range=$SUBNET_RANGE
fi
# Create a firewall rule to allow TCP port 8080 for all instances in the VPC
firewall_8080_exists=$(gcloud compute firewall-rules list --filter="name=allow-tcp-8080" --format="value(name)")
if [[ -z "$firewall_8080_exists" ]]; then
gcloud compute firewall-rules create allow-tcp-8080 --network $VPC_NAME --allow tcp:8080 --source-ranges 0.0.0.0/0 --direction INGRESS
fi
# Create a firewall rule to allow IAP traffic
firewall_iap_exists=$(gcloud compute firewall-rules list --filter="name=allow-iap" --format="value(name)")
if [[ -z "$firewall_iap_exists" ]]; then
gcloud compute firewall-rules create allow-iap --network $VPC_NAME --allow tcp:80,tcp:443 --source-ranges 35.235.240.0/20 --direction INGRESS
fi
# Define the startup script as a multiline Bash here-doc
STARTUP_SCRIPT=$(cat <<'EOF'
#!/bin/bash
# Update and upgrade the system
apt -y update
apt -y upgrade
# Install Python 3 pip, Langflow, and Nginx
apt -y install python3-pip
pip install langflow
apt-get -y install nginx
# Configure Nginx for Langflow
touch /etc/nginx/sites-available/langflow-app
echo "server {
listen 0.0.0.0:8080;
location / {
proxy_pass http://127.0.0.1:7860;
proxy_set_header Host "\$host";
proxy_set_header X-Real-IP "\$remote_addr";
proxy_set_header X-Forwarded-For "\$proxy_add_x_forwarded_for";
}
}" >> /etc/nginx/sites-available/langflow-app
ln -s /etc/nginx/sites-available/langflow-app /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl restart nginx
langflow
EOF
)
# Create a temporary file to store the startup script
tempfile=$(mktemp)
echo "$STARTUP_SCRIPT" > $tempfile
# Create the VM instance with the specified configuration and startup script.
# Note: the preemptible flag requires a double dash; "-preemptible" is not a
# valid gcloud option and makes the whole command fail.
gcloud compute instances create $VM_NAME \
    --image-family $IMAGE_FAMILY \
    --image-project $IMAGE_PROJECT \
    --boot-disk-size $BOOT_DISK_SIZE \
    --machine-type=n1-standard-4 \
    --metadata-from-file startup-script=$tempfile \
    --zone $ZONE \
    --network $VPC_NAME \
    --subnet $SUBNET_NAME \
    --preemptible
# Remove the temporary file after the VM is created
rm $tempfile

View file

@ -0,0 +1,86 @@
# Deploy Langflow on Google Cloud Platform
**Duration**: 45 minutes
**Author**: [Robert Wilkins III](https://www.linkedin.com/in/robertwilkinsiii)
## Introduction
In this tutorial, you will learn how to deploy Langflow on [Google Cloud Platform](https://cloud.google.com/) (GCP) using Google Cloud Shell.
This tutorial assumes you have a GCP account and basic knowledge of Google Cloud Shell. If you're not familiar with Cloud Shell, you can review the [Cloud Shell documentation](https://cloud.google.com/shell/docs).
## Set up your environment
Before you start, make sure you have the following prerequisites:
- A GCP account with the necessary permissions to create resources
- A project on GCP where you want to deploy Langflow
[**Select your GCP project**]<walkthrough-project-setup
billing="true"
apis="compute.googleapis.com,container.googleapis.com">
</walkthrough-project-setup>
In the next step, you'll configure the GCP environment and deploy Langflow.
## Configure the GCP environment and deploy Langflow
Run the deploy_langflow_gcp.sh script to configure the GCP environment and deploy Langflow:
```sh
gcloud config set project <walkthrough-project-id/>
bash ./deploy_langflow_gcp.sh
```
The script will:
1. Check if the required resources (VPC, subnet, firewall rules, and Cloud Router) exist and create them if needed
2. Create a startup script to install Python, Langflow, and Nginx
3. Create a Compute Engine VM instance with the specified configuration and startup script
4. Configure Nginx to serve Langflow on TCP port 8080
<walkthrough-pin-section-icon></walkthrough-pin-section-icon>
> The process may take approximately 30 minutes to complete. Rest assured that progress is being made, and you'll be able to proceed once the process is finished.
In the next step, you'll learn how to connect to the Langflow VM.
## Connect to the Langflow VM
To connect to your new Langflow VM, follow these steps:
1. Navigate to the [VM instances](https://console.cloud.google.com/compute/instances) page and click on the external IP for your VM. Make sure to use HTTP and set the port to 8080
<br>**or**
2. Run the following command to display the URL for your Langflow environment:
```bash
export LANGFLOW_IP=$(gcloud compute instances list --filter="NAME=langflow-dev" --format="value(EXTERNAL_IP)")
echo http://$LANGFLOW_IP:8080
```
3. Click on the Langflow URL in cloudshell to be greeted by the Langflow Dev environment
Congratulations! You have successfully deployed Langflow on Google Cloud Platform.
<walkthrough-conclusion-trophy></walkthrough-conclusion-trophy>
## Cleanup
If you want to remove the resources created during this tutorial, you can use the following commands:
```bash
gcloud compute instances delete langflow-dev --zone us-central1-a --quiet
```
The following network settings and services are used during this walkthrough. If you plan to continue using the project after the walkthrough, you may keep these configurations in place.
However, if you decide to remove them after completing the walkthrough, you can use the following gcloud commands:
<walkthrough-pin-section-icon></walkthrough-pin-section-icon>
> These commands will delete the firewall rules and network configurations created during the walkthrough. Make sure to run them only if you no longer need these settings.
```
gcloud compute firewall-rules delete allow-tcp-8080 --quiet
gcloud compute firewall-rules delete allow-iap --quiet
gcloud compute networks subnets delete default --region us-central1 --quiet
gcloud compute networks delete default --quiet
```

View file

@ -0,0 +1,83 @@
# Deploy Langflow on Google Cloud Platform
**Duration**: 45 minutes
**Author**: [Robert Wilkins III](https://www.linkedin.com/in/robertwilkinsiii)
## Introduction
In this tutorial, you will learn how to deploy Langflow on [Google Cloud Platform](https://cloud.google.com/) (GCP) using Google Cloud Shell.
This tutorial assumes you have a GCP account and basic knowledge of Google Cloud Shell. If you're not familiar with Cloud Shell, you can review the [Cloud Shell documentation](https://cloud.google.com/shell/docs).
## Set up your environment
Before you start, make sure you have the following prerequisites:
- A GCP account with the necessary permissions to create resources
- A project on GCP where you want to deploy Langflow
[**Select your GCP project**]<walkthrough-project-setup
billing="true"
apis="compute.googleapis.com,container.googleapis.com">
</walkthrough-project-setup>
In the next step, you'll configure the GCP environment and deploy Langflow.
## Configure the GCP environment and deploy Langflow
Run the deploy_langflow_gcp_spot.sh script to configure the GCP environment and deploy Langflow:
```sh
gcloud config set project <walkthrough-project-id/>
bash ./deploy_langflow_gcp_spot.sh
```
The script will:
1. Check if the required resources (VPC, subnet, firewall rules, and Cloud Router) exist and create them if needed
2. Create a startup script to install Python, Langflow, and Nginx
3. Create a Compute Engine VM instance with the specified configuration and startup script
4. Configure Nginx to serve Langflow on TCP port 8080
> <walkthrough-pin-section-icon></walkthrough-pin-section-icon> The process may take approximately 30 minutes to complete. Rest assured that progress is being made, and you'll be able to proceed once the process is finished.
In the next step, you'll learn how to connect to the Langflow VM.
## Connect to the Langflow VM
To connect to your new Langflow VM, follow these steps:
1. Navigate to the [VM instances](https://console.cloud.google.com/compute/instances) page and click on the external IP for your VM. Make sure to use HTTP and set the port to 8080
<br>**or**
2. Run the following command to display the URL for your Langflow environment:
```bash
export LANGFLOW_IP=$(gcloud compute instances list --filter="NAME=langflow-dev" --format="value(EXTERNAL_IP)")
echo http://$LANGFLOW_IP:8080
```
3. Click on the Langflow URL in cloudshell to be greeted by the Langflow Dev environment
Congratulations! You have successfully deployed Langflow on Google Cloud Platform.
<walkthrough-conclusion-trophy></walkthrough-conclusion-trophy>
## Cleanup
If you want to remove the resources created during this tutorial, you can use the following commands:
```bash
gcloud compute instances delete langflow-dev --zone us-central1-a --quiet
```
The following network settings and services are used during this walkthrough. If you plan to continue using the project after the walkthrough, you may keep these configurations in place.
However, if you decide to remove them after completing the walkthrough, you can use the following gcloud commands:
> <walkthrough-pin-section-icon></walkthrough-pin-section-icon> These commands will delete the firewall rules and network configurations created during the walkthrough. Make sure to run them only if you no longer need these settings.
```
gcloud compute firewall-rules delete allow-tcp-8080 --quiet
gcloud compute firewall-rules delete allow-iap --quiet
gcloud compute networks subnets delete default --region us-central1 --quiet
gcloud compute networks delete default --quiet
```

View file

@ -1,4 +1,3 @@
import logging
import multiprocessing
import platform
from pathlib import Path
@ -7,8 +6,10 @@ import typer
from fastapi.staticfiles import StaticFiles
from langflow.main import create_app
from langflow.settings import settings
from langflow.utils.logger import configure
logger = logging.getLogger(__name__)
app = typer.Typer()
def get_number_of_workers(workers=None):
@ -17,9 +18,28 @@ def get_number_of_workers(workers=None):
return workers
def update_settings(config: str):
"""Update the settings from a config file."""
if config:
settings.update_from_yaml(config)
@app.command()
def serve(
host: str = "127.0.0.1", workers: int = 1, timeout: int = 60, port: int = 7860
host: str = typer.Option("127.0.0.1", help="Host to bind the server to."),
workers: int = typer.Option(1, help="Number of worker processes."),
timeout: int = typer.Option(60, help="Worker timeout in seconds."),
port: int = typer.Option(7860, help="Port to listen on."),
config: str = typer.Option("config.yaml", help="Path to the configuration file."),
log_level: str = typer.Option("info", help="Logging level."),
log_file: Path = typer.Option("logs/langflow.log", help="Path to the log file."),
):
"""
Run the Langflow server.
"""
configure(log_level=log_level, log_file=log_file)
update_settings(config)
app = create_app()
# get the directory of the current file
path = Path(__file__).parent
@ -39,10 +59,10 @@ def serve(
if platform.system() in ["Darwin", "Windows"]:
# Run using uvicorn on MacOS and Windows
# Windows doesn't support gunicorn
# MacOS requires a env variable to be set to use gunicorn
# MacOS requires an env variable to be set to use gunicorn
import uvicorn
uvicorn.run(app, host=host, port=port, log_level="info")
uvicorn.run(app, host=host, port=port, log_level=log_level)
else:
from langflow.server import LangflowApplication
@ -50,7 +70,7 @@ def serve(
def main():
typer.run(serve)
app()
if __name__ == "__main__":

View file

@ -0,0 +1,80 @@
from pydantic import BaseModel, validator
from langflow.graph.utils import extract_input_variables_from_prompt
class Code(BaseModel):
    """Request body for code validation: a raw Python source snippet."""

    code: str  # the source text to validate
class Prompt(BaseModel):
    """Request body for prompt validation: a prompt template string."""

    template: str  # template text, e.g. "Tell me about {topic}"
# Build ValidationResponse class for {"imports": {"errors": []}, "function": {"errors": []}}
class CodeValidationResponse(BaseModel):
    """Validation result shaped as {"imports": {"errors": []}, "function": {"errors": []}}."""

    imports: dict  # import-level validation errors
    function: dict  # function/body-level validation errors

    @validator("imports")
    def validate_imports(cls, v):
        # Fall back to an empty error list when no value was produced.
        return v or {"errors": []}

    @validator("function")
    def validate_function(cls, v):
        # Fall back to an empty error list when no value was produced.
        return v or {"errors": []}
class PromptValidationResponse(BaseModel):
    """Validation result: the input variables extracted from a prompt template."""

    input_variables: list
# Characters that may not appear in a prompt input-variable name.
INVALID_CHARACTERS = {
    " ",
    ",",
    ".",
    ":",
    ";",
    "!",
    "?",
    "/",
    "\\",
    "(",
    ")",
    "[",
    "]",
    "{",
    "}",
}


def validate_prompt(template: str):
    """Extract the input variables from ``template`` and validate their names."""
    input_variables = extract_input_variables_from_prompt(template)
    # Raises ValueError if any variable name contains invalid characters.
    input_variables = check_input_variables(input_variables)
    return PromptValidationResponse(input_variables=input_variables)


def check_input_variables(input_variables: list):
    """Validate that no variable name contains characters from INVALID_CHARACTERS.

    Returns the list unchanged when every name is clean; otherwise raises
    ValueError suggesting sanitized replacements.
    """
    # Build sanitized counterparts without mutating the list being traversed;
    # removing/appending during iteration skips elements and makes the result
    # depend on list order.
    fixed_variables = []
    for variable in input_variables:
        new_var = variable
        for char in INVALID_CHARACTERS:
            if char in new_var:
                new_var = new_var.replace(char, "")
        fixed_variables.append(new_var)
    # Any difference between the original and sanitized names means an
    # invalid character was present somewhere.
    if fixed_variables != list(input_variables):
        raise ValueError(
            f"Invalid input variables: {input_variables}. Please, use something like {fixed_variables} instead."
        )
    return input_variables

View file

@ -1,12 +1,14 @@
import logging
from typing import Any, Dict
from fastapi import APIRouter, HTTPException
from langflow.interface.run import process_data_graph
from langflow.interface.run import process_graph_cached
from langflow.interface.types import build_langchain_types_dict
# build router
router = APIRouter()
logger = logging.getLogger(__name__)
@router.get("/all")
@ -17,6 +19,8 @@ def get_all():
@router.post("/predict")
def get_load(data: Dict[str, Any]):
try:
return process_data_graph(data)
return process_graph_cached(data)
except Exception as e:
return HTTPException(status_code=500, detail=str(e))
# Log stack trace
logger.exception(e)
raise HTTPException(status_code=500, detail=str(e)) from e

View file

@ -1,58 +0,0 @@
from fastapi import APIRouter
from langflow.interface.listing import list_type
# build router
router = APIRouter(
prefix="/list",
tags=["list"],
)
@router.get("/")
def read_items():
"""List all components"""
return [
"chains",
"agents",
"prompts",
"llms",
"tools",
]
@router.get("/chains")
def list_chains():
"""List all chain types"""
return list_type("chains")
@router.get("/agents")
def list_agents():
"""List all agent types"""
# return list(agents.loading.AGENT_TO_CLASS.keys())
return list_type("agents")
@router.get("/prompts")
def list_prompts():
"""List all prompt types"""
return list_type("prompts")
@router.get("/llms")
def list_llms():
"""List all llm types"""
return list_type("llms")
@router.get("/memories")
def list_memories():
"""List all memory types"""
return list_type("memories")
@router.get("/tools")
def list_tools():
"""List all load tools"""
return list_type("tools")

View file

@ -1,63 +0,0 @@
from fastapi import APIRouter, HTTPException
from langflow.interface.signature import get_signature
# build router
router = APIRouter(
prefix="/signatures",
tags=["signatures"],
)
@router.get("/chain")
def get_chain(name: str):
"""Get the signature of a chain."""
try:
return get_signature(name, "chains")
except ValueError as exc:
raise HTTPException(status_code=404, detail="Chain not found") from exc
@router.get("/agent")
def get_agent(name: str):
"""Get the signature of an agent."""
try:
return get_signature(name, "agents")
except ValueError as exc:
raise HTTPException(status_code=404, detail="Agent not found") from exc
@router.get("/prompt")
def get_prompt(name: str):
"""Get the signature of a prompt."""
try:
return get_signature(name, "prompts")
except ValueError as exc:
raise HTTPException(status_code=404, detail="Prompt not found") from exc
@router.get("/llm")
def get_llm(name: str):
"""Get the signature of an llm."""
try:
return get_signature(name, "llms")
except ValueError as exc:
raise HTTPException(status_code=404, detail="LLM not found") from exc
@router.get("/memory")
def get_memory(name: str):
"""Get the signature of a memory."""
try:
return get_signature(name, "memories")
except ValueError as exc:
raise HTTPException(status_code=404, detail="Memory not found") from exc
@router.get("/tool")
def get_tool(name: str):
"""Get the signature of a tool."""
try:
return get_signature(name, "tools")
except ValueError as exc:
raise HTTPException(status_code=404, detail="Tool not found") from exc

View file

@ -0,0 +1,35 @@
from fastapi import APIRouter, HTTPException
from langflow.api.base import (
Code,
CodeValidationResponse,
Prompt,
PromptValidationResponse,
validate_prompt,
)
from langflow.utils.logger import logger
from langflow.utils.validate import validate_code
# build router
router = APIRouter(prefix="/validate", tags=["validate"])
@router.post("/code", status_code=200, response_model=CodeValidationResponse)
def post_validate_code(code: Code):
    """Validate a Python snippet, reporting import-level and function-level errors."""
    try:
        errors = validate_code(code.code)
        return CodeValidationResponse(
            imports=errors.get("imports", {}),
            function=errors.get("function", {}),
        )
    except Exception as e:
        # A *returned* HTTPException is serialized as a 200 response body;
        # it must be raised for FastAPI to emit the 500 (same pattern as the
        # /validate/prompt handler).
        raise HTTPException(status_code=500, detail=str(e)) from e
@router.post("/prompt", status_code=200, response_model=PromptValidationResponse)
def post_validate_prompt(prompt: Prompt):
try:
return validate_prompt(prompt.template)
except Exception as e:
logger.exception(e)
raise HTTPException(status_code=500, detail=str(e)) from e

View file

149
src/backend/langflow/cache/utils.py vendored Normal file
View file

@ -0,0 +1,149 @@
import base64
import contextlib
import functools
import hashlib
import json
import os
import tempfile
from collections import OrderedDict
from pathlib import Path
import dill # type: ignore
def create_cache_folder(func):
    """Decorator ensuring the cache directory exists before calling ``func``.

    Without ``functools.wraps`` the decorated function's ``__name__`` and
    docstring were clobbered by the wrapper's.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Cache folder lives under the system temp dir, namespaced by PREFIX.
        cache_path = Path(tempfile.gettempdir()) / PREFIX
        # Create it if missing; no-op when it already exists.
        os.makedirs(cache_path, exist_ok=True)
        return func(*args, **kwargs)

    return wrapper
def memoize_dict(maxsize=128):
    """Memoization decorator keyed on a structural hash of the first (dict) argument.

    Entries are keyed by (function name, dict hash, kwargs). When the cache
    exceeds ``maxsize``, the least-recently-used entry is evicted; cache hits
    refresh an entry's recency so eviction is true LRU rather than FIFO on
    insertion order.
    """
    cache = OrderedDict()

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Hash the dict argument so equal graphs share a cache entry
            # regardless of object identity.
            hashed = compute_dict_hash(args[0])
            key = (func.__name__, hashed, frozenset(kwargs.items()))
            if key not in cache:
                result = func(*args, **kwargs)
                cache[key] = result
                if len(cache) > maxsize:
                    # Evict the least-recently-used entry (front of the dict).
                    cache.popitem(last=False)
            else:
                result = cache[key]
                # Mark this entry as most recently used.
                cache.move_to_end(key)
            return result

        def clear_cache():
            cache.clear()

        wrapper.clear_cache = clear_cache
        return wrapper

    return decorator
# Name of the shared cache folder created under the system temp directory.
PREFIX = "langflow_cache"
@create_cache_folder
def clear_old_cache_files(max_cache_size: int = 3):
    """Delete cached ``.dill`` files beyond the ``max_cache_size`` newest ones."""
    cache_dir = Path(tempfile.gettempdir()) / PREFIX
    dill_files = list(cache_dir.glob("*.dill"))
    if len(dill_files) <= max_cache_size:
        return
    # Newest first; everything after the first max_cache_size entries is stale.
    newest_first = sorted(dill_files, key=lambda f: f.stat().st_mtime, reverse=True)
    for stale_file in newest_first[max_cache_size:]:
        with contextlib.suppress(OSError):
            os.remove(stale_file)
def compute_dict_hash(graph_data):
    """Return a SHA-256 hex digest of the graph dict, ignoring cosmetic keys."""
    canonical = json.dumps(filter_json(graph_data), sort_keys=True)
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()
def filter_json(json_data):
    """Return a copy of ``json_data`` with UI-only keys removed.

    Strips 'viewport' and 'chatHistory' at the top level, and position /
    selection state from each node, so hashes computed from the result only
    reflect meaningful graph content.

    Bug fix: the original made a shallow copy and then deleted keys from the
    nested node dicts in place, mutating the caller's data. Each node dict is
    now copied before keys are removed, leaving the input untouched.
    """
    filtered_data = json_data.copy()
    # Remove 'viewport' and 'chatHistory' keys
    filtered_data.pop("viewport", None)
    filtered_data.pop("chatHistory", None)
    # Filter nodes: copy each node dict so the caller's data is not modified.
    if "nodes" in filtered_data:
        cleaned_nodes = []
        for node in filtered_data["nodes"]:
            node = node.copy()
            for key in ("position", "positionAbsolute", "selected", "dragging"):
                node.pop(key, None)
            cleaned_nodes.append(node)
        filtered_data["nodes"] = cleaned_nodes
    return filtered_data
@create_cache_folder
def save_binary_file(content: str, file_name: str, accepted_types: list[str]) -> str:
    """
    Save a base64 data-URI payload to the cache folder.

    Args:
        content: The file content as a data URI ("<header>,<base64 payload>").
        file_name: The name of the file, including its extension.
        accepted_types: File-name suffixes that are allowed to be saved.

    Returns:
        The path to the saved file.

    Raises:
        ValueError: If ``file_name`` does not end with one of ``accepted_types``.
    """
    if not any(file_name.endswith(suffix) for suffix in accepted_types):
        raise ValueError(f"File {file_name} is not accepted")

    # Destination folder (guaranteed to exist by the decorator).
    cache_path = Path(tempfile.gettempdir()) / PREFIX

    # Strip the data-URI header (e.g. "data:...;base64,") and decode the payload.
    data = content.split(",")[1]
    decoded_bytes = base64.b64decode(data)

    # Build the full path with pathlib (consistent with the rest of the module)
    # and write the binary content.
    file_path = cache_path / file_name
    with file_path.open("wb") as file:
        file.write(decoded_bytes)

    return str(file_path)
@create_cache_folder
def save_cache(hash_val: str, chat_data, clean_old_cache_files: bool):
    """Serialize ``chat_data`` to ``<tmpdir>/<PREFIX>/<hash_val>.dill`` using dill."""
    cache_file_path = Path(tempfile.gettempdir()) / PREFIX / f"{hash_val}.dill"
    with cache_file_path.open("wb") as fp:
        dill.dump(chat_data, fp)
    if clean_old_cache_files:
        clear_old_cache_files()
@create_cache_folder
def load_cache(hash_val):
    """Return the cached object for ``hash_val``, or ``None`` when absent."""
    cache_file_path = Path(tempfile.gettempdir()) / PREFIX / f"{hash_val}.dill"
    if not cache_file_path.exists():
        return None
    with cache_file_path.open("rb") as fp:
        return dill.load(fp)

View file

@ -1,27 +1,128 @@
chains:
- LLMChain
- LLMMathChain
- LLMChecker
# - ConversationChain
- LLMCheckerChain
- ConversationChain
- SeriesCharacterChain
- MidJourneyPromptChain
- TimeTravelGuideChain
- SQLDatabaseChain
agents:
- ZeroShotAgent
- JsonAgent
- CSVAgent
- initialize_agent
- VectorStoreAgent
- VectorStoreRouterAgent
- SQLAgent
prompts:
- PromptTemplate
- FewShotPromptTemplate
- ZeroShotPrompt
# Awaiting more tests before enabling these
# - ChatPromptTemplate
# - SystemMessagePromptTemplate
# - HumanMessagePromptTemplate
llms:
- OpenAI
- OpenAIChat
# - AzureOpenAI
- ChatOpenAI
- HuggingFaceHub
- LlamaCpp
tools:
- Search
- PAL-MATH
- Calculator
- Serper Search
- Tool
- PythonFunction
- JsonSpec
- News API
- TMDB API
- Podcast API
- QuerySQLDataBaseTool
- InfoSQLDatabaseTool
- ListSQLDatabaseTool
# - QueryCheckerTool
- BingSearchRun
- GoogleSearchRun
- GoogleSearchResults
- JsonListKeysTool
- JsonGetValueTool
- PythonREPLTool
- PythonAstREPLTool
- RequestsGetTool
- RequestsPostTool
- RequestsPatchTool
- RequestsPutTool
- RequestsDeleteTool
- WikipediaQueryRun
- WolframAlphaQueryRun
wrappers:
- RequestsWrapper
toolkits:
- OpenAPIToolkit
- JsonToolkit
- VectorStoreInfo
- VectorStoreRouterToolkit
memories:
# - ConversationBufferMemory
- ConversationBufferMemory
- ConversationSummaryMemory
- ConversationKGMemory
embeddings:
- OpenAIEmbeddings
vectorstores:
- Chroma
documentloaders:
- AirbyteJSONLoader
- CoNLLULoader
- CSVLoader
- UnstructuredEmailLoader
- EverNoteLoader
- FacebookChatLoader
- GutenbergLoader
- BSHTMLLoader
- UnstructuredHTMLLoader
# - UnstructuredImageLoader # Issue with Python 3.11 (https://github.com/Unstructured-IO/unstructured-inference/issues/83)
- UnstructuredMarkdownLoader
- PyPDFLoader
- UnstructuredPowerPointLoader
- SRTLoader
- TelegramChatLoader
- TextLoader
- UnstructuredWordDocumentLoader
- WebBaseLoader
- AZLyricsLoader
- CollegeConfidentialLoader
- HNLoader
- IFixitLoader
- IMSDbLoader
- GitbookLoader
- ReadTheDocsLoader
textsplitters:
- CharacterTextSplitter
utilities:
- BingSearchAPIWrapper
- GoogleSearchAPIWrapper
- GoogleSerperAPIWrapper
- SearxResults
- SearxSearchWrapper
- SerpAPIWrapper
- WikipediaAPIWrapper
- WolframAlphaAPIWrapper
# - ZapierNLAWrapper
- SQLDatabase
dev: false

View file

@ -1,42 +1,23 @@
from langchain.agents.mrkl import prompt
from langflow.template import nodes
# These should always be instantiated
# Registry of langflow-specific node templates, keyed first by node category
# (prompts / tools / agents / utilities) and then by node type name.
CUSTOM_NODES = {
    "prompts": {"ZeroShotPrompt": nodes.ZeroShotPromptNode()},
    "tools": {"PythonFunction": nodes.PythonFunctionNode(), "Tool": nodes.ToolNode()},
    "agents": {
        "JsonAgent": nodes.JsonAgentNode(),
        "CSVAgent": nodes.CSVAgentNode(),
        "initialize_agent": nodes.InitializeAgentNode(),
        "VectorStoreAgent": nodes.VectorStoreAgentNode(),
        "VectorStoreRouterAgent": nodes.VectorStoreRouterAgentNode(),
        "SQLAgent": nodes.SQLAgentNode(),
    },
    "utilities": {
        "SQLDatabase": nodes.SQLDatabaseNode(),
    },
}
def get_custom_prompts():
    """Return the frontend template dict for the custom ZeroShotPrompt node.

    The prefix / suffix / format_instructions fields default to langchain's
    MRKL agent prompt pieces.
    """
    return {
        "ZeroShotPrompt": {
            "template": {
                "_type": "zero_shot",
                "prefix": {
                    "type": "str",
                    "required": False,
                    "placeholder": "",
                    "list": False,
                    "show": True,
                    "multiline": True,
                    "value": prompt.PREFIX,
                },
                "suffix": {
                    "type": "str",
                    "required": True,
                    "placeholder": "",
                    "list": False,
                    "show": True,
                    "multiline": True,
                    "value": prompt.SUFFIX,
                },
                "format_instructions": {
                    "type": "str",
                    "required": False,
                    "placeholder": "",
                    "list": False,
                    "show": True,
                    "multiline": True,
                    "value": prompt.FORMAT_INSTRUCTIONS,
                },
            },
            "description": "Prompt template for Zero Shot Agent.",
            "base_classes": ["BasePromptTemplate"],
        }
    }
def get_custom_nodes(node_type: str):
    """Return the custom-node registry for ``node_type`` (empty dict if none)."""
    registry = CUSTOM_NODES.get(node_type)
    return registry if registry is not None else {}

View file

@ -0,0 +1,4 @@
from langflow.graph.base import Edge, Node
from langflow.graph.graph import Graph
__all__ = ["Graph", "Node", "Edge"]

View file

@ -0,0 +1,270 @@
# Description: Graph class for building a graph of nodes and edges
# Insights:
# - Defer prompts building to the last moment or when they have all the tools
# - Build each inner agent first, then build the outer agent
import contextlib
import types
import warnings
from copy import deepcopy
from typing import Any, Dict, List, Optional
from langflow.cache import utils as cache_utils
from langflow.graph.constants import DIRECT_TYPES
from langflow.interface import loading
from langflow.interface.listing import ALL_TYPES_DICT
from langflow.utils.logger import logger
class Node:
    """A vertex of the flow graph wrapping one frontend node template.

    A Node knows its template data, the edges attached to it, and lazily
    builds the underlying LangChain object on ``build()``.
    """

    def __init__(self, data: Dict, base_type: Optional[str] = None) -> None:
        self.id: str = data["id"]
        self._data = data
        self.edges: List[Edge] = []
        # Category such as "agents"/"chains"; inferred in _parse_data when None.
        self.base_type: Optional[str] = base_type
        self._parse_data()
        # Cached result of _build(); None until the node has been built.
        self._built_object = None
        self._built = False

    def _parse_data(self) -> None:
        """Extract output classes, required/optional input types, node type
        and (if unset) base_type from the frontend template payload."""
        self.data = self._data["data"]
        self.output = self.data["node"]["base_classes"]
        template_dicts = {
            key: value
            for key, value in self.data["node"]["template"].items()
            if isinstance(value, dict)
        }
        self.required_inputs = [
            template_dicts[key]["type"]
            for key, value in template_dicts.items()
            if value["required"]
        ]
        self.optional_inputs = [
            template_dicts[key]["type"]
            for key, value in template_dicts.items()
            if not value["required"]
        ]
        template_dict = self.data["node"]["template"]
        # Tool-producing nodes use the template's _type as the concrete type.
        self.node_type = (
            self.data["type"] if "Tool" not in self.output else template_dict["_type"]
        )
        if self.base_type is None:
            # Infer the category by looking the node type up in the global listing.
            for base_type, value in ALL_TYPES_DICT.items():
                if self.node_type in value:
                    self.base_type = base_type
                    break

    def _build_params(self):
        """Resolve the template fields into ``self.params``.

        Direct values (str/int/...) are taken from the template; non-direct
        types are resolved to the source Node of a matching incoming edge;
        "file" fields are decoded and written to the cache folder.
        """
        # Some params are required, some are optional
        # but most importantly, some params are python base classes
        # like str and others are LangChain objects like LLMChain, BasePromptTemplate
        # so we need to be able to distinguish between the two
        # The dicts with "type" == "str" are the ones that are python base classes
        # and most likely have a "value" key
        # So for each key besides "_type" in the template dict, we have a dict
        # with a "type" key. If the type is not "str", then we need to get the
        # edge that connects to that node and get the Node with the required data
        # and use that as the value for the param
        # If the type is "str", then we need to get the value of the "value" key
        # and use that as the value for the param
        template_dict = {
            key: value
            for key, value in self.data["node"]["template"].items()
            if isinstance(value, dict)
        }
        params = {}
        for key, value in template_dict.items():
            if key == "_type":
                continue
            # If the type is not transformable to a python base class
            # then we need to get the edge that connects to this node
            if value.get("type") == "file":
                # Load the type in value.get('suffixes') using
                # what is inside value.get('content')
                # value.get('value') is the file name
                file_name = value.get("value")
                content = value.get("content")
                type_to_load = value.get("suffixes")
                file_path = cache_utils.save_binary_file(
                    content=content, file_name=file_name, accepted_types=type_to_load
                )
                params[key] = file_path
            elif value.get("type") not in DIRECT_TYPES:
                # Get the edge that connects to this node
                edges = [
                    edge
                    for edge in self.edges
                    if edge.target == self and edge.matched_type in value["type"]
                ]
                # Get the output of the node that the edge connects to
                # if the value['list'] is True, then there will be more
                # than one time setting to params[key]
                # so we need to append to a list if it exists
                # or create a new list if it doesn't
                if value["required"] and not edges:
                    # If a required parameter is not found, raise an error
                    raise ValueError(
                        f"Required input {key} for module {self.node_type} not found"
                    )
                elif value["list"]:
                    # If this is a list parameter, append all sources to a list
                    params[key] = [edge.source for edge in edges]
                elif edges:
                    # If a single parameter is found, use its source
                    params[key] = edges[0].source
            elif value["required"] or value.get("value"):
                # If value does not have value this still passes
                # but then gives a keyError
                # so we need to check if value has value
                new_value = value.get("value")
                if new_value is None:
                    warnings.warn(f"Value for {key} in {self.node_type} is None. ")
                if value.get("type") == "int":
                    with contextlib.suppress(TypeError, ValueError):
                        new_value = int(new_value)  # type: ignore
                params[key] = new_value

        # Add _type to params
        self.params = params

    def _build(self):
        """Build dependency Nodes in ``self.params`` and instantiate the
        underlying LangChain object via ``loading.instantiate_class``.

        Raises:
            ValueError: if instantiation fails or produces ``None``.
        """
        # The params dict is used to build the module
        # it contains values and keys that point to nodes which
        # have their own params dict
        # When build is called, we iterate through the params dict
        # and if the value is a node, we call build on that node
        # and use the output of that build as the value for the param
        # if the value is not a node, then we use the value as the param
        # and continue
        # Another aspect is that the node_type is the class that we need to import
        # and instantiate with these built params
        logger.debug(f"Building {self.node_type}")

        # Build each node in the params dict
        for key, value in self.params.copy().items():
            # Check if Node or list of Nodes and not self
            # to avoid recursion
            if isinstance(value, Node):
                if value == self:
                    del self.params[key]
                    continue
                result = value.build()
                # If the key is "func", then we need to use the run method
                if key == "func" and not isinstance(result, types.FunctionType):
                    # func can be PythonFunction(code='\ndef upper_case(text: str) -> str:\n    return text.upper()\n')
                    # so we need to check if there is an attribute called run
                    if hasattr(result, "run"):
                        result = result.run  # type: ignore
                    elif hasattr(result, "get_function"):
                        result = result.get_function()  # type: ignore
                self.params[key] = result
            elif isinstance(value, list) and all(
                isinstance(node, Node) for node in value
            ):
                self.params[key] = [node.build() for node in value]  # type: ignore

        # Get the class from LANGCHAIN_TYPES_DICT
        # and instantiate it with the params
        # and return the instance
        try:
            self._built_object = loading.instantiate_class(
                node_type=self.node_type,
                base_type=self.base_type,
                params=self.params,
            )
        except Exception as exc:
            raise ValueError(
                f"Error building node {self.node_type}: {str(exc)}"
            ) from exc

        if self._built_object is None:
            raise ValueError(f"Node type {self.node_type} not found")

        self._built = True

    def build(self, force: bool = False) -> Any:
        """Return the built LangChain object, building it first if needed.

        Args:
            force: rebuild even when a cached built object exists.
        """
        if not self._built or force:
            self._build()

        #! Deepcopy is breaking for vectorstores
        if self.base_type in [
            "vectorstores",
            "VectorStoreRouterAgent",
            "VectorStoreAgent",
            "VectorStoreInfo",
        ] or self.node_type in [
            "VectorStoreInfo",
            "VectorStoreRouterToolkit",
            "SQLDatabase",
        ]:
            return self._built_object

        return deepcopy(self._built_object)

    def add_edge(self, edge: "Edge") -> None:
        """Register an edge attached to this node (as source or target)."""
        self.edges.append(edge)

    def __repr__(self) -> str:
        return f"Node(id={self.id}, data={self.data})"

    def __eq__(self, __o: object) -> bool:
        # Equality is by frontend id, not by identity.
        return self.id == __o.id if isinstance(__o, Node) else False

    def __hash__(self) -> int:
        # NOTE(review): hash is identity-based while __eq__ is id-based, so two
        # equal Nodes can hash differently — confirm this is intentional before
        # using Nodes as dict keys across instances.
        return id(self)
class Edge:
    """A directed connection between two graph Nodes, validated on creation."""

    def __init__(self, source: "Node", target: "Node"):
        self.source: "Node" = source
        self.target: "Node" = target
        self.validate_edge()

    def validate_edge(self) -> None:
        """Check that an output type of ``source`` matches an input of ``target``.

        Sets ``self.valid`` and ``self.matched_type``.

        Raises:
            ValueError: when no source output type matches any target input.
        """
        # Validate that the outputs of the source node are valid inputs
        # for the target node
        self.source_types = self.source.output
        self.target_reqs = self.target.required_inputs + self.target.optional_inputs
        # Both lists contain strings and sometimes a string contains the value we are
        # looking for e.g. source_types=["Chain"] and target_reqs=["LLMChain"]
        # (typo "comgin_out" fixed) so we need to check if any of the strings
        # in source_types is a substring of an entry in target_reqs
        self.valid = any(
            output in target_req
            for output in self.source_types
            for target_req in self.target_reqs
        )
        # Get what type of input the target node is expecting
        self.matched_type = next(
            (
                output
                for output in self.source_types
                for target_req in self.target_reqs
                if output in target_req
            ),
            None,
        )
        # Bug fix: the original checked `no_matched_type` twice in two separate
        # if-statements; merged into a single branch with identical behavior
        # (log both sides for debugging, then fail).
        if self.matched_type is None:
            logger.debug(self.source_types)
            logger.debug(self.target_reqs)
            raise ValueError(
                f"Edge between {self.source.node_type} and {self.target.node_type} "
                f"has no matched type"
            )

    def __repr__(self) -> str:
        return (
            f"Edge(source={self.source.id}, target={self.target.id}, valid={self.valid}"
            f", matched_type={self.matched_type})"
        )

View file

@ -0,0 +1 @@
# Template field types whose values are passed to the node directly as params,
# rather than being resolved through an incoming edge (see Node._build_params).
DIRECT_TYPES = ["str", "bool", "code", "int", "float", "Any", "prompt"]

View file

@ -0,0 +1,166 @@
from typing import Dict, List, Type, Union
from langflow.graph.base import Edge, Node
from langflow.graph.nodes import (
AgentNode,
ChainNode,
DocumentLoaderNode,
EmbeddingNode,
FileToolNode,
LLMNode,
MemoryNode,
PromptNode,
TextSplitterNode,
ToolkitNode,
ToolNode,
VectorStoreNode,
WrapperNode,
)
from langflow.interface.agents.base import agent_creator
from langflow.interface.chains.base import chain_creator
from langflow.interface.document_loaders.base import documentloader_creator
from langflow.interface.embeddings.base import embedding_creator
from langflow.interface.llms.base import llm_creator
from langflow.interface.memories.base import memory_creator
from langflow.interface.prompts.base import prompt_creator
from langflow.interface.text_splitters.base import textsplitter_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.tools.constants import FILE_TOOLS
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
from langflow.utils import payload
class Graph:
    """Builds Node/Edge objects from frontend payload dicts and wires them up."""

    def __init__(
        self,
        nodes: List[Dict[str, Union[str, Dict[str, Union[str, List[str]]]]]],
        edges: List[Dict[str, str]],
    ) -> None:
        self._nodes = nodes
        self._edges = edges
        self._build_graph()

    def _build_graph(self) -> None:
        """Create Nodes and Edges, attach edges to both endpoints, resolve
        params, inject the LLM into toolkit nodes, and drop unconnected nodes."""
        self.nodes = self._build_nodes()
        self.edges = self._build_edges()
        for edge in self.edges:
            edge.source.add_edge(edge)
            edge.target.add_edge(edge)

        # This is a hack to make sure that the LLM node is sent to
        # the toolkit node
        llm_node = None
        for node in self.nodes:
            node._build_params()
            if isinstance(node, LLMNode):
                llm_node = node

        for node in self.nodes:
            if isinstance(node, ToolkitNode):
                node.params["llm"] = llm_node
        # remove invalid nodes (but keep a lone node in a single-node flow)
        self.nodes = [
            node
            for node in self.nodes
            if self._validate_node(node)
            or (len(self.nodes) == 1 and len(self.edges) == 0)
        ]

    def _validate_node(self, node: Node) -> bool:
        # All nodes that do not have edges are invalid
        return len(node.edges) > 0

    def get_node(self, node_id: str) -> Union[None, Node]:
        """Return the Node with the given frontend id, or None."""
        return next((node for node in self.nodes if node.id == node_id), None)

    def get_nodes_with_target(self, node: Node) -> List[Node]:
        """Return the source Nodes of every edge that targets ``node``."""
        connected_nodes: List[Node] = [
            edge.source for edge in self.edges if edge.target == node
        ]
        return connected_nodes

    def build(self) -> List[Node]:
        # NOTE(review): despite the annotation this returns the root node's
        # *built object* (whatever Node.build yields), not a List[Node] —
        # confirm and fix the annotation at the source.
        # Get root node
        root_node = payload.get_root_node(self)
        if root_node is None:
            raise ValueError("No root node found")
        return root_node.build()

    def get_node_neighbors(self, node: Node) -> Dict[Node, int]:
        """Return each neighbor of ``node`` mapped to the number of connecting edges."""
        neighbors: Dict[Node, int] = {}
        for edge in self.edges:
            if edge.source == node:
                neighbor = edge.target
                if neighbor not in neighbors:
                    neighbors[neighbor] = 0
                neighbors[neighbor] += 1
            elif edge.target == node:
                neighbor = edge.source
                if neighbor not in neighbors:
                    neighbors[neighbor] = 0
                neighbors[neighbor] += 1
        return neighbors

    def _build_edges(self) -> List[Edge]:
        """Instantiate Edge objects from the raw edge dicts.

        Raises:
            ValueError: when an edge references an unknown node id.
        """
        # Edge takes two nodes as arguments, so we need to build the nodes first
        # and then build the edges
        # if we can't find a node, we raise an error
        edges: List[Edge] = []
        for edge in self._edges:
            source = self.get_node(edge["source"])
            target = self.get_node(edge["target"])
            if source is None:
                raise ValueError(f"Source node {edge['source']} not found")
            if target is None:
                raise ValueError(f"Target node {edge['target']} not found")
            edges.append(Edge(source, target))
        return edges

    def _get_node_class(self, node_type: str, node_lc_type: str) -> Type[Node]:
        """Pick the Node subclass for a node, falling back to plain Node."""
        node_type_map: Dict[str, Type[Node]] = {
            **{t: PromptNode for t in prompt_creator.to_list()},
            **{t: AgentNode for t in agent_creator.to_list()},
            **{t: ChainNode for t in chain_creator.to_list()},
            **{t: ToolNode for t in tool_creator.to_list()},
            **{t: ToolkitNode for t in toolkits_creator.to_list()},
            **{t: WrapperNode for t in wrapper_creator.to_list()},
            **{t: LLMNode for t in llm_creator.to_list()},
            **{t: MemoryNode for t in memory_creator.to_list()},
            **{t: EmbeddingNode for t in embedding_creator.to_list()},
            **{t: VectorStoreNode for t in vectorstore_creator.to_list()},
            **{t: DocumentLoaderNode for t in documentloader_creator.to_list()},
            **{t: TextSplitterNode for t in textsplitter_creator.to_list()},
        }
        if node_type in FILE_TOOLS:
            return FileToolNode
        if node_type in node_type_map:
            return node_type_map[node_type]
        # Fall back to the underlying langchain type when the frontend type
        # is not registered directly.
        if node_lc_type in node_type_map:
            return node_type_map[node_lc_type]
        return Node

    def _build_nodes(self) -> List[Node]:
        """Instantiate the proper Node subclass for each raw node dict."""
        nodes: List[Node] = []
        for node in self._nodes:
            node_data = node["data"]
            node_type: str = node_data["type"]  # type: ignore
            node_lc_type: str = node_data["node"]["template"]["_type"]  # type: ignore

            NodeClass = self._get_node_class(node_type, node_lc_type)
            nodes.append(NodeClass(node))

        return nodes

    def get_children_by_node_type(self, node: Node, node_type: str) -> List[Node]:
        # NOTE(review): this only inspects the given node's own type/base
        # classes — it does not traverse children despite the name; confirm
        # intent with callers.
        children = []
        node_types = [node.data["type"]]
        if "node" in node.data:
            node_types += node.data["node"]["base_classes"]
        if node_type in node_types:
            children.append(node)
        return children

View file

@ -0,0 +1,160 @@
from copy import deepcopy
from typing import Any, Dict, List, Optional, Union
from langflow.graph.base import Node
from langflow.graph.utils import extract_input_variables_from_prompt
class AgentNode(Node):
    """Graph node for a LangChain agent; builds its tools and chains first."""

    def __init__(self, data: Dict):
        super().__init__(data, base_type="agents")
        self.tools: List[ToolNode] = []
        self.chains: List[ChainNode] = []

    def _set_tools_and_chains(self) -> None:
        """Collect ToolNode/ChainNode sources from the attached edges."""
        for edge in self.edges:
            source_node = edge.source
            if isinstance(source_node, ToolNode):
                self.tools.append(source_node)
            elif isinstance(source_node, ChainNode):
                self.chains.append(source_node)

    def build(self, force: bool = False) -> Any:
        """Build tools, then chains (passing the tools), then the agent itself."""
        if not self._built or force:
            self._set_tools_and_chains()
            # First, build the tools
            for tool_node in self.tools:
                tool_node.build()
            # Next, build the chains and the rest
            for chain_node in self.chains:
                chain_node.build(tools=self.tools)
            self._build()

        #! Cannot deepcopy VectorStore, VectorStoreRouter, or SQL agents
        if self.node_type in ["VectorStoreAgent", "VectorStoreRouterAgent", "SQLAgent"]:
            return self._built_object
        return deepcopy(self._built_object)
class ToolNode(Node):
    """Graph node for a LangChain tool."""

    def __init__(self, data: Dict):
        super().__init__(data, base_type="tools")
class PromptNode(Node):
    """Graph node for a prompt template; infers input_variables before building."""

    def __init__(self, data: Dict):
        super().__init__(data, base_type="prompts")

    def build(
        self,
        force: bool = False,
        tools: Optional[Union[List[Node], List[ToolNode]]] = None,
    ) -> Any:
        """Build the prompt, extracting ``{variable}`` placeholders from its
        template text (and wiring built tools into ShotPrompt variants)."""
        if not self._built or force:
            if (
                "input_variables" not in self.params
                or self.params["input_variables"] is None
            ):
                self.params["input_variables"] = []
            # Check if it is a ZeroShotPrompt and needs a tool
            if "ShotPrompt" in self.node_type:
                tools = (
                    [tool_node.build() for tool_node in tools]
                    if tools is not None
                    else []
                )
                self.params["tools"] = tools
                # Every string param except format_instructions may carry
                # prompt variables.
                prompt_params = [
                    key
                    for key, value in self.params.items()
                    if isinstance(value, str) and key != "format_instructions"
                ]
            else:
                prompt_params = ["template"]
            for param in prompt_params:
                prompt_text = self.params[param]
                variables = extract_input_variables_from_prompt(prompt_text)
                self.params["input_variables"].extend(variables)
            # Deduplicate the collected variable names.
            self.params["input_variables"] = list(set(self.params["input_variables"]))

            self._build()
        return deepcopy(self._built_object)
class ChainNode(Node):
    """Graph node for a LangChain chain; builds nested PromptNodes first."""

    def __init__(self, data: Dict):
        super().__init__(data, base_type="chains")

    def build(
        self,
        force: bool = False,
        tools: Optional[Union[List[Node], List[ToolNode]]] = None,
    ) -> Any:
        """Build the chain, forwarding ``tools`` to any PromptNode params."""
        if not self._built or force:
            # Check if the chain requires a PromptNode
            for key, value in self.params.items():
                if isinstance(value, PromptNode):
                    # Build the PromptNode, passing the tools if available
                    self.params[key] = value.build(tools=tools, force=force)

            self._build()

        #! Cannot deepcopy SQLDatabaseChain
        if self.node_type in ["SQLDatabaseChain"]:
            return self._built_object
        return deepcopy(self._built_object)
class LLMNode(Node):
    """Graph node for an LLM."""

    def __init__(self, data: Dict):
        super().__init__(data, base_type="llms")
class ToolkitNode(Node):
    """Graph node for an agent toolkit (receives the LLM via Graph._build_graph)."""

    def __init__(self, data: Dict):
        super().__init__(data, base_type="toolkits")
class FileToolNode(ToolNode):
    """Tool node whose template includes a file field (see FILE_TOOLS)."""

    def __init__(self, data: Dict):
        super().__init__(data)
class WrapperNode(Node):
    """Graph node for wrapper utilities (e.g. RequestsWrapper)."""

    def __init__(self, data: Dict):
        super().__init__(data, base_type="wrappers")

    def build(self, force: bool = False) -> Any:
        """Build the wrapper, parsing the optional ``headers`` param safely."""
        if not self._built or force:
            if "headers" in self.params:
                # Security fix: ``headers`` is user-supplied flow data, so parse
                # it with ast.literal_eval (dict/str literals only) instead of
                # eval(), which would execute arbitrary code.
                import ast

                self.params["headers"] = ast.literal_eval(self.params["headers"])
            self._build()
        return deepcopy(self._built_object)
class DocumentLoaderNode(Node):
    """Graph node for a document loader."""

    def __init__(self, data: Dict):
        super().__init__(data, base_type="documentloaders")
class EmbeddingNode(Node):
    """Graph node for an embedding model."""

    def __init__(self, data: Dict):
        super().__init__(data, base_type="embeddings")
class VectorStoreNode(Node):
    """Graph node for a vector store (never deepcopied — see Node.build)."""

    def __init__(self, data: Dict):
        super().__init__(data, base_type="vectorstores")
class MemoryNode(Node):
    """Graph node for a conversation memory."""

    def __init__(self, data: Dict):
        # NOTE(review): base_type is "memory" here, but the listing config and
        # creator use "memories" — confirm this string matches what
        # loading.instantiate_class expects.
        super().__init__(data, base_type="memory")
class TextSplitterNode(Node):
    """Graph node for a text splitter."""

    def __init__(self, data: Dict):
        super().__init__(data, base_type="textsplitters")

View file

@ -0,0 +1,19 @@
import re
def validate_prompt(prompt: str):
    """Return the prompt unchanged when it has input variables, else fix it."""
    has_variables = bool(extract_input_variables_from_prompt(prompt))
    return prompt if has_variables else fix_prompt(prompt)
def fix_prompt(prompt: str):
    """Fix a variable-less prompt by appending a generic ``{input}`` placeholder."""
    return "{} {{input}}".format(prompt)
def extract_input_variables_from_prompt(prompt: str) -> list[str]:
    """Return the names inside ``{...}`` placeholders, in order of appearance."""
    placeholder = re.compile(r"{(.*?)}")
    return placeholder.findall(prompt)

View file

@ -0,0 +1,3 @@
from langflow.interface.agents.base import AgentCreator
__all__ = ["AgentCreator"]

View file

@ -0,0 +1,53 @@
from typing import Dict, List, Optional
from langchain.agents import loading
from langflow.custom.customs import get_custom_nodes
from langflow.interface.agents.custom import CUSTOM_AGENTS
from langflow.interface.base import LangChainTypeCreator
from langflow.settings import settings
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
class AgentCreator(LangChainTypeCreator):
    """Creator exposing LangChain's stock agents plus langflow's custom agents."""

    type_name: str = "agents"

    @property
    def type_to_loader_dict(self) -> Dict:
        """Lazily build the name -> agent-class map, merging CUSTOM_AGENTS in."""
        if self.type_dict is None:
            self.type_dict = loading.AGENT_TO_CLASS
            # Add JsonAgent to the list of agents
            for name, agent in CUSTOM_AGENTS.items():
                # TODO: validate AgentType
                self.type_dict[name] = agent  # type: ignore
        return self.type_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Return the frontend template for agent ``name``.

        Returns None (and logs) when the agent class cannot be introspected.

        Raises:
            ValueError: when the agent name is unknown.
        """
        try:
            if name in get_custom_nodes(self.type_name).keys():
                return get_custom_nodes(self.type_name)[name]
            return build_template_from_class(
                name, self.type_to_loader_dict, add_function=True
            )
        except ValueError as exc:
            raise ValueError("Agent not found") from exc
        except AttributeError as exc:
            logger.error(f"Agent {name} not loaded: {exc}")
            return None

    def to_list(self) -> List[str]:
        """List agent names enabled in settings (all of them in dev mode)."""
        names = []
        for _, agent in self.type_to_loader_dict.items():
            agent_name = (
                agent.function_name()
                if hasattr(agent, "function_name")
                else agent.__name__
            )
            if agent_name in settings.agents or settings.dev:
                names.append(agent_name)
        return names


# Module-level singleton used by the listing and graph modules.
agent_creator = AgentCreator()

View file

@ -0,0 +1,306 @@
from typing import Any, List, Optional
from langchain import LLMChain
from langchain.agents import (
AgentExecutor,
Tool,
ZeroShotAgent,
initialize_agent,
)
from langchain.agents.agent_toolkits import (
SQLDatabaseToolkit,
VectorStoreInfo,
VectorStoreRouterToolkit,
VectorStoreToolkit,
)
from langchain.agents.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
from langchain.agents.agent_toolkits.pandas.prompt import PREFIX as PANDAS_PREFIX
from langchain.agents.agent_toolkits.pandas.prompt import SUFFIX as PANDAS_SUFFIX
from langchain.agents.agent_toolkits.sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain.agents.agent_toolkits.vectorstore.prompt import (
PREFIX as VECTORSTORE_PREFIX,
)
from langchain.agents.agent_toolkits.vectorstore.prompt import (
ROUTER_PREFIX as VECTORSTORE_ROUTER_PREFIX,
)
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS as SQL_FORMAT_INSTRUCTIONS
from langchain.llms.base import BaseLLM
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import BaseLanguageModel
from langchain.sql_database import SQLDatabase
from langchain.tools.python.tool import PythonAstREPLTool
from langchain.tools.sql_database.prompt import QUERY_CHECKER
class JsonAgent(AgentExecutor):
    """Json agent: a ZeroShotAgent executor built from a JsonToolkit."""

    @staticmethod
    def function_name():
        # Name used by the creator's to_list()/frontend listing.
        return "JsonAgent"

    @classmethod
    def initialize(cls, *args, **kwargs):
        """Entry point used by langflow's loading machinery."""
        return cls.from_toolkit_and_llm(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @classmethod
    def from_toolkit_and_llm(cls, toolkit: JsonToolkit, llm: BaseLanguageModel):
        """Build the executor from a toolkit's tools and an LLM."""
        tools = toolkit.get_tools()
        tool_names = [tool.name for tool in tools]
        prompt = ZeroShotAgent.create_prompt(
            tools,
            prefix=JSON_PREFIX,
            suffix=JSON_SUFFIX,
            format_instructions=FORMAT_INSTRUCTIONS,
            input_variables=None,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
        )
        agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
        return cls.from_agent_and_tools(agent=agent, tools=tools, verbose=True)

    def run(self, *args, **kwargs):
        return super().run(*args, **kwargs)
class CSVAgent(AgentExecutor):
    """CSV agent: a ZeroShotAgent over a pandas DataFrame loaded from a CSV path."""

    @staticmethod
    def function_name():
        # Name used by the creator's to_list()/frontend listing.
        return "CSVAgent"

    @classmethod
    def initialize(cls, *args, **kwargs):
        """Entry point used by langflow's loading machinery."""
        return cls.from_toolkit_and_llm(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @classmethod
    def from_toolkit_and_llm(
        cls,
        path: str,
        llm: BaseLanguageModel,
        pandas_kwargs: Optional[dict] = None,
        **kwargs: Any
    ):
        """Load ``path`` into a DataFrame and build a Python-REPL-backed executor."""
        import pandas as pd  # type: ignore

        _kwargs = pandas_kwargs or {}
        df = pd.read_csv(path, **_kwargs)
        # The REPL tool exposes the DataFrame as ``df`` in its locals.
        tools = [PythonAstREPLTool(locals={"df": df})]  # type: ignore
        prompt = ZeroShotAgent.create_prompt(
            tools,
            prefix=PANDAS_PREFIX,
            suffix=PANDAS_SUFFIX,
            input_variables=["df", "input", "agent_scratchpad"],
        )
        # Bake a preview of the data into the prompt.
        partial_prompt = prompt.partial(df=str(df.head()))
        llm_chain = LLMChain(
            llm=llm,
            prompt=partial_prompt,
        )
        tool_names = [tool.name for tool in tools]
        agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
        return cls.from_agent_and_tools(agent=agent, tools=tools, verbose=True)

    def run(self, *args, **kwargs):
        return super().run(*args, **kwargs)
class VectorStoreAgent(AgentExecutor):
    """Vector Store agent: a ZeroShotAgent over a single VectorStoreToolkit."""

    @staticmethod
    def function_name():
        # Name used by the creator's to_list()/frontend listing.
        return "VectorStoreAgent"

    @classmethod
    def initialize(cls, *args, **kwargs):
        """Entry point used by langflow's loading machinery."""
        return cls.from_toolkit_and_llm(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @classmethod
    def from_toolkit_and_llm(
        cls, llm: BaseLLM, vectorstoreinfo: VectorStoreInfo, **kwargs: Any
    ):
        """Construct a vectorstore agent from an LLM and tools."""
        toolkit = VectorStoreToolkit(vectorstore_info=vectorstoreinfo, llm=llm)
        tools = toolkit.get_tools()
        prompt = ZeroShotAgent.create_prompt(tools, prefix=VECTORSTORE_PREFIX)
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
        )
        tool_names = [tool.name for tool in tools]
        agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
        return AgentExecutor.from_agent_and_tools(
            agent=agent, tools=tools, verbose=True
        )

    def run(self, *args, **kwargs):
        return super().run(*args, **kwargs)
class SQLAgent(AgentExecutor):
    """SQL agent: a ZeroShotAgent wired with SQL query/info/list/checker tools."""

    @staticmethod
    def function_name():
        # Name used by the creator's to_list()/frontend listing.
        return "SQLAgent"

    @classmethod
    def initialize(cls, *args, **kwargs):
        """Entry point used by langflow's loading machinery."""
        return cls.from_toolkit_and_llm(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @classmethod
    def from_toolkit_and_llm(cls, llm: BaseLLM, database_uri: str, **kwargs: Any):
        """Construct a sql agent from an LLM and tools."""
        db = SQLDatabase.from_uri(database_uri)
        toolkit = SQLDatabaseToolkit(db=db)

        # The right code should be this, but there is a problem with tools = toolkit.get_tools()
        # related to `OPENAI_API_KEY`
        # return create_sql_agent(llm=llm, toolkit=toolkit, verbose=True)
        from langchain.prompts import PromptTemplate
        from langchain.tools.sql_database.tool import (
            InfoSQLDatabaseTool,
            ListSQLDatabaseTool,
            QueryCheckerTool,
            QuerySQLDataBaseTool,
        )

        # Chain used by the query-checker tool to sanity-check generated SQL.
        llmchain = LLMChain(
            llm=llm,
            prompt=PromptTemplate(
                template=QUERY_CHECKER, input_variables=["query", "dialect"]
            ),
        )
        tools = [
            QuerySQLDataBaseTool(db=db),  # type: ignore
            InfoSQLDatabaseTool(db=db),  # type: ignore
            ListSQLDatabaseTool(db=db),  # type: ignore
            QueryCheckerTool(db=db, llm_chain=llmchain),  # type: ignore
        ]

        prefix = SQL_PREFIX.format(dialect=toolkit.dialect, top_k=10)

        prompt = ZeroShotAgent.create_prompt(
            tools=tools,  # type: ignore
            prefix=prefix,
            suffix=SQL_SUFFIX,
            format_instructions=SQL_FORMAT_INSTRUCTIONS,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
        )
        tool_names = [tool.name for tool in tools]  # type: ignore
        agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
        return AgentExecutor.from_agent_and_tools(
            agent=agent,
            tools=tools,  # type: ignore
            verbose=True,
            max_iterations=15,
            early_stopping_method="force",
        )

    def run(self, *args, **kwargs):
        return super().run(*args, **kwargs)
class VectorStoreRouterAgent(AgentExecutor):
    """Vector Store Router Agent"""

    @staticmethod
    def function_name():
        # Registry key used by CUSTOM_AGENTS.
        return "VectorStoreRouterAgent"

    @classmethod
    def initialize(cls, *args, **kwargs):
        return cls.from_toolkit_and_llm(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @classmethod
    def from_toolkit_and_llm(
        cls,
        llm: BaseLanguageModel,
        vectorstoreroutertoolkit: VectorStoreRouterToolkit,
        **kwargs: Any
    ):
        """Construct a vector store router agent from an LLM and tools."""
        router_tools = vectorstoreroutertoolkit.get_tools()
        # Zero-shot agent over the router toolkit's tools, with the
        # router-specific prompt prefix.
        zero_shot_agent = ZeroShotAgent(
            llm_chain=LLMChain(
                llm=llm,
                prompt=ZeroShotAgent.create_prompt(
                    router_tools, prefix=VECTORSTORE_ROUTER_PREFIX
                ),
            ),
            allowed_tools=[tool.name for tool in router_tools],
            **kwargs,
        )
        return AgentExecutor.from_agent_and_tools(
            agent=zero_shot_agent, tools=router_tools, verbose=True
        )

    def run(self, *args, **kwargs):
        """Delegate to AgentExecutor.run; kept for a uniform agent interface."""
        return super().run(*args, **kwargs)
class InitializeAgent(AgentExecutor):
    """Implementation of initialize_agent function"""

    @staticmethod
    def function_name():
        # Registry key used by CUSTOM_AGENTS.
        return "initialize_agent"

    @classmethod
    def initialize(
        cls,
        llm: BaseLLM,
        tools: List[Tool],
        agent: str,
        memory: Optional[BaseChatMemory] = None,
    ):
        """Build an AgentExecutor via LangChain's `initialize_agent` helper.

        Always requests intermediate steps so the caller can inspect the
        agent's reasoning trace.
        """
        return initialize_agent(
            tools=tools,
            llm=llm,
            # LangChain now uses Enum for agent, but we still support string
            agent=agent,  # type: ignore
            memory=memory,
            return_intermediate_steps=True,
        )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def run(self, *args, **kwargs):
        """Delegate to AgentExecutor.run; kept for a uniform agent interface."""
        return super().run(*args, **kwargs)
# Registry mapping node-type names (as they appear in flow configs) to the
# custom agent classes defined in this module.
CUSTOM_AGENTS = {
    "JsonAgent": JsonAgent,
    "CSVAgent": CSVAgent,
    "initialize_agent": InitializeAgent,
    "VectorStoreAgent": VectorStoreAgent,
    "VectorStoreRouterAgent": VectorStoreRouterAgent,
    "SQLAgent": SQLAgent,
}

View file

@ -0,0 +1,45 @@
from langchain import LLMChain
from langchain.agents import AgentExecutor, ZeroShotAgent
from langchain.agents.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import BaseLanguageModel
class MalfoyAgent(AgentExecutor):
    """Zero-shot agent built over a JsonToolkit.

    Fixed docstring: the original said "Json agent", a copy/paste from
    JsonAgent; this class is registered as the "MalfoyAgent" prebuilt.
    """

    # NOTE(review): usage of `prefix` is not visible in this module — it is
    # not passed to create_prompt below; confirm whether it is consumed
    # elsewhere or dead.
    prefix = "Malfoy: "

    @staticmethod
    def function_name():
        # Registry key; added for consistency with the other agent classes,
        # which all expose function_name().
        return "MalfoyAgent"

    @classmethod
    def initialize(cls, *args, **kwargs):
        return cls.from_toolkit_and_llm(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @classmethod
    def from_toolkit_and_llm(cls, toolkit: JsonToolkit, llm: BaseLanguageModel):
        """Build the executor from a JsonToolkit and an LLM."""
        tools = toolkit.get_tools()
        tool_names = [tool.name for tool in tools]
        prompt = ZeroShotAgent.create_prompt(
            tools,
            prefix=JSON_PREFIX,
            suffix=JSON_SUFFIX,
            format_instructions=FORMAT_INSTRUCTIONS,
            input_variables=None,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
        )
        agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
        return cls.from_agent_and_tools(agent=agent, tools=tools, verbose=True)

    def run(self, *args, **kwargs):
        """Delegate to AgentExecutor.run; kept for a uniform agent interface."""
        return super().run(*args, **kwargs)
# Registry of prebuilt (persona) agents exposed by this module.
PREBUILT_AGENTS = {
    "MalfoyAgent": MalfoyAgent,
}

View file

@ -0,0 +1,80 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel
from langflow.template.base import FrontendNode, Template, TemplateField
from langflow.utils.logger import logger
# Assuming necessary imports for Field, Template, and FrontendNode classes
class LangChainTypeCreator(BaseModel, ABC):
    """Base class for creators exposing one LangChain category (chains,
    llms, embeddings, ...) as frontend node templates."""

    # Category name, e.g. "chains"; used as the top-level key in to_dict().
    type_name: str
    # Lazily-built mapping of node name -> loader/class; subclasses populate
    # it via type_to_loader_dict.
    type_dict: Optional[Dict] = None

    @property
    def frontend_node_class(self) -> Type[FrontendNode]:
        """The class type of the FrontendNode created in frontend_node."""
        return FrontendNode

    @property
    @abstractmethod
    def type_to_loader_dict(self) -> Dict:
        # Default body for subclasses that call super(); raises until
        # type_dict has been populated.
        if self.type_dict is None:
            raise NotImplementedError
        return self.type_dict

    @abstractmethod
    def get_signature(self, name: str) -> Union[Optional[Dict[Any, Any]], FrontendNode]:
        pass

    @abstractmethod
    def to_list(self) -> List[str]:
        pass

    def to_dict(self) -> Dict:
        """Serialize every node in this category into one frontend-ready dict."""
        result: Dict = {self.type_name: {}}
        for name in self.to_list():
            # frontend_node.to_dict() returns a dict with the following structure:
            # {name: {template: {fields}, description: str}}
            # so we should update the result dict
            node = self.frontend_node(name)
            if node is not None:
                node = node.to_dict()
                result[self.type_name].update(node)
        return result

    def frontend_node(self, name: str) -> Union[FrontendNode, None]:
        """Build a FrontendNode for `name`; returns None when loading failed."""
        signature = self.get_signature(name)
        if signature is None:
            logger.error(f"Node {name} not loaded")
            return None
        if isinstance(signature, FrontendNode):
            # get_signature may already return a fully-built node.
            return signature
        # Convert each template entry (except the "_type" marker) into a
        # TemplateField, mapping dict keys to field attributes.
        fields = [
            TemplateField(
                name=key,
                field_type=value["type"],
                required=value.get("required", False),
                placeholder=value.get("placeholder", ""),
                is_list=value.get("list", False),
                show=value.get("show", True),
                multiline=value.get("multiline", False),
                value=value.get("value", None),
                suffixes=value.get("suffixes", []),
                file_types=value.get("fileTypes", []),
                content=value.get("content", None),
            )
            for key, value in signature["template"].items()
            if key != "_type"
        ]
        template = Template(type_name=name, fields=fields)
        return self.frontend_node_class(
            template=template,
            description=signature.get("description", ""),
            base_classes=signature["base_classes"],
            name=name,
        )

View file

@ -0,0 +1,3 @@
from langflow.interface.chains.base import ChainCreator
__all__ = ["ChainCreator"]

View file

@ -0,0 +1,54 @@
from typing import Dict, List, Optional, Type
from langflow.custom.customs import get_custom_nodes
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.custom_lists import chain_type_to_cls_dict
from langflow.settings import settings
from langflow.template.nodes import ChainFrontendNode
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
# Assuming necessary imports for Field, Template, and FrontendNode classes
class ChainCreator(LangChainTypeCreator):
    """Expose LangChain (and custom) chain classes to the frontend."""

    type_name: str = "chains"

    @property
    def frontend_node_class(self) -> Type[ChainFrontendNode]:
        return ChainFrontendNode

    @property
    def type_to_loader_dict(self) -> Dict:
        """Lazily build the name -> chain class mapping, filtered by settings."""
        if self.type_dict is None:
            # Copy so updating with CUSTOM_CHAINS does not mutate the shared
            # module-level chain_type_to_cls_dict.
            self.type_dict = dict(chain_type_to_cls_dict)
            # Imported here to avoid a circular import with the custom module.
            from langflow.interface.chains.custom import CUSTOM_CHAINS

            self.type_dict.update(CUSTOM_CHAINS)
            # Filter according to settings.chains (everything passes in dev mode).
            self.type_dict = {
                name: chain
                for name, chain in self.type_dict.items()
                if name in settings.chains or settings.dev
            }
        return self.type_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Return the template signature for a chain, or None on load failure."""
        try:
            # Look up custom nodes once instead of twice.
            custom_nodes = get_custom_nodes(self.type_name)
            if name in custom_nodes:
                return custom_nodes[name]
            return build_template_from_class(name, self.type_to_loader_dict)
        except ValueError as exc:
            raise ValueError("Chain not found") from exc
        except AttributeError as exc:
            logger.error(f"Chain {name} not loaded: {exc}")
            return None

    def to_list(self) -> List[str]:
        """List chain names: defaults first, then custom nodes."""
        # Use self.type_name rather than a hard-coded "chains" literal.
        custom_chains = list(get_custom_nodes(self.type_name).keys())
        default_chains = list(self.type_to_loader_dict.keys())
        return default_chains + custom_chains


chain_creator = ChainCreator()

View file

@ -0,0 +1,101 @@
from typing import Dict, Optional, Type
from langchain.chains import ConversationChain
from langchain.memory.buffer import ConversationBufferMemory
from langchain.schema import BaseMemory
from pydantic import Field, root_validator
from langflow.graph.utils import extract_input_variables_from_prompt
# Suffix appended to custom-chain templates: conversation history, the
# human turn, and the configurable AI prefix.
# Fixed: the original opened with four quotes (""""), which leaked a
# literal '"' as the first character of the rendered prompt.
DEFAULT_SUFFIX = """
Current conversation:
{history}
Human: {input}
{ai_prefix}"""
class BaseCustomChain(ConversationChain):
    """BaseCustomChain is a chain you can use to have a conversation with a custom character."""

    # Conversation template; its {placeholders} are filled in from
    # same-named field values by build_template at validation time.
    template: Optional[str]
    ai_prefix_value: Optional[str]
    """Field to use as the ai_prefix. It needs to be set and has to be in the template"""

    @root_validator(pre=False)
    def build_template(cls, values):
        """Render `template` with field values and sync prompt/memory state.

        Runs after the default validators (pre=False), so `memory` and
        `prompt` are already populated in `values`.
        """
        format_dict = {}
        input_variables = extract_input_variables_from_prompt(values["template"])

        if values.get("ai_prefix_value", None) is None:
            # Fall back to the memory's configured AI prefix.
            values["ai_prefix_value"] = values["memory"].ai_prefix

        for key in input_variables:
            # Use the field's value when present; otherwise keep the literal
            # "{key}" placeholder so it survives str.format below.
            new_value = values.get(key, f"{{{key}}}")
            format_dict[key] = new_value
            if key == values.get("ai_prefix_value", None):
                values["memory"].ai_prefix = new_value

        values["template"] = values["template"].format(**format_dict)
        # (Removed a redundant `values["template"] = values["template"]`
        # self-assignment present in the original.)
        values["input_variables"] = extract_input_variables_from_prompt(
            values["template"]
        )
        values["prompt"].template = values["template"]
        values["prompt"].input_variables = values["input_variables"]
        return values
class SeriesCharacterChain(BaseCustomChain):
    """SeriesCharacterChain is a chain you can use to have a conversation with a character from a series."""

    # Character name and series title; both are substituted into `template`
    # by BaseCustomChain.build_template.
    character: str
    series: str
    template: Optional[
        str
    ] = """I want you to act like {character} from {series}.
I want you to respond and answer like {character}. do not write any explanations. only answer like {character}.
You must know all of the knowledge of {character}.
Current conversation:
{history}
Human: {input}
{character}:"""
    # Default memory store. (The original attached this note, as a bare
    # string, to ai_prefix_value below — it belongs to `memory`.)
    memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
    # Name of the template variable ("character") whose value becomes the
    # memory's AI prefix — see BaseCustomChain.build_template.
    ai_prefix_value: Optional[str] = "character"
class MidJourneyPromptChain(BaseCustomChain):
    """MidJourneyPromptChain is a chain you can use to generate new MidJourney prompts."""

    # Persona template; memory/ai_prefix handling is inherited from
    # BaseCustomChain.
    template: Optional[
        str
    ] = """I want you to act as a prompt generator for Midjourney's artificial intelligence program.
Your job is to provide detailed and creative descriptions that will inspire unique and interesting images from the AI.
Keep in mind that the AI is capable of understanding a wide range of language and can interpret abstract concepts, so feel free to be as imaginative and descriptive as possible.
For example, you could describe a scene from a futuristic city, or a surreal landscape filled with strange creatures.
The more detailed and imaginative your description, the more interesting the resulting image will be. Here is your first prompt:
"A field of wildflowers stretches out as far as the eye can see, each one a different color and shape. In the distance, a massive tree towers over the landscape, its branches reaching up to the sky like tentacles.\"
Current conversation:
{history}
Human: {input}
AI:"""  # noqa: E501
class TimeTravelGuideChain(BaseCustomChain):
    """Conversation chain that role-plays a helpful time-travel guide."""

    template: Optional[
        str
    ] = """I want you to act as my time travel guide. You are helpful and creative. I will provide you with the historical period or future time I want to visit and you will suggest the best events, sights, or people to experience. Provide the suggestions and any necessary information.
Current conversation:
{history}
Human: {input}
AI:"""  # noqa: E501
# Registry of custom chains exposed to the frontend; merged into the
# LangChain chain mapping by the chains creator.
CUSTOM_CHAINS: Dict[str, Type[ConversationChain]] = {
    "SeriesCharacterChain": SeriesCharacterChain,
    "MidJourneyPromptChain": MidJourneyPromptChain,
    "TimeTravelGuideChain": TimeTravelGuideChain,
}

View file

@ -1,43 +1,86 @@
## LLM
import inspect
from typing import Any
from langchain import llms
from langchain.llms.openai import OpenAIChat
from langchain import (
chains,
document_loaders,
embeddings,
llms,
memory,
requests,
text_splitter,
utilities,
vectorstores,
)
from langchain.agents import agent_toolkits
from langchain.chat_models import ChatOpenAI
from langchain.sql_database import SQLDatabase
from langflow.interface.importing.utils import import_class
## LLMs
llm_type_to_cls_dict = llms.type_to_cls_dict
llm_type_to_cls_dict["openai-chat"] = OpenAIChat
llm_type_to_cls_dict["openai-chat"] = ChatOpenAI # type: ignore
## Memory
# from langchain.memory.buffer_window import ConversationBufferWindowMemory
# from langchain.memory.chat_memory import ChatMessageHistory
# from langchain.memory.combined import CombinedMemory
# from langchain.memory.entity import ConversationEntityMemory
# from langchain.memory.kg import ConversationKGMemory
# from langchain.memory.readonly import ReadOnlySharedMemory
# from langchain.memory.simple import SimpleMemory
# from langchain.memory.summary import ConversationSummaryMemory
# from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
memory_type_to_cls_dict: dict[str, Any] = {
# "CombinedMemory": CombinedMemory,
# "ConversationBufferWindowMemory": ConversationBufferWindowMemory,
# "ConversationBufferMemory": ConversationBufferMemory,
# "SimpleMemory": SimpleMemory,
# "ConversationSummaryBufferMemory": ConversationSummaryBufferMemory,
# "ConversationKGMemory": ConversationKGMemory,
# "ConversationEntityMemory": ConversationEntityMemory,
# "ConversationSummaryMemory": ConversationSummaryMemory,
# "ChatMessageHistory": ChatMessageHistory,
# "ConversationStringBufferMemory": ConversationStringBufferMemory,
# "ReadOnlySharedMemory": ReadOnlySharedMemory,
## Chains
chain_type_to_cls_dict: dict[str, Any] = {
chain_name: import_class(f"langchain.chains.{chain_name}")
for chain_name in chains.__all__
}
## Toolkits
toolkit_type_to_loader_dict: dict[str, Any] = {
toolkit_name: import_class(f"langchain.agents.agent_toolkits.{toolkit_name}")
# if toolkit_name is lower case it is a loader
for toolkit_name in agent_toolkits.__all__
if toolkit_name.islower()
}
## Chain
# from langchain.chains.loading import type_to_loader_dict
# from langchain.chains.conversation.base import ConversationChain
toolkit_type_to_cls_dict: dict[str, Any] = {
toolkit_name: import_class(f"langchain.agents.agent_toolkits.{toolkit_name}")
# if toolkit_name is not lower case it is a class
for toolkit_name in agent_toolkits.__all__
if not toolkit_name.islower()
}
# chain_type_to_cls_dict = type_to_loader_dict
# chain_type_to_cls_dict["conversation_chain"] = ConversationChain
## Memories
memory_type_to_cls_dict: dict[str, Any] = {
memory_name: import_class(f"langchain.memory.{memory_name}")
for memory_name in memory.__all__
}
## Wrappers
wrapper_type_to_cls_dict: dict[str, Any] = {
wrapper.__name__: wrapper for wrapper in [requests.RequestsWrapper]
}
## Embeddings
embedding_type_to_cls_dict: dict[str, Any] = {
embedding_name: import_class(f"langchain.embeddings.{embedding_name}")
for embedding_name in embeddings.__all__
}
## Vector Stores
vectorstores_type_to_cls_dict: dict[str, Any] = {
vectorstore_name: import_class(f"langchain.vectorstores.{vectorstore_name}")
for vectorstore_name in vectorstores.__all__
}
## Document Loaders
documentloaders_type_to_cls_dict: dict[str, Any] = {
documentloader_name: import_class(
f"langchain.document_loaders.{documentloader_name}"
)
for documentloader_name in document_loaders.__all__
}
## Text Splitters
textsplitter_type_to_cls_dict: dict[str, Any] = dict(
inspect.getmembers(text_splitter, inspect.isclass)
)
## Utilities
utility_type_to_cls_dict: dict[str, Any] = dict(
inspect.getmembers(utilities, inspect.isclass)
)
utility_type_to_cls_dict["SQLDatabase"] = SQLDatabase

View file

@ -0,0 +1,146 @@
from typing import Dict, List, Optional
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.custom_lists import documentloaders_type_to_cls_dict
from langflow.settings import settings
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
def build_file_path_template(
    suffixes: list, fileTypes: list, name: str = "file_path"
) -> Dict:
    """Describe a required, visible file-upload field for a document loader.

    `suffixes` are dotted extensions (e.g. ".csv"); `fileTypes` are the
    frontend's extension labels (e.g. "csv"); `name` is the template key.
    """
    field = dict(
        type="file",
        required=True,
        show=True,
        name=name,
        value="",
        suffixes=suffixes,
        fileTypes=fileTypes,
    )
    return field
class DocumentLoaderCreator(LangChainTypeCreator):
    """Expose LangChain document loaders to the frontend, patching each
    loader's template with the appropriate file/URL input field."""

    type_name: str = "documentloaders"

    @property
    def type_to_loader_dict(self) -> Dict:
        return documentloaders_type_to_cls_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Get the signature of a document loader."""
        try:
            signature = build_template_from_class(
                name, documentloaders_type_to_cls_dict
            )
            # File-based loaders: accepted suffixes per loader. The frontend
            # fileTypes are always the suffixes without the leading dot, so
            # they are derived rather than repeated.
            loader_suffixes = {
                "AirbyteJSONLoader": [".json"],
                "CoNLLULoader": [".csv"],
                "CSVLoader": [".csv"],
                "UnstructuredEmailLoader": [".eml"],
                "EverNoteLoader": [".xml"],
                "FacebookChatLoader": [".json"],
                "GutenbergLoader": [".txt"],
                "BSHTMLLoader": [".html"],
                "UnstructuredHTMLLoader": [".html"],
                "UnstructuredImageLoader": [".jpg", ".jpeg", ".png", ".gif", ".bmp"],
                "UnstructuredMarkdownLoader": [".md"],
                "PyPDFLoader": [".pdf"],
                "UnstructuredPowerPointLoader": [".pptx", ".ppt"],
                "SRTLoader": [".srt"],
                "TelegramChatLoader": [".json"],
                "TextLoader": [".txt"],
                "UnstructuredWordDocumentLoader": [".docx", ".doc"],
            }
            if name in loader_suffixes:
                suffixes = loader_suffixes[name]
                signature["template"]["file_path"] = build_file_path_template(
                    suffixes=suffixes,
                    fileTypes=[suffix[1:] for suffix in suffixes],
                )
            elif name in {
                "WebBaseLoader",
                "AZLyricsLoader",
                "CollegeConfidentialLoader",
                "HNLoader",
                "IFixitLoader",
                "IMSDbLoader",
            }:
                # Web loaders take a URL instead of a file path.
                signature["template"]["web_path"] = {
                    "type": "str",
                    "required": True,
                    "show": True,
                    "name": "web_path",
                    "value": "",
                    "display_name": "Web Page",
                }
            elif name in {"GitbookLoader"}:
                signature["template"]["web_page"] = {
                    "type": "str",
                    "required": True,
                    "show": True,
                    "name": "web_page",
                    "value": "",
                    "display_name": "Web Page",
                }
            elif name in {"ReadTheDocsLoader"}:
                signature["template"]["path"] = {
                    "type": "str",
                    "required": True,
                    "show": True,
                    "name": "path",
                    "value": "",
                    "display_name": "Web Page",
                }
            return signature
        except ValueError as exc:
            # Fixed typo in the message: "Documment" -> "Document".
            raise ValueError(f"Document Loader {name} not found") from exc
        except AttributeError as exc:
            logger.error(f"Document Loader {name} not loaded: {exc}")
            return None

    def to_list(self) -> List[str]:
        """Loader class names allowed by settings (everything in dev mode)."""
        return [
            documentloader.__name__
            for documentloader in self.type_to_loader_dict.values()
            if documentloader.__name__ in settings.documentloaders or settings.dev
        ]


documentloader_creator = DocumentLoaderCreator()

View file

@ -0,0 +1,36 @@
from typing import Dict, List, Optional
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.custom_lists import embedding_type_to_cls_dict
from langflow.settings import settings
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
class EmbeddingCreator(LangChainTypeCreator):
    """Creator that surfaces LangChain embedding classes to the frontend."""

    type_name: str = "embeddings"

    @property
    def type_to_loader_dict(self) -> Dict:
        # Static mapping; embeddings need no lazy caching.
        return embedding_type_to_cls_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Get the signature of an embedding."""
        try:
            return build_template_from_class(name, embedding_type_to_cls_dict)
        except ValueError as exc:
            raise ValueError(f"Embedding {name} not found") from exc
        except AttributeError as exc:
            logger.error(f"Embedding {name} not loaded: {exc}")
            return None

    def to_list(self) -> List[str]:
        """Names of embeddings enabled by settings (everything in dev mode)."""
        class_names = (cls.__name__ for cls in self.type_to_loader_dict.values())
        return [
            class_name
            for class_name in class_names
            if class_name in settings.embeddings or settings.dev
        ]


embedding_creator = EmbeddingCreator()

View file

@ -0,0 +1,7 @@
from langflow.interface.importing.utils import import_by_type # noqa: F401
# This module is used to import any langchain class by name.
# Names intended for re-export from this package.
# NOTE(review): Python's star-import machinery honors the spelling
# `__all__`, not `ALL` — confirm whether this should be renamed
# (import_by_type is imported above in this module).
ALL = [
    "import_by_type",
]

View file

@ -0,0 +1,147 @@
# This module is used to import any langchain class by name.
import importlib
from typing import Any, Type
from langchain import PromptTemplate
from langchain.agents import Agent
from langchain.chains.base import Chain
from langchain.chat_models.base import BaseChatModel
from langchain.llms.base import BaseLLM
from langchain.tools import BaseTool
from langflow.interface.tools.base import tool_creator
def import_module(module_path: str) -> Any:
    """Import a module ("pkg.mod") or a single object ("from pkg.mod import name").

    Returns the module object for the plain form, or the named attribute
    when the "from ... import ..." form is used.
    """
    if "from" in module_path:
        # "from <module> import <object>" -> fetch <object> off the module.
        _, target_module, _, object_name = module_path.split()
        return getattr(importlib.import_module(target_module), object_name)
    # Plain dotted path: import and return the module itself.
    return importlib.import_module(module_path)
def import_by_type(_type: str, name: str) -> Any:
    """Import class by type and name.

    Dispatches to the per-category importer below; raises KeyError for an
    unknown `_type`.
    """
    if _type is None:
        raise ValueError(f"Type cannot be None. Check if {name} is in the config file.")
    # Category -> importer. "llms" has two importers; the right one is
    # chosen below based on the requested name.
    func_dict = {
        "agents": import_agent,
        "prompts": import_prompt,
        "llms": {"llm": import_llm, "chat": import_chat_llm},
        "tools": import_tool,
        "chains": import_chain,
        "toolkits": import_toolkit,
        "wrappers": import_wrapper,
        "memory": import_memory,
        "embeddings": import_embedding,
        "vectorstores": import_vectorstore,
        "documentloaders": import_documentloader,
        "textsplitters": import_textsplitter,
        "utilities": import_utility,
    }
    if _type == "llms":
        # e.g. "ChatOpenAI" -> chat importer; anything else -> plain LLM.
        key = "chat" if "chat" in name.lower() else "llm"
        loaded_func = func_dict[_type][key]  # type: ignore
    else:
        loaded_func = func_dict[_type]
    return loaded_func(name)
def import_chat_llm(llm: str) -> BaseChatModel:
    """Import chat llm from llm name"""
    return import_class(f"langchain.chat_models.{llm}")


def import_memory(memory: str) -> Any:
    """Import memory from memory name"""
    return import_module(f"from langchain.memory import {memory}")


def import_class(class_path: str) -> Any:
    """Import class from a dotted path, e.g. "langchain.llms.OpenAI"."""
    module_path, class_name = class_path.rsplit(".", 1)
    module = import_module(module_path)
    return getattr(module, class_name)
def import_prompt(prompt: str) -> Type[PromptTemplate]:
    """Import prompt from prompt name.

    "ZeroShotPrompt" maps to LangChain's PromptTemplate; custom prompts are
    looked up in CUSTOM_PROMPTS; anything else is imported from
    langchain.prompts by name.
    """
    # Fixed: the docstring was originally placed AFTER this import, making
    # it a dead string instead of the function's docstring.
    # Imported here to avoid a circular import at module load time.
    from langflow.interface.prompts.custom import CUSTOM_PROMPTS

    if prompt == "ZeroShotPrompt":
        return import_class("langchain.prompts.PromptTemplate")
    elif prompt in CUSTOM_PROMPTS:
        return CUSTOM_PROMPTS[prompt]
    return import_class(f"langchain.prompts.{prompt}")
def import_wrapper(wrapper: str) -> Any:
    """Import wrapper from wrapper name"""
    return import_module(f"from langchain.requests import {wrapper}")


def import_toolkit(toolkit: str) -> Any:
    """Import toolkit from toolkit name"""
    return import_module(f"from langchain.agents.agent_toolkits import {toolkit}")


def import_agent(agent: str) -> Agent:
    """Import agent from agent name"""
    # check for custom agent
    return import_class(f"langchain.agents.{agent}")


def import_llm(llm: str) -> BaseLLM:
    """Import llm from llm name"""
    return import_class(f"langchain.llms.{llm}")


def import_tool(tool: str) -> BaseTool:
    """Import tool from tool name"""
    # Tools come from the tool creator's registry, not a module path.
    return tool_creator.type_to_loader_dict[tool]["fcn"]


def import_chain(chain: str) -> Type[Chain]:
    """Import chain from chain name"""
    # Imported here to avoid a circular import at module load time.
    from langflow.interface.chains.custom import CUSTOM_CHAINS

    if chain in CUSTOM_CHAINS:
        return CUSTOM_CHAINS[chain]
    return import_class(f"langchain.chains.{chain}")


def import_embedding(embedding: str) -> Any:
    """Import embedding from embedding name"""
    return import_class(f"langchain.embeddings.{embedding}")


def import_vectorstore(vectorstore: str) -> Any:
    """Import vectorstore from vectorstore name"""
    return import_class(f"langchain.vectorstores.{vectorstore}")


def import_documentloader(documentloader: str) -> Any:
    """Import documentloader from documentloader name"""
    return import_class(f"langchain.document_loaders.{documentloader}")


def import_textsplitter(textsplitter: str) -> Any:
    """Import textsplitter from textsplitter name"""
    return import_class(f"langchain.text_splitter.{textsplitter}")


def import_utility(utility: str) -> Any:
    """Import utility from utility name"""
    # SQLDatabase lives in langchain.sql_database, not langchain.utilities.
    if utility == "SQLDatabase":
        return import_class(f"langchain.sql_database.{utility}")
    return import_class(f"langchain.utilities.{utility}")

View file

@ -1,82 +1,41 @@
from langchain import agents, chains, prompts
from langchain.agents.load_tools import get_all_tool_names
from langflow.custom import customs
from langflow.interface.custom_lists import (
llm_type_to_cls_dict,
memory_type_to_cls_dict,
)
from langflow.settings import settings
from langflow.utils import util
from langflow.interface.agents.base import agent_creator
from langflow.interface.chains.base import chain_creator
from langflow.interface.document_loaders.base import documentloader_creator
from langflow.interface.embeddings.base import embedding_creator
from langflow.interface.llms.base import llm_creator
from langflow.interface.memories.base import memory_creator
from langflow.interface.prompts.base import prompt_creator
from langflow.interface.text_splitters.base import textsplitter_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.utilities.base import utility_creator
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
def list_type(object_type: str):
"""List all components"""
def get_type_dict():
return {
"chains": list_chain_types,
"agents": list_agents,
"prompts": list_prompts,
"llms": list_llms,
"memories": list_memories,
"tools": list_tools,
}.get(object_type, lambda: "Invalid type")()
"agents": agent_creator.to_list(),
"prompts": prompt_creator.to_list(),
"llms": llm_creator.to_list(),
"tools": tool_creator.to_list(),
"chains": chain_creator.to_list(),
"memory": memory_creator.to_list(),
"toolkits": toolkits_creator.to_list(),
"wrappers": wrapper_creator.to_list(),
"documentLoaders": documentloader_creator.to_list(),
"vectorStore": vectorstore_creator.to_list(),
"embeddings": embedding_creator.to_list(),
"textSplitters": textsplitter_creator.to_list(),
"utilities": utility_creator.to_list(),
}
def list_agents():
"""List all agent types"""
return [
agent.__name__
for agent in agents.loading.AGENT_TO_CLASS.values()
if agent.__name__ in settings.agents or settings.dev
]
LANGCHAIN_TYPES_DICT = get_type_dict()
# Now we'll build a dict with Langchain types and ours
def list_prompts():
"""List all prompt types"""
custom_prompts = customs.get_custom_prompts()
library_prompts = [
prompt.__annotations__["return"].__name__
for prompt in prompts.loading.type_to_loader_dict.values()
if prompt.__annotations__["return"].__name__ in settings.prompts or settings.dev
]
return library_prompts + list(custom_prompts.keys())
def list_tools():
"""List all load tools"""
tools = []
for tool in get_all_tool_names():
tool_params = util.get_tool_params(util.get_tools_dict(tool))
if tool_params and tool_params["name"] in settings.tools or settings.dev:
tools.append(tool_params["name"])
return tools
def list_llms():
"""List all llm types"""
return [
llm.__name__
for llm in llm_type_to_cls_dict.values()
if llm.__name__ in settings.llms or settings.dev
]
def list_chain_types():
"""List all chain types"""
return [
chain.__annotations__["return"].__name__
for chain in chains.loading.type_to_loader_dict.values()
if chain.__annotations__["return"].__name__ in settings.chains or settings.dev
]
def list_memories():
"""List all memory types"""
return [
memory.__name__
for memory in memory_type_to_cls_dict.values()
if memory.__name__ in settings.memories or settings.dev
]
ALL_TYPES_DICT = {
**LANGCHAIN_TYPES_DICT,
"Custom": ["Custom Tool", "Python Function"],
}

View file

@ -0,0 +1,3 @@
from langflow.interface.llms.base import LLMCreator
__all__ = ["LLMCreator"]

View file

@ -0,0 +1,43 @@
from typing import Dict, List, Optional, Type
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.custom_lists import llm_type_to_cls_dict
from langflow.settings import settings
from langflow.template.nodes import LLMFrontendNode
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
class LLMCreator(LangChainTypeCreator):
    """Expose LangChain LLM classes to the frontend."""

    type_name: str = "llms"

    @property
    def frontend_node_class(self) -> Type[LLMFrontendNode]:
        return LLMFrontendNode

    @property
    def type_to_loader_dict(self) -> Dict:
        """Lazily cache the LLM name -> class mapping."""
        if self.type_dict is None:
            self.type_dict = llm_type_to_cls_dict
        return self.type_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Get the signature of an llm."""
        try:
            return build_template_from_class(name, llm_type_to_cls_dict)
        except ValueError as exc:
            # Include the name so the failure is actionable, matching the
            # other creators' f"... {name} not found" style.
            raise ValueError(f"LLM {name} not found") from exc
        except AttributeError as exc:
            logger.error(f"LLM {name} not loaded: {exc}")
            return None

    def to_list(self) -> List[str]:
        """LLM class names allowed by settings (everything in dev mode)."""
        return [
            llm.__name__
            for llm in self.type_to_loader_dict.values()
            if llm.__name__ in settings.llms or settings.dev
        ]


llm_creator = LLMCreator()

View file

@ -1,7 +1,10 @@
import json
from typing import Any, Dict, Optional
from typing import Any, Callable, Dict, Optional
from langchain.agents import ZeroShotAgent
from langchain.agents import agent as agent_module
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.agents.load_tools import (
_BASE_TOOLS,
_EXTRA_LLM_TOOLS,
@ -15,29 +18,95 @@ from langchain.chains.loading import load_chain_from_config
from langchain.llms.base import BaseLLM
from langchain.llms.loading import load_llm_from_config
from langflow.interface.agents.custom import CUSTOM_AGENTS
from langflow.interface.importing.utils import import_by_type
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.types import get_type_list
from langflow.utils import payload, util
from langflow.interface.utils import load_file_into_dict
from langflow.utils import util, validate
def load_flow_from_json(path: str):
def instantiate_class(node_type: str, base_type: str, params: Dict) -> Any:
"""Instantiate class from module type and key, and params"""
if node_type in CUSTOM_AGENTS:
if custom_agent := CUSTOM_AGENTS.get(node_type):
return custom_agent.initialize(**params) # type: ignore
class_object = import_by_type(_type=base_type, name=node_type)
if base_type == "agents":
# We need to initialize it differently
return load_agent_executor(class_object, params)
elif base_type == "prompts":
if node_type == "ZeroShotPrompt":
if "tools" not in params:
params["tools"] = []
return ZeroShotAgent.create_prompt(**params)
elif base_type == "tools":
if node_type == "JsonSpec":
params["dict_"] = load_file_into_dict(params.pop("path"))
return class_object(**params)
elif node_type == "PythonFunction":
# If the node_type is "PythonFunction"
# we need to get the function from the params
# which will be a str containing a python function
# and then we need to compile it and return the function
# as the instance
function_string = params["code"]
if isinstance(function_string, str):
return validate.eval_function(function_string)
raise ValueError("Function should be a string")
elif node_type.lower() == "tool":
return class_object(**params)
elif base_type == "toolkits":
loaded_toolkit = class_object(**params)
# Check if node_type has a loader
if toolkits_creator.has_create_function(node_type):
return load_toolkits_executor(node_type, loaded_toolkit, params)
return loaded_toolkit
elif base_type == "embeddings":
params.pop("model")
return class_object(**params)
elif base_type == "vectorstores":
if len(params.get("documents", [])) == 0:
# Error when the pdf or other source was not correctly
# loaded.
raise ValueError(
"The source you provided did not load correctly or was empty."
"This may cause an error in the vectorstore."
)
return class_object.from_documents(**params)
elif base_type == "documentloaders":
return class_object(**params).load()
elif base_type == "textsplitters":
documents = params.pop("documents")
text_splitter = class_object(**params)
return text_splitter.split_documents(documents)
elif base_type == "utilities":
if node_type == "SQLDatabase":
return class_object.from_uri(params.pop("uri"))
return class_object(**params)
def load_flow_from_json(path: str, build=True):
# This is done to avoid circular imports
from langflow.graph import Graph
"""Load flow from json file"""
with open(path, "r") as f:
with open(path, "r", encoding="utf-8") as f:
flow_graph = json.load(f)
data_graph = flow_graph["data"]
extracted_json = extract_json(data_graph)
return load_langchain_type_from_config(config=extracted_json)
def extract_json(data_graph):
nodes = data_graph["nodes"]
# Substitute ZeroShotPrompt with PromptTemplate
nodes = replace_zero_shot_prompt_with_prompt_template(nodes)
# nodes = replace_zero_shot_prompt_with_prompt_template(nodes)
# Add input variables
nodes = payload.extract_input_variables(nodes)
# nodes = payload.extract_input_variables(nodes)
# Nodes, edges and root node
edges = data_graph["edges"]
root = payload.get_root_node(nodes, edges)
return payload.build_json(root, nodes, edges)
graph = Graph(nodes, edges)
return graph.build() if build else graph
def replace_zero_shot_prompt_with_prompt_template(nodes):
@ -92,6 +161,25 @@ def load_agent_executor_from_config(
)
def load_agent_executor(agent_class: type[agent_module.Agent], params, **kwargs):
"""Load agent executor from agent class, tools and chain"""
allowed_tools = params["allowed_tools"]
llm_chain = params["llm_chain"]
tool_names = [tool.name for tool in allowed_tools]
agent = agent_class(allowed_tools=tool_names, llm_chain=llm_chain)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=allowed_tools,
**kwargs,
)
def load_toolkits_executor(node_type: str, toolkit: BaseToolkit, params: dict):
create_function: Callable = toolkits_creator.get_create_function(node_type)
if llm := params.get("llm"):
return create_function(llm=llm, toolkit=toolkit)
def load_tools_from_config(tool_list: list[dict]) -> list:
"""Load tools based on a config list.

View file

@ -0,0 +1,3 @@
from langflow.interface.memories.base import MemoryCreator
__all__ = ["MemoryCreator"]

View file

@ -0,0 +1,44 @@
from typing import Dict, List, Optional, Type
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.custom_lists import memory_type_to_cls_dict
from langflow.settings import settings
from langflow.template.base import FrontendNode
from langflow.template.nodes import MemoryFrontendNode
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
class MemoryCreator(LangChainTypeCreator):
    """Creator that exposes langchain memory classes to the frontend."""

    type_name: str = "memories"

    @property
    def frontend_node_class(self) -> Type[FrontendNode]:
        """The class type of the FrontendNode created in frontend_node."""
        return MemoryFrontendNode

    @property
    def type_to_loader_dict(self) -> Dict:
        # Cache the name -> class mapping on first access.
        if self.type_dict is None:
            self.type_dict = memory_type_to_cls_dict
        return self.type_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Get the signature of a memory."""
        try:
            template = build_template_from_class(name, memory_type_to_cls_dict)
        except ValueError as exc:
            raise ValueError("Memory not found") from exc
        except AttributeError as exc:
            logger.error(f"Memory {name} not loaded: {exc}")
            return None
        return template

    def to_list(self) -> List[str]:
        # Expose only the memories enabled in settings (or all in dev mode).
        names = []
        for memory_class in self.type_to_loader_dict.values():
            if memory_class.__name__ in settings.memories or settings.dev:
                names.append(memory_class.__name__)
        return names


memory_creator = MemoryCreator()

View file

@ -0,0 +1,3 @@
from langflow.interface.prompts.base import PromptCreator
__all__ = ["PromptCreator"]

View file

@ -0,0 +1,64 @@
from typing import Dict, List, Optional, Type
from langchain import prompts
from langflow.custom.customs import get_custom_nodes
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.importing.utils import import_class
from langflow.settings import settings
from langflow.template.nodes import PromptFrontendNode
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
class PromptCreator(LangChainTypeCreator):
    """Creator that exposes langchain prompt classes plus langflow custom prompts."""

    type_name: str = "prompts"

    @property
    def frontend_node_class(self) -> Type[PromptFrontendNode]:
        return PromptFrontendNode

    @property
    def type_to_loader_dict(self) -> Dict:
        if self.type_dict is None:
            # if prompt_name is not lower case it is a class
            self.type_dict = {}
            for prompt_name in prompts.__all__:
                self.type_dict[prompt_name] = import_class(
                    f"langchain.prompts.{prompt_name}"
                )
            # Merge CUSTOM_PROMPTS into self.type_dict
            from langflow.interface.prompts.custom import CUSTOM_PROMPTS

            self.type_dict.update(CUSTOM_PROMPTS)
            # Now filter according to settings.prompts (dev mode allows all).
            allowed = {}
            for name, prompt in self.type_dict.items():
                if name in settings.prompts or settings.dev:
                    allowed[name] = prompt
            self.type_dict = allowed
        return self.type_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Return the frontend template for the prompt ``name``, or None."""
        try:
            custom_nodes = get_custom_nodes(self.type_name)
            if name in custom_nodes.keys():
                return custom_nodes[name]
            return build_template_from_class(name, self.type_to_loader_dict)
        except ValueError as exc:
            logger.error(f"Prompt {name} not found: {exc}")
        except AttributeError as exc:
            logger.error(f"Prompt {name} not loaded: {exc}")
        return None

    def to_list(self) -> List[str]:
        custom_prompts = get_custom_nodes("prompts")
        return list(self.type_to_loader_dict.keys()) + list(custom_prompts.keys())


prompt_creator = PromptCreator()

View file

@ -0,0 +1,77 @@
from typing import Dict, List, Optional, Type
from langchain.prompts import PromptTemplate
from pydantic import root_validator
from langflow.graph.utils import extract_input_variables_from_prompt
# Steps to create a BaseCustomPrompt:
# 1. Create a prompt template that ends with:
# Current conversation:
# {history}
# Human: {input}
# {ai_prefix}:
# 2. Create a class that inherits from BaseCustomPrompt
# 3. Add the following class attributes:
# template: str = ""
# description: Optional[str]
# ai_prefix: Optional[str] = "{ai_prefix}"
# 3.1. The ai_prefix should be a value in input_variables
# SeriesCharacterPrompt is a working example
# If used in a LLMChain, with a Memory module, it will work as expected
# We should consider creating ConversationalChains that expose custom parameters
# That way it will be easier to create custom prompts
class BaseCustomPrompt(PromptTemplate):
    """PromptTemplate whose template is pre-formatted at construction time.

    Any entry of ``input_variables`` that has a concrete value in ``values``
    is substituted into the template; entries without a value survive as
    ``{placeholder}`` fields.  Afterwards the real input variables are
    re-extracted from the formatted template text.
    """

    template: str = ""
    # Human-readable description used by the prompt generator.
    description: Optional[str]
    # Prefix printed before the AI's turn; may itself contain placeholders
    # (e.g. "{character}") that are resolved from ``values``.
    ai_prefix: Optional[str]

    @root_validator(pre=False)
    def build_template(cls, values):
        """Resolve known variables in ``ai_prefix`` and ``template``, then
        refresh ``input_variables`` from the formatted template."""
        format_dict = {}
        ai_prefix_format_dict = {}
        for key in values.get("input_variables", []):
            # Fall back to "{key}" so unresolved variables survive .format().
            new_value = values.get(key, f"{{{key}}}")
            format_dict[key] = new_value
            if key in values["ai_prefix"]:
                ai_prefix_format_dict[key] = new_value
        values["ai_prefix"] = values["ai_prefix"].format(**ai_prefix_format_dict)
        values["template"] = values["template"].format(**format_dict)
        # (removed a redundant `values["template"] = values["template"]`
        # self-assignment that had no effect)
        values["input_variables"] = extract_input_variables_from_prompt(
            values["template"]
        )
        return values
class SeriesCharacterPrompt(BaseCustomPrompt):
    """Prompt that asks the model to role-play a given character from a series."""

    # Add a very descriptive description for the prompt generator
    description: Optional[
        str
    ] = "A prompt that asks the AI to act like a character from a series."
    # Both fields are substituted into the template by BaseCustomPrompt's validator.
    character: str
    series: str
    template: str = """I want you to act like {character} from {series}.
I want you to respond and answer like {character}. do not write any explanations. only answer like {character}.
You must know all of the knowledge of {character}.
Current conversation:
{history}
Human: {input}
{character}:"""
    # The AI prefix is itself a placeholder, filled with the chosen character.
    ai_prefix: str = "{character}"
    input_variables: List[str] = ["character", "series"]


# Registry of custom prompt classes merged into the langchain prompt list.
CUSTOM_PROMPTS: Dict[str, Type[BaseCustomPrompt]] = {
    "SeriesCharacterPrompt": SeriesCharacterPrompt
}

if __name__ == "__main__":
    # Manual smoke test: print the fully-formatted template.
    prompt = SeriesCharacterPrompt(character="Harry Potter", series="Harry Potter")
    print(prompt.template)

View file

@ -1,47 +1,286 @@
import contextlib
import io
import re
from typing import Any, Dict
from chromadb.errors import NotEnoughElementsException # type: ignore
from langflow.cache.utils import compute_dict_hash, load_cache, memoize_dict
from langflow.graph.graph import Graph
from langflow.interface import loading
from langflow.utils.logger import logger
def process_data_graph(data_graph: Dict[str, Any]):
def load_langchain_object(data_graph, is_first_message=False):
"""
Process data graph by extracting input variables and replacing ZeroShotPrompt
Load langchain object from cache if it exists, otherwise build it.
"""
computed_hash = compute_dict_hash(data_graph)
if is_first_message:
langchain_object = build_langchain_object(data_graph)
else:
logger.debug("Loading langchain object from cache")
langchain_object = load_cache(computed_hash)
return computed_hash, langchain_object
def load_or_build_langchain_object(data_graph, is_first_message=False):
"""
Load langchain object from cache if it exists, otherwise build it.
"""
if is_first_message:
build_langchain_object_with_caching.clear_cache()
return build_langchain_object_with_caching(data_graph)
@memoize_dict(maxsize=1)
def build_langchain_object_with_caching(data_graph):
"""
Build langchain object from data_graph.
"""
logger.debug("Building langchain object")
nodes = data_graph["nodes"]
# Add input variables
# nodes = payload.extract_input_variables(nodes)
# Nodes, edges and root node
edges = data_graph["edges"]
graph = Graph(nodes, edges)
return graph.build()
def build_langchain_object(data_graph):
"""
Build langchain object from data_graph.
"""
logger.debug("Building langchain object")
nodes = data_graph["nodes"]
# Add input variables
# nodes = payload.extract_input_variables(nodes)
# Nodes, edges and root node
edges = data_graph["edges"]
graph = Graph(nodes, edges)
return graph.build()
def process_graph(data_graph: Dict[str, Any]):
"""
Process graph by extracting input variables and replacing ZeroShotPrompt
with PromptTemplate,then run the graph and return the result and thought.
"""
# Load langchain object
logger.debug("Loading langchain object")
message = data_graph.pop("message", "")
is_first_message = len(data_graph.get("chatHistory", [])) == 0
computed_hash, langchain_object = load_langchain_object(
data_graph, is_first_message
)
logger.debug("Loaded langchain object")
extracted_json = loading.extract_json(data_graph)
if langchain_object is None:
# Raise user facing error
raise ValueError(
"There was an error loading the langchain_object. Please, check all the nodes and try again."
)
message = data_graph["message"]
# Generate result and thought
logger.debug("Generating result and thought")
result, thought = get_result_and_thought_using_graph(langchain_object, message)
logger.debug("Generated result and thought")
# Process json
result, thought = get_result_and_thought(extracted_json, message)
# Save langchain_object to cache
# We have to save it here because if the
# memory is updated we need to keep the new values
logger.debug("Saving langchain object to cache")
# save_cache(computed_hash, langchain_object, is_first_message)
logger.debug("Saved langchain object to cache")
return {"result": str(result), "thought": thought.strip()}
return {
"result": result,
"thought": re.sub(
r"\x1b\[([0-9,A-Z]{1,2}(;[0-9,A-Z]{1,2})?)?[m|K]", "", thought
).strip(),
def process_graph_cached(data_graph: Dict[str, Any]):
"""
Process graph by extracting input variables and replacing ZeroShotPrompt
with PromptTemplate,then run the graph and return the result and thought.
"""
# Load langchain object
message = data_graph.pop("message", "")
is_first_message = len(data_graph.get("chatHistory", [])) == 0
langchain_object = load_or_build_langchain_object(data_graph, is_first_message)
logger.debug("Loaded langchain object")
if langchain_object is None:
# Raise user facing error
raise ValueError(
"There was an error loading the langchain_object. Please, check all the nodes and try again."
)
# Generate result and thought
logger.debug("Generating result and thought")
result, thought = get_result_and_thought_using_graph(langchain_object, message)
logger.debug("Generated result and thought")
return {"result": str(result), "thought": thought.strip()}
def get_memory_key(langchain_object):
"""
Given a LangChain object, this function retrieves the current memory key from the object's memory attribute.
It then checks if the key exists in a dictionary of known memory keys and returns the corresponding key,
or None if the current key is not recognized.
"""
mem_key_dict = {
"chat_history": "history",
"history": "chat_history",
}
memory_key = langchain_object.memory.memory_key
return mem_key_dict.get(memory_key)
def update_memory_keys(langchain_object, possible_new_mem_key):
"""
Given a LangChain object and a possible new memory key, this function updates the input and output keys in the
object's memory attribute to exclude the current memory key and the possible new key. It then sets the memory key
to the possible new key.
"""
input_key = [
key
for key in langchain_object.input_keys
if key not in [langchain_object.memory.memory_key, possible_new_mem_key]
][0]
output_key = [
key
for key in langchain_object.output_keys
if key not in [langchain_object.memory.memory_key, possible_new_mem_key]
][0]
langchain_object.memory.input_key = input_key
langchain_object.memory.output_key = output_key
langchain_object.memory.memory_key = possible_new_mem_key
def fix_memory_inputs(langchain_object):
"""
Given a LangChain object, this function checks if it has a memory attribute and if that memory key exists in the
object's input variables. If so, it does nothing. Otherwise, it gets a possible new memory key using the
get_memory_key function and updates the memory keys using the update_memory_keys function.
"""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
try:
if langchain_object.memory.memory_key in langchain_object.input_variables:
return
except AttributeError:
input_variables = (
langchain_object.prompt.input_variables
if hasattr(langchain_object, "prompt")
else langchain_object.input_keys
)
if langchain_object.memory.memory_key in input_variables:
return
possible_new_mem_key = get_memory_key(langchain_object)
if possible_new_mem_key is not None:
update_memory_keys(langchain_object, possible_new_mem_key)
def get_result_and_thought_using_graph(langchain_object, message: str):
"""Get result and thought from extracted json"""
try:
if hasattr(langchain_object, "verbose"):
langchain_object.verbose = True
chat_input = None
memory_key = ""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
memory_key = langchain_object.memory.memory_key
if hasattr(langchain_object, "input_keys"):
for key in langchain_object.input_keys:
if key not in [memory_key, "chat_history"]:
chat_input = {key: message}
else:
chat_input = message # type: ignore
if hasattr(langchain_object, "return_intermediate_steps"):
# https://github.com/hwchase17/langchain/issues/2068
# Deactivating until we have a frontend solution
# to display intermediate steps
langchain_object.return_intermediate_steps = False
fix_memory_inputs(langchain_object)
with io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer):
try:
output = langchain_object(chat_input)
except ValueError as exc:
# make the error message more informative
logger.debug(f"Error: {str(exc)}")
output = langchain_object.run(chat_input)
intermediate_steps = (
output.get("intermediate_steps", []) if isinstance(output, dict) else []
)
result = (
output.get(langchain_object.output_keys[0])
if isinstance(output, dict)
else output
)
if intermediate_steps:
thought = format_intermediate_steps(intermediate_steps)
else:
thought = output_buffer.getvalue()
except NotEnoughElementsException as exc:
raise ValueError(
"Error: Not enough documents for ChromaDB to index. Try reducing chunk size in TextSplitter."
) from exc
except Exception as exc:
raise ValueError(f"Error: {str(exc)}") from exc
return result, thought
def get_result_and_thought(extracted_json: Dict[str, Any], message: str):
"""Get result and thought from extracted json"""
try:
loaded_langchain = loading.load_langchain_type_from_config(
langchain_object = loading.load_langchain_type_from_config(
config=extracted_json
)
with io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer):
result = loaded_langchain(message)
result = (
result.get(loaded_langchain.output_keys[0])
if isinstance(result, dict)
else result
output = langchain_object(message)
intermediate_steps = (
output.get("intermediate_steps", []) if isinstance(output, dict) else []
)
thought = output_buffer.getvalue()
result = (
output.get(langchain_object.output_keys[0])
if isinstance(output, dict)
else output
)
if intermediate_steps:
thought = format_intermediate_steps(intermediate_steps)
else:
thought = output_buffer.getvalue()
except Exception as e:
result = f"Error: {str(e)}"
thought = ""
return result, thought
def format_intermediate_steps(intermediate_steps):
formatted_chain = "> Entering new AgentExecutor chain...\n"
for step in intermediate_steps:
action = step[0]
observation = step[1]
formatted_chain += (
f" {action.log}\nAction: {action.tool}\nAction Input: {action.tool_input}\n"
)
formatted_chain += f"Observation: {observation}\n"
final_answer = f"Final Answer: {observation}\n"
formatted_chain += f"Thought: I now know the final answer\n{final_answer}\n"
formatted_chain += "> Finished chain.\n"
return formatted_chain

View file

@ -1,133 +0,0 @@
from typing import Any, Dict # noqa: F401
from langchain import agents, chains, prompts
from langchain.agents.load_tools import (
_BASE_TOOLS,
_EXTRA_LLM_TOOLS,
_EXTRA_OPTIONAL_TOOLS,
_LLM_TOOLS,
get_all_tool_names,
)
from langflow.custom import customs
from langflow.interface.custom_lists import (
llm_type_to_cls_dict,
memory_type_to_cls_dict,
)
from langflow.utils import util
def get_signature(name: str, object_type: str):
"""Get the signature of an object."""
return {
"chains": get_chain_signature,
"agents": get_agent_signature,
"prompts": get_prompt_signature,
"llms": get_llm_signature,
"memories": get_memory_signature,
"tools": get_tool_signature,
}.get(object_type, lambda name: f"Invalid type: {name}")(name)
def get_chain_signature(name: str):
"""Get the chain type by signature."""
try:
return util.build_template_from_function(
name, chains.loading.type_to_loader_dict
)
except ValueError as exc:
raise ValueError("Chain not found") from exc
def get_agent_signature(name: str):
"""Get the signature of an agent."""
try:
return util.build_template_from_class(name, agents.loading.AGENT_TO_CLASS)
except ValueError as exc:
raise ValueError("Agent not found") from exc
def get_prompt_signature(name: str):
"""Get the signature of a prompt."""
try:
if name in customs.get_custom_prompts().keys():
return customs.get_custom_prompts()[name]
return util.build_template_from_function(
name, prompts.loading.type_to_loader_dict
)
except ValueError as exc:
raise ValueError("Prompt not found") from exc
def get_llm_signature(name: str):
"""Get the signature of an llm."""
try:
return util.build_template_from_class(name, llm_type_to_cls_dict)
except ValueError as exc:
raise ValueError("LLM not found") from exc
def get_memory_signature(name: str):
"""Get the signature of a memory."""
try:
return util.build_template_from_class(name, memory_type_to_cls_dict)
except ValueError as exc:
raise ValueError("Memory not found") from exc
def get_tool_signature(name: str):
"""Get the signature of a tool."""
all_tools = {}
for tool in get_all_tool_names():
if tool_params := util.get_tool_params(util.get_tools_dict(tool)):
all_tools[tool_params["name"]] = tool
# Raise error if name is not in tools
if name not in all_tools.keys():
raise ValueError("Tool not found")
type_dict = {
"str": {
"type": "str",
"required": True,
"list": False,
"show": True,
"placeholder": "",
"value": "",
},
"llm": {"type": "BaseLLM", "required": True, "list": False, "show": True},
}
tool_type = all_tools[name]
if tool_type in _BASE_TOOLS:
params = []
elif tool_type in _LLM_TOOLS:
params = ["llm"]
elif tool_type in _EXTRA_LLM_TOOLS:
_, extra_keys = _EXTRA_LLM_TOOLS[tool_type]
params = ["llm"] + extra_keys
elif tool_type in _EXTRA_OPTIONAL_TOOLS:
_, extra_keys = _EXTRA_OPTIONAL_TOOLS[tool_type]
params = extra_keys
else:
params = []
template = {
param: (type_dict[param].copy() if param == "llm" else type_dict["str"].copy())
for param in params
}
# Remove required from aiosession
if "aiosession" in template.keys():
template["aiosession"]["required"] = False
template["aiosession"]["show"] = False
template["_type"] = tool_type # type: ignore
return {
"template": template,
**util.get_tool_params(util.get_tools_dict(tool_type)),
"base_classes": ["Tool"],
}

View file

@ -0,0 +1,3 @@
from langflow.interface.text_splitters.base import TextSplitterCreator
__all__ = ["TextSplitterCreator"]

View file

@ -0,0 +1,71 @@
from typing import Dict, List, Optional
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.custom_lists import textsplitter_type_to_cls_dict
from langflow.settings import settings
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
class TextSplitterCreator(LangChainTypeCreator):
    """Creator that exposes langchain text splitters to the frontend."""

    type_name: str = "textsplitters"

    @property
    def type_to_loader_dict(self) -> Dict:
        return textsplitter_type_to_cls_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Get the signature of a text splitter."""
        # Fields every splitter node exposes, in a fixed insertion order.
        extra_fields = {
            "documents": {
                "type": "BaseLoader",
                "required": True,
                "show": True,
                "name": "documents",
            },
            "separator": {
                "type": "str",
                "required": True,
                "show": True,
                "value": ".",
                "name": "separator",
                "display_name": "Separator",
            },
            "chunk_size": {
                "type": "int",
                "required": True,
                "show": True,
                "value": 4000,
                "name": "chunk_size",
                "display_name": "Chunk Size",
            },
            "chunk_overlap": {
                "type": "int",
                "required": True,
                "show": True,
                "value": 200,
                "name": "chunk_overlap",
                "display_name": "Chunk Overlap",
            },
        }
        try:
            signature = build_template_from_class(name, textsplitter_type_to_cls_dict)
            signature["template"].update(extra_fields)
            return signature
        except ValueError as exc:
            raise ValueError(f"Text Splitter {name} not found") from exc
        except AttributeError as exc:
            logger.error(f"Text Splitter {name} not loaded: {exc}")
            return None

    def to_list(self) -> List[str]:
        # Expose only the splitters enabled in settings (or all in dev mode).
        names = []
        for splitter in self.type_to_loader_dict.values():
            if splitter.__name__ in settings.textsplitters or settings.dev:
                names.append(splitter.__name__)
        return names


textsplitter_creator = TextSplitterCreator()

View file

@ -0,0 +1,69 @@
from typing import Callable, Dict, List, Optional
from langchain.agents import agent_toolkits
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.importing.utils import import_class, import_module
from langflow.settings import settings
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
class ToolkitCreator(LangChainTypeCreator):
    """Creator for langchain agent toolkits and their agent-builder helpers."""

    type_name: str = "toolkits"
    all_types: List[str] = agent_toolkits.__all__

    # Maps a toolkit class name to the agent-creation helpers that can wrap it.
    # An empty list means the toolkit is used directly, without a loader.
    create_functions: Dict = {
        "JsonToolkit": [],
        "SQLDatabaseToolkit": [],
        "OpenAPIToolkit": ["create_openapi_agent"],
        "VectorStoreToolkit": [
            "create_vectorstore_agent",
            "create_vectorstore_router_agent",
            "VectorStoreInfo",
        ],
        "ZapierToolkit": [],
        "PandasToolkit": ["create_pandas_dataframe_agent"],
        "CSVToolkit": ["create_csv_agent"],
    }

    @property
    def type_to_loader_dict(self) -> Dict:
        # Lazily import each toolkit class allowed by the settings.
        if self.type_dict is None:
            self.type_dict = {
                toolkit_name: import_class(
                    f"langchain.agents.agent_toolkits.{toolkit_name}"
                )
                # if toolkit_name is not lower case it is a class
                for toolkit_name in agent_toolkits.__all__
                if not toolkit_name.islower() and toolkit_name in settings.toolkits
            }
        return self.type_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Build the frontend template for the toolkit ``name``.

        Raises:
            ValueError: if the toolkit is unknown.
        """
        try:
            return build_template_from_class(name, self.type_to_loader_dict)
        except ValueError as exc:
            # Bug fix: messages previously said "Prompt" (copy-paste from
            # PromptCreator); this creator handles toolkits.
            raise ValueError("Toolkit not found") from exc
        except AttributeError as exc:
            logger.error(f"Toolkit {name} not loaded: {exc}")
            return None

    def to_list(self) -> List[str]:
        return list(self.type_to_loader_dict.keys())

    def get_create_function(self, name: str) -> Callable:
        """Import and return the first agent-creation helper for ``name``."""
        if loader_name := self.create_functions.get(name, None):
            # import loader
            return import_module(
                f"from langchain.agents.agent_toolkits import {loader_name[0]}"
            )
        else:
            raise ValueError("Loader not found")

    def has_create_function(self, name: str) -> bool:
        # check if the function list is not empty
        return bool(self.create_functions.get(name, None))


toolkits_creator = ToolkitCreator()

View file

@ -0,0 +1,3 @@
from langflow.interface.tools.base import ToolCreator
__all__ = ["ToolCreator"]

View file

@ -0,0 +1,164 @@
from typing import Dict, List, Optional
from langchain.agents.load_tools import (
_EXTRA_LLM_TOOLS,
_EXTRA_OPTIONAL_TOOLS,
_LLM_TOOLS,
)
from langflow.custom import customs
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.tools.constants import (
ALL_TOOLS_NAMES,
CUSTOM_TOOLS,
FILE_TOOLS,
OTHER_TOOLS,
)
from langflow.interface.tools.util import get_tool_params
from langflow.settings import settings
from langflow.template.base import Template, TemplateField
from langflow.utils import util
from langflow.utils.util import build_template_from_class
# Canonical TemplateField prototypes for tool parameters.  ToolCreator
# copies (.copy()) and renames one of these per parameter when building a
# tool's signature; "str" is the fallback for unknown parameter names.
TOOL_INPUTS = {
    "str": TemplateField(
        field_type="str",
        required=True,
        is_list=False,
        show=True,
        placeholder="",
        value="",
    ),
    "llm": TemplateField(field_type="BaseLLM", required=True, is_list=False, show=True),
    "func": TemplateField(
        field_type="function",
        required=True,
        is_list=False,
        show=True,
        multiline=True,
    ),
    "code": TemplateField(
        field_type="str",
        required=True,
        is_list=False,
        show=True,
        value="",
        multiline=True,
    ),
    "path": TemplateField(
        field_type="file",
        required=True,
        is_list=False,
        show=True,
        value="",
        suffixes=[".json", ".yaml", ".yml"],
        fileTypes=["json", "yaml", "yml"],
    ),
}
class ToolCreator(LangChainTypeCreator):
    """Creator that exposes langchain and custom tools to the frontend."""

    type_name: str = "tools"
    # Lazy cache for type_to_loader_dict; see the property below.
    tools_dict: Optional[Dict] = None

    @property
    def type_to_loader_dict(self) -> Dict:
        """Map tool display name -> {"type", "params", "fcn"} for allowed tools."""
        if self.tools_dict is None:
            all_tools = {}
            for tool, tool_fcn in ALL_TOOLS_NAMES.items():
                tool_params = get_tool_params(tool_fcn)
                # Prefer the name declared by the tool itself over the key.
                tool_name = tool_params.get("name", tool)
                if tool_name in settings.tools or settings.dev:
                    if tool_name == "JsonSpec":
                        # Frontend takes a file path; JsonSpec's ctor arg is dict_.
                        tool_params["path"] = tool_params.pop("dict_")  # type: ignore
                    all_tools[tool_name] = {
                        "type": tool,
                        "params": tool_params,
                        "fcn": tool_fcn,
                    }
            self.tools_dict = all_tools
        return self.tools_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Get the signature of a tool."""
        base_classes = ["Tool"]
        fields = []
        params = []
        tool_params = {}
        # Raise error if name is not in tools
        if name not in self.type_to_loader_dict.keys():
            raise ValueError("Tool not found")

        tool_type: str = self.type_to_loader_dict[name]["type"]  # type: ignore

        # Each branch decides which parameters the frontend node exposes.
        # if tool_type in _BASE_TOOLS.keys():
        #     params = []
        if tool_type in _LLM_TOOLS.keys():
            params = ["llm"]
        elif tool_type in _EXTRA_LLM_TOOLS.keys():
            extra_keys = _EXTRA_LLM_TOOLS[tool_type][1]
            params = ["llm"] + extra_keys
        elif tool_type in _EXTRA_OPTIONAL_TOOLS.keys():
            extra_keys = _EXTRA_OPTIONAL_TOOLS[tool_type][1]
            params = extra_keys
        # elif tool_type == "Tool":
        #     params = ["name", "description", "func"]
        elif tool_type in CUSTOM_TOOLS:
            # Get custom tool params
            # NOTE(review): here params is the tool's param dict (iterated by
            # key in the loop below), not a list — confirm this is intended.
            params = self.type_to_loader_dict[name]["params"]  # type: ignore
            base_classes = ["function"]
            if node := customs.get_custom_nodes("tools").get(tool_type):
                return node
        elif tool_type in FILE_TOOLS:
            params = self.type_to_loader_dict[name]["params"]  # type: ignore
            base_classes += [name]
        elif tool_type in OTHER_TOOLS:
            tool_dict = build_template_from_class(tool_type, OTHER_TOOLS)
            fields = tool_dict["template"]
            # Pop unnecessary fields and add name
            fields.pop("_type")  # type: ignore
            fields.pop("return_direct")  # type: ignore
            fields.pop("verbose")  # type: ignore
            tool_params = {
                "name": fields.pop("name")["value"],  # type: ignore
                "description": fields.pop("description")["value"],  # type: ignore
            }
            fields = [
                TemplateField(name=name, field_type=field["type"], **field)
                for name, field in fields.items()  # type: ignore
            ]
            base_classes += tool_dict["base_classes"]

        # Copy the field and add the name
        for param in params:
            field = TOOL_INPUTS.get(param, TOOL_INPUTS["str"]).copy()
            field.name = param
            if param == "aiosession":
                # aiosession is optional and hidden from the UI.
                field.show = False
                field.required = False
            fields.append(field)

        template = Template(fields=fields, type_name=tool_type)
        # Merge branch-specific params with the discovered tool params
        # (the discovered ones win on key collisions).
        tool_params = {**tool_params, **self.type_to_loader_dict[name]["params"]}
        return {
            "template": util.format_dict(template.to_dict()),
            **tool_params,
            "base_classes": base_classes,
        }

    def to_list(self) -> List[str]:
        """List all load tools"""
        return list(self.type_to_loader_dict.keys())


tool_creator = ToolCreator()

View file

@ -0,0 +1,60 @@
from langchain.agents import Tool
from langchain.agents.load_tools import (
_BASE_TOOLS,
_EXTRA_LLM_TOOLS,
_EXTRA_OPTIONAL_TOOLS,
_LLM_TOOLS,
)
from langchain.tools.bing_search.tool import BingSearchRun
from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
from langchain.tools.json.tool import JsonGetValueTool, JsonListKeysTool, JsonSpec
from langchain.tools.python.tool import PythonAstREPLTool, PythonREPLTool
from langchain.tools.requests.tool import (
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
from langchain.tools.sql_database.tool import (
InfoSQLDatabaseTool,
ListSQLDatabaseTool,
QueryCheckerTool,
QuerySQLDataBaseTool,
)
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langflow.interface.tools.custom import PythonFunction
# Tools whose parameter is a file path (JsonSpec's dict_ is loaded from a file;
# see ToolCreator, which renames dict_ -> path).
FILE_TOOLS = {"JsonSpec": JsonSpec}
# Tools provided by langflow itself rather than taken from langchain's registry.
CUSTOM_TOOLS = {"Tool": Tool, "PythonFunction": PythonFunction}
# Tool classes instantiated directly from their langchain class.
OTHER_TOOLS = {
    "QuerySQLDataBaseTool": QuerySQLDataBaseTool,
    "InfoSQLDatabaseTool": InfoSQLDatabaseTool,
    "ListSQLDatabaseTool": ListSQLDatabaseTool,
    "QueryCheckerTool": QueryCheckerTool,
    "BingSearchRun": BingSearchRun,
    "GoogleSearchRun": GoogleSearchRun,
    "GoogleSearchResults": GoogleSearchResults,
    "JsonListKeysTool": JsonListKeysTool,
    "JsonGetValueTool": JsonGetValueTool,
    "PythonREPLTool": PythonREPLTool,
    "PythonAstREPLTool": PythonAstREPLTool,
    "RequestsGetTool": RequestsGetTool,
    "RequestsPostTool": RequestsPostTool,
    "RequestsPatchTool": RequestsPatchTool,
    "RequestsPutTool": RequestsPutTool,
    "RequestsDeleteTool": RequestsDeleteTool,
    "WikipediaQueryRun": WikipediaQueryRun,
    "WolframAlphaQueryRun": WolframAlphaQueryRun,
}
# Union of every supported tool name -> implementation.  The _EXTRA_* langchain
# registries map name -> (factory, extra_keys), so only v[0] (the factory) is kept.
ALL_TOOLS_NAMES = {
    **_BASE_TOOLS,
    **_LLM_TOOLS,  # type: ignore
    **{k: v[0] for k, v in _EXTRA_LLM_TOOLS.items()},  # type: ignore
    **{k: v[0] for k, v in _EXTRA_OPTIONAL_TOOLS.items()},
    **CUSTOM_TOOLS,
    **FILE_TOOLS,  # type: ignore
    **OTHER_TOOLS,
}

View file

@ -0,0 +1,37 @@
from typing import Callable, Optional
from pydantic import BaseModel, validator
from langflow.utils import validate
class Function(BaseModel):
    """A user-supplied Python function held as source code.

    Attributes:
        code: source text containing a single function definition.
        function: optional pre-compiled callable (not set by this class).
        imports: optional import statements the code relies on.
    """

    code: str
    function: Optional[Callable] = None
    imports: Optional[str] = None

    # Validate the function.  (Removed a no-op __init__ override that only
    # delegated to super(), and a try/except that re-raised unchanged.)
    @validator("code")
    def validate_func(cls, v):
        """Reject source that does not evaluate to a valid function.

        validate.eval_function raising here surfaces as a pydantic
        validation error, exactly as the original re-raise did.
        """
        validate.eval_function(v)
        return v

    def get_function(self):
        """Compile ``self.code`` and return the resulting callable."""
        function_name = validate.extract_function_name(self.code)
        return validate.create_function(self.code, function_name)


class PythonFunction(Function):
    """Python function"""

    code: str
View file

@ -0,0 +1,97 @@
import ast
import inspect
import textwrap
from typing import Dict, Union

from langchain.agents.tools import Tool
def get_func_tool_params(func, **kwargs) -> Union[Dict, None]:
    """Extract ``name`` and ``description`` from the Tool returned by ``func``.

    ``func`` is never called: its source is parsed and the first return
    statement is inspected.  Returns None when no usable return is found.

    Bug fix: ``inspect.getsource`` returns indented text for methods and
    nested functions, which made ``ast.parse`` raise IndentationError; the
    source is now dedented first (a no-op for module-level functions).
    """
    tree = ast.parse(textwrap.dedent(inspect.getsource(func)))
    # Iterate over the statements in the abstract syntax tree
    for node in ast.walk(tree):
        # Find the first return statement
        if isinstance(node, ast.Return):
            tool = node.value
            if not isinstance(tool, ast.Call):
                continue
            if isinstance(tool.func, ast.Name) and tool.func.id == "Tool":
                if tool.keywords:
                    # Keyword form: Tool(name=..., description=..., ...)
                    tool_params = {}
                    for keyword in tool.keywords:
                        if keyword.arg == "name":
                            tool_params["name"] = ast.literal_eval(keyword.value)
                        elif keyword.arg == "description":
                            tool_params["description"] = ast.literal_eval(
                                keyword.value
                            )
                    return tool_params
                # Positional form: Tool(name, func, description)
                return {
                    "name": ast.literal_eval(tool.args[0]),
                    "description": ast.literal_eval(tool.args[2]),
                }
            else:
                # get the class object from the return statement
                # NOTE(review): eval of source text — acceptable only because
                # the source comes from the local codebase, not user input.
                try:
                    class_obj = eval(
                        compile(ast.Expression(tool), "<string>", "eval")
                    )
                except Exception:
                    return None
                return {
                    "name": getattr(class_obj, "name"),
                    "description": getattr(class_obj, "description"),
                }
    # Return None if no return statement was found
    return None
def get_class_tool_params(cls, **kwargs) -> Union[Dict, None]:
    """Statically collect the parameters needed to instantiate a tool class.

    Walks the class source: ``__init__`` arguments become params (using the
    literally-evaluable annotation as value when possible, else ""), and for
    classes other than ``Tool`` every annotated class attribute is added
    with an empty-string value.
    """
    tree = ast.parse(inspect.getsource(cls))
    tool_params = {}
    # Iterate over the statements in the abstract syntax tree
    for node in ast.walk(tree):
        if isinstance(node, ast.ClassDef):
            # Find the class definition and look for methods
            for stmt in node.body:
                if isinstance(stmt, ast.FunctionDef) and stmt.name == "__init__":
                    # There is no assignment statements in the __init__ method
                    # So we need to get the params from the function definition
                    for arg in stmt.args.args:
                        if arg.arg == "name":
                            # It should be the name of the class
                            tool_params[arg.arg] = cls.__name__
                        elif arg.arg == "self":
                            continue
                        # If there is not default value, set it to an empty string
                        else:
                            try:
                                annotation = ast.literal_eval(arg.annotation)  # type: ignore
                                tool_params[arg.arg] = annotation
                            except ValueError:
                                tool_params[arg.arg] = ""
                # Get the attribute name and the annotation
                elif cls != Tool and isinstance(stmt, ast.AnnAssign):
                    # Get the attribute name and the annotation
                    tool_params[stmt.target.id] = ""  # type: ignore

    return tool_params
def get_tool_params(tool, **kwargs) -> Dict:
    """Dispatch to the function- or class-based param extractor.

    Raises ValueError when ``tool`` is neither a function nor a class.
    """
    if inspect.isfunction(tool):
        params = get_func_tool_params(tool, **kwargs)
    elif inspect.isclass(tool):
        # Classes contribute the parameters needed to instantiate them.
        params = get_class_tool_params(tool, **kwargs)
    else:
        raise ValueError("Tool must be a function or class.")
    return params or {}

View file

@ -1,12 +1,23 @@
from langflow.interface.listing import list_type
from langflow.interface.signature import get_signature
from langflow.interface.agents.base import agent_creator
from langflow.interface.chains.base import chain_creator
from langflow.interface.document_loaders.base import documentloader_creator
from langflow.interface.embeddings.base import embedding_creator
from langflow.interface.llms.base import llm_creator
from langflow.interface.memories.base import memory_creator
from langflow.interface.prompts.base import prompt_creator
from langflow.interface.text_splitters.base import textsplitter_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.utilities.base import utility_creator
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
def get_type_list():
"""Get a list of all langchain types"""
all_types = build_langchain_types_dict()
all_types.pop("tools")
# all_types.pop("tools")
for key, value in all_types.items():
all_types[key] = [item["template"]["_type"] for item in value.values()]
@ -14,23 +25,30 @@ def get_type_list():
return all_types
def build_langchain_types_dict():
def build_langchain_types_dict(): # sourcery skip: dict-assign-update-to-union
"""Build a dictionary of all langchain types"""
return {
"chains": {
chain: get_signature(chain, "chains") for chain in list_type("chains")
},
"agents": {
agent: get_signature(agent, "agents") for agent in list_type("agents")
},
"prompts": {
prompt: get_signature(prompt, "prompts") for prompt in list_type("prompts")
},
"llms": {llm: get_signature(llm, "llms") for llm in list_type("llms")},
"memories": {
memory: get_signature(memory, "memories")
for memory in list_type("memories")
},
"tools": {tool: get_signature(tool, "tools") for tool in list_type("tools")},
}
all_types = {}
creators = [
chain_creator,
agent_creator,
prompt_creator,
llm_creator,
memory_creator,
tool_creator,
toolkits_creator,
wrapper_creator,
embedding_creator,
vectorstore_creator,
documentloader_creator,
textsplitter_creator,
utility_creator,
]
all_types = {}
for creator in creators:
created_types = creator.to_dict()
if created_types[creator.type_name].values():
all_types.update(created_types)
return all_types

View file

@ -0,0 +1,39 @@
from typing import Dict, List, Optional
from langflow.custom.customs import get_custom_nodes
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.custom_lists import utility_type_to_cls_dict
from langflow.settings import settings
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
class UtilityCreator(LangChainTypeCreator):
    """Creator that exposes langchain utility classes to the frontend."""

    type_name: str = "utilities"

    @property
    def type_to_loader_dict(self) -> Dict:
        # Maps utility name -> utility class.
        return utility_type_to_cls_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Get the signature of a utility.

        Custom nodes registered for this type take precedence over the
        template built from the class.  Returns None when the utility exists
        but cannot be loaded.
        """
        try:
            # Fetch the custom-node registry once instead of twice, and test
            # membership on the dict directly instead of .keys().
            custom_nodes = get_custom_nodes(self.type_name)
            if name in custom_nodes:
                return custom_nodes[name]
            return build_template_from_class(name, utility_type_to_cls_dict)
        except ValueError as exc:
            raise ValueError(f"Utility {name} not found") from exc
        except AttributeError as exc:
            logger.error(f"Utility {name} not loaded: {exc}")
            return None

    def to_list(self) -> List[str]:
        # Only expose utilities enabled in settings (or everything in dev mode).
        return [
            utility.__name__
            for utility in self.type_to_loader_dict.values()
            if utility.__name__ in settings.utilities or settings.dev
        ]
utility_creator = UtilityCreator()

View file

@ -0,0 +1,22 @@
import json
import os
import yaml
def load_file_into_dict(file_path: str) -> dict:
    """Load a JSON or YAML file into a dictionary.

    Args:
        file_path: path to a ``.json``, ``.yaml`` or ``.yml`` file.

    Returns:
        The parsed file contents.

    Raises:
        FileNotFoundError: if ``file_path`` does not exist.
        ValueError: if the extension is not JSON/YAML.
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"File not found: {file_path}")

    file_extension = os.path.splitext(file_path)[1].lower()
    # Explicit UTF-8 avoids platform-dependent default encodings
    # (e.g. cp1252 on Windows) breaking non-ASCII config files.
    if file_extension == ".json":
        with open(file_path, "r", encoding="utf-8") as json_file:
            data = json.load(json_file)
    elif file_extension in [".yaml", ".yml"]:
        with open(file_path, "r", encoding="utf-8") as yaml_file:
            data = yaml.safe_load(yaml_file)
    else:
        raise ValueError("Unsupported file type. Please provide a JSON or YAML file.")

    return data

View file

@ -0,0 +1,55 @@
from typing import Dict, List, Optional
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.custom_lists import vectorstores_type_to_cls_dict
from langflow.settings import settings
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
class VectorstoreCreator(LangChainTypeCreator):
    """Creator that exposes langchain vector stores to the frontend."""

    type_name: str = "vectorstores"

    @property
    def type_to_loader_dict(self) -> Dict:
        # Maps vector-store name -> vector-store class.
        return vectorstores_type_to_cls_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Get the signature of a vector store.

        Fixed docstring: previously claimed "embedding" (copy-paste from the
        embeddings creator).  Returns None when the store cannot be loaded.
        """
        try:
            signature = build_template_from_class(name, vectorstores_type_to_cls_dict)
            # TODO: Use FrontendendNode class to build the signature
            # Hand-written template: every vector store is wired from a
            # TextSplitter (documents) and an Embeddings instance.
            signature["template"] = {
                "documents": {
                    "type": "TextSplitter",
                    "required": True,
                    "show": True,
                    "name": "documents",
                    "display_name": "Text Splitter",
                },
                "embedding": {
                    "type": "Embeddings",
                    "required": True,
                    "show": True,
                    "name": "embedding",
                    "display_name": "Embedding",
                },
            }
            return signature
        except ValueError as exc:
            raise ValueError(f"Vector Store {name} not found") from exc
        except AttributeError as exc:
            logger.error(f"Vector Store {name} not loaded: {exc}")
            return None

    def to_list(self) -> List[str]:
        # Only expose stores enabled in settings (or everything in dev mode).
        return [
            vectorstore
            for vectorstore in self.type_to_loader_dict
            if vectorstore in settings.vectorstores or settings.dev
        ]
vectorstore_creator = VectorstoreCreator()

View file

@ -0,0 +1,34 @@
from typing import Dict, List, Optional
from langchain import requests
from langflow.interface.base import LangChainTypeCreator
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
class WrapperCreator(LangChainTypeCreator):
    """Creator that exposes langchain request wrappers to the frontend."""

    type_name: str = "wrappers"

    @property
    def type_to_loader_dict(self) -> Dict:
        # Built lazily and cached on first access.
        if self.type_dict is None:
            self.type_dict = {
                wrapper.__name__: wrapper for wrapper in [requests.TextRequestsWrapper]
            }
        return self.type_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Get the signature of a wrapper; None when it cannot be loaded."""
        try:
            return build_template_from_class(name, self.type_to_loader_dict)
        except ValueError as exc:
            # Include the name, consistent with the other creators' messages.
            raise ValueError(f"Wrapper {name} not found") from exc
        except AttributeError as exc:
            logger.error(f"Wrapper {name} not loaded: {exc}")
            return None

    def to_list(self) -> List[str]:
        return list(self.type_to_loader_dict.keys())
wrapper_creator = WrapperCreator()

View file

@ -2,8 +2,7 @@ from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from langflow.api.endpoints import router as endpoints_router
from langflow.api.list_endpoints import router as list_router
from langflow.api.signature import router as signatures_router
from langflow.api.validate import router as validate_router
def create_app():
@ -23,8 +22,7 @@ def create_app():
)
app.include_router(endpoints_router)
app.include_router(list_router)
app.include_router(signatures_router)
app.include_router(validate_router)
return app

View file

@ -1,29 +1,51 @@
import os
from typing import List, Optional
from typing import List
import yaml
from pydantic import BaseSettings, Field, root_validator
from pydantic import BaseSettings, root_validator
class Settings(BaseSettings):
chains: Optional[List[str]] = Field(...)
agents: Optional[List[str]] = Field(...)
prompts: Optional[List[str]] = Field(...)
llms: Optional[List[str]] = Field(...)
tools: Optional[List[str]] = Field(...)
memories: Optional[List[str]] = Field(...)
dev: bool = Field(...)
chains: List[str] = []
agents: List[str] = []
prompts: List[str] = []
llms: List[str] = []
tools: List[str] = []
memories: List[str] = []
embeddings: List[str] = []
vectorstores: List[str] = []
documentloaders: List[str] = []
wrappers: List[str] = []
toolkits: List[str] = []
textsplitters: List[str] = []
utilities: List[str] = []
dev: bool = False
class Config:
validate_assignment = True
extra = "ignore"
@root_validator
@root_validator(allow_reuse=True)
def validate_lists(cls, values):
for key, value in values.items():
if key != "dev" and not value:
values[key] = []
return values
def update_from_yaml(self, file_path: str):
new_settings = load_settings_from_yaml(file_path)
self.chains = new_settings.chains or []
self.agents = new_settings.agents or []
self.prompts = new_settings.prompts or []
self.llms = new_settings.llms or []
self.tools = new_settings.tools or []
self.memories = new_settings.memories or []
self.wrappers = new_settings.wrappers or []
self.toolkits = new_settings.toolkits or []
self.textsplitters = new_settings.textsplitters or []
self.utilities = new_settings.utilities or []
self.dev = new_settings.dev or False
def save_settings_to_yaml(settings: Settings, file_path: str):
with open(file_path, "w") as f:
@ -41,9 +63,8 @@ def load_settings_from_yaml(file_path: str) -> Settings:
with open(file_path, "r") as f:
settings_dict = yaml.safe_load(f)
a = Settings.parse_obj(settings_dict)
return a
return Settings(**settings_dict)
settings = load_settings_from_yaml("config.yaml")

View file

@ -0,0 +1,234 @@
from abc import ABC
from typing import Any, Callable, Dict, Optional, Union
from pydantic import BaseModel
from langflow.template.constants import FORCE_SHOW_FIELDS
from langflow.utils import constants
class TemplateFieldCreator(BaseModel, ABC):
    """Base model describing a single template field shown in the frontend."""

    field_type: str = "str"  # frontend widget type; serialized as "type"
    required: bool = False
    placeholder: str = ""
    is_list: bool = False  # serialized as "list"
    show: bool = True  # whether the frontend displays the field
    multiline: bool = False
    value: Any = None
    suffixes: list[str] = []  # file suffixes for file-type fields
    # NOTE(review): fileTypes duplicates file_types and to_dict() renames
    # file_types -> fileTypes — confirm which one the frontend reads.
    fileTypes: list[str] = []
    file_types: list[str] = []
    content: Union[str, None] = None
    password: bool = False  # mask the value in the UI
    options: list[str] = []  # choices for list fields
    name: str = ""
    display_name: Optional[str] = None

    def to_dict(self):
        """Serialize to the frontend dict shape, dropping None/empty entries."""
        result = self.dict()
        # Remove key if it is None
        for key in list(result.keys()):
            if result[key] is None or result[key] == []:
                del result[key]
        # Rename to the keys the frontend expects.
        result["type"] = result.pop("field_type")
        result["list"] = result.pop("is_list")

        if result.get("file_types"):
            result["fileTypes"] = result.pop("file_types")

        if self.field_type == "file":
            # File fields always carry their content, even when it is None.
            result["content"] = self.content
        return result

    def process_field(
        self, key: str, value: Dict[str, Any], name: Optional[str] = None
    ) -> None:
        """Mutate this field in place from a raw template entry.

        ``key`` is the field name, ``value`` its raw template dict, ``name``
        the owning node's name (used for OpenAI-specific options).
        NOTE(review): closely mirrors FrontendNode.format_field with slightly
        different show/password rules — confirm before unifying.
        """
        _type = value["type"]

        # Remove 'Optional' wrapper
        if "Optional" in _type:
            _type = _type.replace("Optional[", "")[:-1]

        # Check for list type
        if "List" in _type:
            _type = _type.replace("List[", "")[:-1]
            self.is_list = True

        # Replace 'Mapping' with 'dict'
        if "Mapping" in _type:
            _type = _type.replace("Mapping", "dict")

        # Change type from str to Tool
        self.field_type = "Tool" if key in {"allowed_tools"} else self.field_type

        self.field_type = "int" if key in {"max_value_length"} else self.field_type

        # Show or not field: required (except input_variables), forced fields,
        # and anything that looks like an API key.
        self.show = bool(
            (self.required and key not in ["input_variables"])
            or key in FORCE_SHOW_FIELDS
            or "api_key" in key
        )

        # Add password field
        self.password = any(
            text in key.lower() for text in {"password", "token", "api", "key"}
        )

        # Add multline
        self.multiline = key in {
            "suffix",
            "prefix",
            "template",
            "examples",
            "code",
            "headers",
        }

        # Replace dict type with str
        if "dict" in self.field_type.lower():
            self.field_type = "code"

        if key == "dict_":
            # Dict-valued fields are edited via an uploaded JSON/YAML file.
            self.field_type = "file"
            self.suffixes = [".json", ".yaml", ".yml"]
            self.file_types = ["json", "yaml", "yml"]

        # Replace default value with actual value
        if "default" in value:
            self.value = value["default"]

        if key == "headers":
            self.value = """{'Authorization':
            'Bearer <token>'}"""

        # Add options to openai
        if name == "OpenAI" and key == "model_name":
            self.options = constants.OPENAI_MODELS
            self.is_list = True
        elif name == "ChatOpenAI" and key == "model_name":
            self.options = constants.CHAT_OPENAI_MODELS
            self.is_list = True
class TemplateField(TemplateFieldCreator):
pass
class Template(BaseModel):
    """Ordered collection of TemplateFields for one node type."""

    type_name: str  # serialized under the "_type" key
    fields: list[TemplateField]

    def process_fields(
        self,
        name: Optional[str] = None,
        format_field_func: Union[Callable, None] = None,
    ):
        # Apply an optional per-field formatter (e.g. FrontendNode.format_field),
        # mutating each field in place.
        if format_field_func:
            for field in self.fields:
                format_field_func(field, name)

    def to_dict(self, format_field_func=None):
        """Serialize fields keyed by field name, after running the formatter."""
        self.process_fields(self.type_name, format_field_func)
        result = {field.name: field.to_dict() for field in self.fields}
        result["_type"] = self.type_name  # type: ignore
        return result
class FrontendNode(BaseModel):
    """A node exposed to the frontend: a template plus display metadata."""

    template: Template
    description: str
    base_classes: list  # class names this node can be connected as
    name: str = ""

    def to_dict(self):
        """Serialize as {name: {template, description, base_classes}}."""
        return {
            self.name: {
                "template": self.template.to_dict(self.format_field),
                "description": self.description,
                "base_classes": self.base_classes,
            }
        }

    @staticmethod
    def format_field(field: TemplateField, name: Optional[str] = None) -> None:
        """Mutate ``field`` in place to adjust type/visibility for the frontend.

        ``name`` is the owning node's name (used for OpenAI-specific rules).
        Order of the steps below matters: later rules read flags set earlier.
        """
        key = field.name
        value = field.to_dict()

        _type = value["type"]

        # Remove 'Optional' wrapper
        if "Optional" in _type:
            _type = _type.replace("Optional[", "")[:-1]

        # Check for list type
        if "List" in _type:
            _type = _type.replace("List[", "")[:-1]
            field.is_list = True

        # Replace 'Mapping' with 'dict'
        if "Mapping" in _type:
            _type = _type.replace("Mapping", "dict")

        # Change type from str to Tool
        field.field_type = "Tool" if key in {"allowed_tools"} else field.field_type

        field.field_type = "int" if key in {"max_value_length"} else field.field_type

        # Show or not field: required fields (except input_variables), forced
        # fields, and anything that looks like an api/key parameter.
        field.show = bool(
            (field.required and key not in ["input_variables"])
            or key in FORCE_SHOW_FIELDS
            or "api" in key
            or ("key" in key and "input" not in key and "output" not in key)
        )

        # Add password field — only masked when it is also shown.
        field.password = (
            any(text in key.lower() for text in {"password", "token", "api", "key"})
            and field.show
        )

        # Add multline
        field.multiline = key in {
            "suffix",
            "prefix",
            "template",
            "examples",
            "code",
            "headers",
        }

        # Replace dict type with str
        if "dict" in field.field_type.lower():
            field.field_type = "code"

        if key == "dict_":
            # Dict-valued fields are edited via an uploaded JSON/YAML file.
            field.field_type = "file"
            field.suffixes = [".json", ".yaml", ".yml"]
            field.file_types = ["json", "yaml", "yml"]

        # Replace default value with actual value
        if "default" in value:
            field.value = value["default"]

        if key == "headers":
            field.value = """{'Authorization':
            'Bearer <token>'}"""

        # Add options to openai
        if name == "OpenAI" and key == "model_name":
            field.options = constants.OPENAI_MODELS
            field.is_list = True
        elif name == "ChatOpenAI":
            if key == "model_name":
                field.options = constants.CHAT_OPENAI_MODELS
                field.is_list = True

        if "api_key" in key and "OpenAI" in str(name):
            # OpenAI keys may come from env vars, so they are optional here.
            field.display_name = "OpenAI API Key"
            field.required = False
            if field.value is None:
                field.value = ""

        # If the field.name contains api or api and key, then it might be an api key
        # other conditions are to make sure that it is not an input or output variable
        if "api" in key.lower() and "key" in key.lower():
            field.required = False

View file

@ -0,0 +1,32 @@
FORCE_SHOW_FIELDS = [
"allowed_tools",
"memory",
"prefix",
"examples",
"temperature",
"model_name",
"headers",
"max_value_length",
"max_tokens",
]
DEFAULT_PROMPT = """
I want you to act as a naming consultant for new companies.
Here are some examples of good company names:
- search engine, Google
- social media, Facebook
- video sharing, YouTube
The name should be short, catchy and easy to remember.
What is a good name for a company that makes {product}?
"""
SYSTEM_PROMPT = """
You are a helpful assistant that talks casually about life in general.
You are a good listener and you can talk about anything.
"""
HUMAN_PROMPT = "{input}"

View file

View file

@ -0,0 +1,461 @@
from typing import Optional
from langchain.agents import loading
from langchain.agents.mrkl import prompt
from langflow.template.base import FrontendNode, Template, TemplateField
from langflow.template.constants import DEFAULT_PROMPT, HUMAN_PROMPT, SYSTEM_PROMPT
from langflow.utils.constants import DEFAULT_PYTHON_FUNCTION
NON_CHAT_AGENTS = {
agent_type: agent_class
for agent_type, agent_class in loading.AGENT_TO_CLASS.items()
if "chat" not in agent_type.value
}
class BasePromptFrontendNode(FrontendNode):
name: str
template: Template
description: str
base_classes: list[str]
def to_dict(self):
return super().to_dict()
class ZeroShotPromptNode(BasePromptFrontendNode):
name: str = "ZeroShotPrompt"
template: Template = Template(
type_name="zero_shot",
fields=[
TemplateField(
field_type="str",
required=False,
placeholder="",
is_list=False,
show=True,
multiline=True,
value=prompt.PREFIX,
name="prefix",
),
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=True,
value=prompt.SUFFIX,
name="suffix",
),
TemplateField(
field_type="str",
required=False,
placeholder="",
is_list=False,
show=True,
multiline=True,
value=prompt.FORMAT_INSTRUCTIONS,
name="format_instructions",
),
],
)
description: str = "Prompt template for Zero Shot Agent."
base_classes: list[str] = ["BasePromptTemplate"]
def to_dict(self):
return super().to_dict()
class PromptTemplateNode(FrontendNode):
name: str = "PromptTemplate"
template: Template
description: str
base_classes: list[str] = ["BasePromptTemplate"]
def to_dict(self):
return super().to_dict()
class PythonFunctionNode(FrontendNode):
name: str = "PythonFunction"
template: Template = Template(
type_name="python_function",
fields=[
TemplateField(
field_type="code",
required=True,
placeholder="",
is_list=False,
show=True,
value=DEFAULT_PYTHON_FUNCTION,
name="code",
)
],
)
description: str = "Python function to be executed."
base_classes: list[str] = ["function"]
def to_dict(self):
return super().to_dict()
class ToolNode(FrontendNode):
name: str = "Tool"
template: Template = Template(
type_name="Tool",
fields=[
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=True,
value="",
name="name",
),
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=True,
value="",
name="description",
),
TemplateField(
name="func",
field_type="function",
required=True,
is_list=False,
show=True,
multiline=True,
),
TemplateField(
field_type="bool",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=False,
value=False,
name="return_direct",
),
],
)
description: str = "Tool to be used in the flow."
base_classes: list[str] = ["Tool"]
def to_dict(self):
return super().to_dict()
class JsonAgentNode(FrontendNode):
name: str = "JsonAgent"
template: Template = Template(
type_name="json_agent",
fields=[
TemplateField(
field_type="BaseToolkit",
required=True,
show=True,
name="toolkit",
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
),
],
)
description: str = """Construct a json agent from an LLM and tools."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
class InitializeAgentNode(FrontendNode):
    """Frontend node for langchain's initialize_agent entry point."""

    name: str = "initialize_agent"
    template: Template = Template(
        # NOTE(review): "initailize_agent" typo kept — the frontend/loader may
        # match on this exact string; confirm before correcting it.
        type_name="initailize_agent",
        fields=[
            # Agent type, restricted to the non-chat agents.
            TemplateField(
                field_type="str",
                required=True,
                is_list=True,
                show=True,
                multiline=False,
                options=list(NON_CHAT_AGENTS.keys()),
                value=list(NON_CHAT_AGENTS.keys())[0],
                name="agent",
            ),
            TemplateField(
                field_type="BaseChatMemory",
                required=False,
                show=True,
                name="memory",
            ),
            TemplateField(
                field_type="Tool",
                required=False,
                show=True,
                name="tools",
                is_list=True,
            ),
            TemplateField(
                field_type="BaseLanguageModel",
                required=True,
                show=True,
                name="llm",
            ),
        ],
    )
    # Fixed description: was a copy-paste of JsonAgentNode's "json agent" text.
    description: str = """Construct an agent from an LLM, tools and optional memory."""
    base_classes: list[str] = ["AgentExecutor"]

    def to_dict(self):
        return super().to_dict()

    @staticmethod
    def format_field(field: TemplateField, name: Optional[str] = None) -> None:
        # Intentionally skip the generic formatting: the fields above are
        # already fully specified.
        pass
class CSVAgentNode(FrontendNode):
    """Frontend node for an agent that answers questions over a CSV file."""

    name: str = "CSVAgent"
    template: Template = Template(
        type_name="csv_agent",
        fields=[
            # Path to the CSV file, uploaded through the frontend.
            TemplateField(
                field_type="file",
                required=True,
                show=True,
                name="path",
                value="",
                suffixes=[".csv"],
                fileTypes=["csv"],
            ),
            TemplateField(
                field_type="BaseLanguageModel",
                required=True,
                show=True,
                name="llm",
            ),
        ],
    )
    # Fixed description: was a copy-paste saying "json agent".
    description: str = """Construct a CSV agent from a CSV file and an LLM."""
    base_classes: list[str] = ["AgentExecutor"]

    def to_dict(self):
        return super().to_dict()
class SQLDatabaseNode(FrontendNode):
name: str = "SQLDatabase"
template: Template = Template(
type_name="sql_database",
fields=[
TemplateField(
field_type="str",
required=True,
is_list=False,
show=True,
multiline=False,
value="",
name="uri",
),
],
)
description: str = """SQLAlchemy wrapper around a database."""
base_classes: list[str] = ["SQLDatabase"]
def to_dict(self):
return super().to_dict()
class VectorStoreAgentNode(FrontendNode):
name: str = "VectorStoreAgent"
template: Template = Template(
type_name="vectorstore_agent",
fields=[
TemplateField(
field_type="VectorStoreInfo",
required=True,
show=True,
name="vectorstoreinfo",
display_name="Vector Store Info",
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
display_name="LLM",
),
],
)
description: str = """Construct an agent from a Vector Store."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
class VectorStoreRouterAgentNode(FrontendNode):
name: str = "VectorStoreRouterAgent"
template: Template = Template(
type_name="vectorstorerouter_agent",
fields=[
TemplateField(
field_type="VectorStoreRouterToolkit",
required=True,
show=True,
name="vectorstoreroutertoolkit",
display_name="Vector Store Router Toolkit",
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
display_name="LLM",
),
],
)
description: str = """Construct an agent from a Vector Store Router."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
class SQLAgentNode(FrontendNode):
    """Frontend node for an agent that queries an SQL database."""

    name: str = "SQLAgent"
    template: Template = Template(
        type_name="sql_agent",
        fields=[
            # SQLAlchemy-style connection URI for the target database.
            TemplateField(
                field_type="str",
                required=True,
                placeholder="",
                is_list=False,
                show=True,
                multiline=False,
                value="",
                name="database_uri",
            ),
            TemplateField(
                field_type="BaseLanguageModel",
                required=True,
                show=True,
                name="llm",
                display_name="LLM",
            ),
        ],
    )
    # Fixed description: was a copy-paste saying "Vector Store Router".
    description: str = """Construct an SQL agent from an LLM and a database URI."""
    base_classes: list[str] = ["AgentExecutor"]

    def to_dict(self):
        return super().to_dict()
class PromptFrontendNode(FrontendNode):
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
# if field.field_type == "StringPromptTemplate"
# change it to str
PROMPT_FIELDS = [
"template",
"suffix",
"prefix",
"examples",
]
if field.field_type == "StringPromptTemplate" and "Message" in str(name):
field.field_type = "prompt"
field.multiline = True
field.value = HUMAN_PROMPT if "Human" in field.name else SYSTEM_PROMPT
if field.name == "template" and field.value == "":
field.value = DEFAULT_PROMPT
if field.name in PROMPT_FIELDS:
field.field_type = "prompt"
if (
"Union" in field.field_type
and "BaseMessagePromptTemplate" in field.field_type
):
field.field_type = "BaseMessagePromptTemplate"
# All prompt fields should be password=False
field.password = False
class MemoryFrontendNode(FrontendNode):
    @staticmethod
    def format_field(field: TemplateField, name: Optional[str] = None) -> None:
        """Memory-specific tweaks applied on top of the generic formatting."""
        FrontendNode.format_field(field, name)

        # Non-string defaults cannot be rendered in the frontend; drop them.
        if not isinstance(field.value, str):
            field.value = None
        if field.name == "k":
            # Expose "k" (presumably the memory window size — TODO confirm)
            # as a required int field with a friendly name.
            field.required = True
            field.show = True
            field.field_type = "int"
            field.value = 10
            field.display_name = "Memory Size"
        # Memory fields never hold secrets.
        field.password = False
class ChainFrontendNode(FrontendNode):
    @staticmethod
    def format_field(field: TemplateField, name: Optional[str] = None) -> None:
        """Chain-specific tweaks applied on top of the generic formatting."""
        FrontendNode.format_field(field, name)

        if "key" in field.name:
            # "*_key" fields on chains are I/O keys, not secrets; hide them.
            field.password = False
            field.show = False
        if field.name in ["input_key", "output_key"]:
            field.required = True
            field.show = True

        # Separated for possible future changes
        if field.name == "prompt":
            # if no prompt is provided, use the default prompt
            field.required = False
            field.show = True
class LLMFrontendNode(FrontendNode):
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
display_names_dict = {
"huggingfacehub_api_token": "HuggingFace Hub API Token",
}
FrontendNode.format_field(field, name)
SHOW_FIELDS = ["repo_id", "task", "model_kwargs"]
if field.name in SHOW_FIELDS:
field.show = True
if "api" in field.name and ("key" in field.name or "token" in field.name):
field.password = True
field.show = True
# Required should be False to support
# loading the API key from environment variables
field.required = False
if field.name == "task":
field.required = True
field.show = True
field.is_list = True
field.options = ["text-generation", "text2text-generation"]
if display_name := display_names_dict.get(field.name):
field.display_name = display_name
if field.name == "model_kwargs":
field.field_type = "code"

View file

@ -6,3 +6,10 @@ OPENAI_MODELS = [
"text-ada-001",
]
CHAT_OPENAI_MODELS = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]
DEFAULT_PYTHON_FUNCTION = """
def python_function(text: str) -> str:
\"\"\"This is a default python function that returns the input text\"\"\"
return text
"""

View file

@ -0,0 +1,30 @@
import logging
from pathlib import Path
from rich.logging import RichHandler
logger = logging.getLogger("langflow")
def configure(log_level: str = "INFO", log_file: Path | None = None):
    """Configure logging for langflow.

    Args:
        log_level: textual level name; unknown names fall back to INFO.
        log_file: optional path (str or Path); when given, a plain-format
            file handler is added to the "langflow" logger in addition to
            the rich console handler.

    The previous annotation (``log_file: Path = None`` + ``# type: ignore``)
    lied about the type; ``Path | None`` states it honestly (Python 3.10+,
    matching the project's supported versions).
    """
    log_format = "%(asctime)s - %(levelname)s - %(message)s"
    # getattr with a default tolerates bad level names instead of raising.
    log_level_value = getattr(logging, log_level.upper(), logging.INFO)

    logging.basicConfig(
        level=log_level_value,
        format=log_format,
        datefmt="[%X]",
        handlers=[RichHandler(rich_tracebacks=True)],
    )

    if log_file:
        # Accept str paths too; create parent directories as needed.
        log_file = Path(log_file)
        log_file.parent.mkdir(parents=True, exist_ok=True)
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(logging.Formatter(log_format))
        logger.addHandler(file_handler)

    logger.info(f"Logger set up with log level: {log_level_value}({log_level})")
    if log_file:
        logger.info(f"Log file: {log_file}")

View file

@ -1,5 +1,6 @@
import contextlib
import re
from typing import Dict
def extract_input_variables(nodes):
@ -27,48 +28,63 @@ def extract_input_variables(nodes):
return nodes
def get_root_node(nodes, edges):
def get_root_node(graph):
"""
Returns the root node of the template.
"""
incoming_edges = {edge["source"] for edge in edges}
return next((node for node in nodes if node["id"] not in incoming_edges), None)
incoming_edges = {edge.source for edge in graph.edges}
if not incoming_edges and len(graph.nodes) == 1:
return graph.nodes[0]
return next((node for node in graph.nodes if node not in incoming_edges), None)
def build_json(root, nodes, edges):
"""
Builds a json from the nodes and edges
"""
edge_ids = [edge["source"] for edge in edges if edge["target"] == root["id"]]
local_nodes = [node for node in nodes if node["id"] in edge_ids]
def build_json(root, graph) -> Dict:
if "node" not in root.data:
# If the root node has no "node" key, then it has only one child,
# which is the target of the single outgoing edge
edge = root.edges[0]
local_nodes = [edge.target]
else:
# Otherwise, find all children whose type matches the type
# specified in the template
node_type = root.node_type
local_nodes = graph.get_nodes_with_target(root)
if "node" not in root["data"]:
return build_json(local_nodes[0], nodes, edges)
final_dict = root["data"]["node"]["template"].copy()
if len(local_nodes) == 1:
return build_json(local_nodes[0], graph)
# Build a dictionary from the template
template = root.data["node"]["template"]
final_dict = template.copy()
for key, value in final_dict.items():
if key == "_type":
continue
module_type = value["type"]
node_type = value["type"]
if "value" in value and value["value"] is not None:
# If the value is specified, use it
value = value["value"]
elif "dict" in module_type:
elif "dict" in node_type:
# If the value is a dictionary, create an empty dictionary
value = {}
else:
# Otherwise, recursively build the child nodes
children = []
for c in local_nodes:
module_types = [c["data"]["type"]]
if "node" in c["data"]:
module_types += c["data"]["node"]["base_classes"]
if module_type in module_types:
children.append(c)
for local_node in local_nodes:
node_children = graph.get_children_by_node_type(local_node, node_type)
children.extend(node_children)
if value["required"] and not children:
raise ValueError(f"No child with type {module_type} found")
values = [build_json(child, nodes, edges) for child in children]
value = list(values) if value["list"] else next(iter(values), None)
raise ValueError(f"No child with type {node_type} found")
values = [build_json(child, graph) for child in children]
value = (
list(values)
if value["list"]
else next(iter(values), None) # type: ignore
)
final_dict[key] = value
return final_dict

View file

@ -1,20 +1,60 @@
import ast
import importlib
import inspect
import re
from typing import Dict, Optional
from langchain.agents.load_tools import (
_BASE_TOOLS,
_EXTRA_LLM_TOOLS,
_EXTRA_OPTIONAL_TOOLS,
_LLM_TOOLS,
)
from docstring_parser import parse # type: ignore
from langflow.template.constants import FORCE_SHOW_FIELDS
from langflow.utils import constants
def build_template_from_function(name: str, type_to_loader_dict: Dict):
def build_template_from_parameters(
name: str, type_to_loader_dict: Dict, add_function: bool = False
):
# Retrieve the function that matches the provided name
func = None
for _, v in type_to_loader_dict.items():
if v.__name__ == name:
func = v
break
if func is None:
raise ValueError(f"{name} not found")
# Process parameters
parameters = func.__annotations__
variables = {}
for param_name, param_type in parameters.items():
if param_name in ["return", "kwargs"]:
continue
variables[param_name] = {
"type": param_type.__name__,
"default": parameters[param_name].__repr_args__()[0][1],
# Op
"placeholder": "",
}
# Get the base classes of the return type
return_type = parameters.get("return")
base_classes = get_base_classes(return_type) if return_type else []
if add_function:
base_classes.append("function")
# Get the function's docstring
docs = inspect.getdoc(func) or ""
return {
"template": format_dict(variables, name),
"description": docs["Description"], # type: ignore
"base_classes": base_classes,
}
def build_template_from_function(
name: str, type_to_loader_dict: Dict, add_function: bool = False
):
classes = [
item.__annotations__["return"].__name__ for item in type_to_loader_dict.values()
]
@ -27,52 +67,8 @@ def build_template_from_function(name: str, type_to_loader_dict: Dict):
if v.__annotations__["return"].__name__ == name:
_class = v.__annotations__["return"]
docs = get_class_doc(_class)
variables = {"_type": _type}
for class_field_items, value in _class.__fields__.items():
if class_field_items in ["callback_manager", "requests_wrapper"]:
continue
variables[class_field_items] = {}
for name_, value_ in value.__repr_args__():
if name_ == "default_factory":
try:
variables[class_field_items][
"default"
] = get_default_factory(
module=_class.__base__.__module__, function=value_
)
except Exception:
variables[class_field_items]["default"] = None
elif name_ not in ["name"]:
variables[class_field_items][name_] = value_
variables[class_field_items]["placeholder"] = (
docs["Attributes"][class_field_items]
if class_field_items in docs["Attributes"]
else ""
)
return {
"template": format_dict(variables, name),
"description": docs["Description"],
"base_classes": get_base_classes(_class),
}
def build_template_from_class(name: str, type_to_cls_dict: Dict):
classes = [item.__name__ for item in type_to_cls_dict.values()]
# Raise error if name is not in chains
if name not in classes:
raise ValueError(f"{name} not found.")
for _type, v in type_to_cls_dict.items():
if v.__name__ == name:
_class = v
# Get the docstring
docs = get_class_doc(_class)
docs = parse(_class.__doc__)
variables = {"_type": _type}
for class_field_items, value in _class.__fields__.items():
@ -93,30 +89,97 @@ def build_template_from_class(name: str, type_to_cls_dict: Dict):
variables[class_field_items][name_] = value_
variables[class_field_items]["placeholder"] = (
docs["Attributes"][class_field_items]
if class_field_items in docs["Attributes"]
docs.params[class_field_items]
if class_field_items in docs.params
else ""
)
# Adding function to base classes to allow
# the output to be a function
base_classes = get_base_classes(_class)
if add_function:
base_classes.append("function")
return {
"template": format_dict(variables, name),
"description": docs["Description"],
"base_classes": get_base_classes(_class),
"description": docs.short_description or "",
"base_classes": base_classes,
}
# Build a frontend template dict for a (pydantic-style) class looked up by
# name in type_to_cls_dict.  Returns {"template", "description",
# "base_classes"}; raises ValueError when the name is unknown.
def build_template_from_class(
name: str, type_to_cls_dict: Dict, add_function: bool = False
):
classes = [item.__name__ for item in type_to_cls_dict.values()]
# Raise error if name is not in chains
if name not in classes:
raise ValueError(f"{name} not found.")
for _type, v in type_to_cls_dict.items():
if v.__name__ == name:
_class = v
# Get the docstring
# Parsed with docstring_parser: gives .short_description and .params.
docs = parse(_class.__doc__)
variables = {"_type": _type}
# Only pydantic-style classes expose __fields__; others get just "_type".
if "__fields__" in _class.__dict__:
for class_field_items, value in _class.__fields__.items():
# callback_manager is runtime-only state, not a user-editable field.
if class_field_items in ["callback_manager"]:
continue
variables[class_field_items] = {}
# Copy the pydantic field's repr attributes (type, required, default, ...).
for name_, value_ in value.__repr_args__():
if name_ == "default_factory":
# A factory can't be serialized; call it (via the base module) to get
# a concrete default, falling back to None on any failure.
try:
variables[class_field_items][
"default"
] = get_default_factory(
module=_class.__base__.__module__, function=value_
)
except Exception:
variables[class_field_items]["default"] = None
elif name_ not in ["name"]:
variables[class_field_items][name_] = value_
# NOTE(review): docstring_parser's ``docs.params`` is a LIST of
# DocstringParam objects, so ``class_field_items in docs.params`` looks
# like it can never be True and the placeholder is always "" — confirm,
# and consider building {p.arg_name: p.description} instead.
variables[class_field_items]["placeholder"] = (
docs.params[class_field_items]
if class_field_items in docs.params
else ""
)
base_classes = get_base_classes(_class)
# Adding function to base classes to allow
# the output to be a function
if add_function:
base_classes.append("function")
return {
"template": format_dict(variables, name),
"description": docs.short_description or "",
"base_classes": base_classes,
}
def get_base_classes(cls):
bases = cls.__bases__
if not bases:
return []
else:
"""Get the base classes of a class.
These are used to determine the output of the nodes.
"""
if bases := cls.__bases__:
result = []
for base in bases:
if any(type in base.__module__ for type in ["pydantic", "abc"]):
continue
result.append(base.__name__)
result.extend(get_base_classes(base))
return result
base_classes = get_base_classes(base)
# check if the base_classes are in the result
# if not, add them
for base_class in base_classes:
if base_class not in result:
result.append(base_class)
else:
result = [cls.__name__]
if not result:
result = [cls.__name__]
return list(set(result + [cls.__name__]))
def get_default_factory(module: str, function: str):
@ -128,114 +191,6 @@ def get_default_factory(module: str, function: str):
return None
def get_tools_dict(name: Optional[str] = None):
    """Return the merged tool registry, or a single tool looked up by name.

    The plain tool maps are merged with the "extra" maps, whose values are
    tuples — only element 0 (the loader) is kept from those.

    Args:
        name: optional tool key; when given (and truthy) only that tool is
            returned, raising KeyError if it is unknown.
    """
    merged = dict(_BASE_TOOLS)
    merged.update(_LLM_TOOLS)  # type: ignore
    merged.update({key: entry[0] for key, entry in _EXTRA_LLM_TOOLS.items()})  # type: ignore
    merged.update({key: entry[0] for key, entry in _EXTRA_OPTIONAL_TOOLS.items()})
    return merged[name] if name else merged
def get_tool_params(func, **kwargs):
    """Extract a tool's ``name`` and ``description`` from a factory function.

    Parses the source of ``func`` and inspects the first ``return`` whose
    value is a call.  A bare ``Tool(...)`` call is read statically from its
    keywords (or positional args: Tool(name, func, description)); any other
    returned expression is evaluated and its ``name``/``description``
    attributes are read.

    Returns:
        Dict with "name"/"description", or None when nothing usable is found
        or the returned expression cannot be evaluated.
    """
    tree = ast.parse(inspect.getsource(func))
    for node in ast.walk(tree):
        if not isinstance(node, ast.Return):
            continue
        tool = node.value
        if not isinstance(tool, ast.Call):
            continue
        # ``tool.func`` is an ast.Name only for bare ``Tool(...)`` calls;
        # attribute calls like ``module.Tool(...)`` used to crash on ``.id``.
        if isinstance(tool.func, ast.Name) and tool.func.id == "Tool":
            if tool.keywords:
                tool_params = {}
                for keyword in tool.keywords:
                    if keyword.arg == "name":
                        tool_params["name"] = ast.literal_eval(keyword.value)
                    elif keyword.arg == "description":
                        tool_params["description"] = ast.literal_eval(
                            keyword.value
                        )
                return tool_params
            # Positional form: Tool(name, func, description).
            return {
                "name": ast.literal_eval(tool.args[0]),
                "description": ast.literal_eval(tool.args[2]),
            }
        # Not a Tool(...) call: evaluate the returned expression and read its
        # attributes.  NOTE(review): eval of arbitrary source — acceptable
        # only because the source comes from trusted local functions.
        try:
            class_obj = eval(compile(ast.Expression(tool), "<string>", "eval"))
        except Exception:
            return None
        return {
            "name": getattr(class_obj, "name"),
            "description": getattr(class_obj, "description"),
        }
    # No return statement with a call value was found.
    return None
def get_class_doc(class_name):
    """Extract structured information from a class docstring.

    Args:
        class_name: the class whose ``__doc__`` is parsed.

    Returns:
        A dict with keys 'Description', 'Parameters', 'Attributes',
        'Example' and 'Returns'.  'Parameters', 'Attributes' and 'Returns'
        map names to descriptions; 'Description' and 'Example' accumulate
        text.
    """
    # Template returned as-is when there is no docstring.
    # NOTE(review): 'Example' starts as a list, so ``+=`` with a string
    # extends it one character at a time — looks unintended; confirm before
    # changing the shape callers receive.
    data = {
        "Description": "",
        "Parameters": {},
        "Attributes": {},
        "Example": [],
        "Returns": {},
    }
    docstring = class_name.__doc__
    if not docstring:
        return data
    current_section = "Description"
    for line in docstring.split("\n"):
        line = line.strip()
        if not line:
            continue
        # A single word ending in ':' that matches a known key starts a new
        # section (e.g. "Attributes:").
        if (
            line.startswith(tuple(data.keys()))
            and len(line.split()) == 1
            and line.endswith(":")
        ):
            current_section = line[:-1]
            continue
        if current_section in ["Description", "Example"]:
            data[current_section] += line
        else:
            # Skip malformed lines and split on the FIRST colon only, so
            # descriptions containing colons (URLs, "key: value" samples)
            # no longer raise ValueError.
            if ":" not in line:
                continue
            param, desc = line.split(":", 1)
            data[current_section][param.strip()] = desc.strip()
    return data
def format_dict(d, name: Optional[str] = None):
"""
Formats a dictionary by removing certain keys and modifying the
@ -272,41 +227,58 @@ def format_dict(d, name: Optional[str] = None):
_type = _type.replace("Mapping", "dict")
# Change type from str to Tool
value["type"] = "Tool" if key == "allowed_tools" else _type
value["type"] = "Tool" if key in ["allowed_tools"] else _type
value["type"] = "int" if key in ["max_value_length"] else value["type"]
# Show or not field
value["show"] = bool(
(value["required"] and key not in ["input_variables"])
or key
in [
"allowed_tools",
"memory",
"prefix",
"examples",
"temperature",
"model_name",
]
or key in FORCE_SHOW_FIELDS
or "api_key" in key
)
# Add password field
value["password"] = any(
text in key for text in ["password", "token", "api", "key"]
text in key.lower() for text in ["password", "token", "api", "key"]
)
# Add multline
value["multiline"] = key in ["suffix", "prefix", "template", "examples"]
value["multiline"] = key in [
"suffix",
"prefix",
"template",
"examples",
"code",
"headers",
]
# Replace dict type with str
if "dict" in value["type"].lower():
value["type"] = "code"
if key == "dict_":
value["type"] = "file"
value["suffixes"] = [".json", ".yaml", ".yml"]
value["fileTypes"] = ["json", "yaml", "yml"]
# Replace default value with actual value
if "default" in value:
value["value"] = value["default"]
value.pop("default")
if key == "headers":
value[
"value"
] = """{'Authorization':
'Bearer <token>'}"""
# Add options to openai
if name == "OpenAI" and key == "model_name":
value["options"] = constants.OPENAI_MODELS
elif name == "OpenAIChat" and key == "model_name":
value["list"] = True
elif name == "ChatOpenAI" and key == "model_name":
value["options"] = constants.CHAT_OPENAI_MODELS
value["list"] = True
return d

View file

@ -0,0 +1,173 @@
import ast
import importlib
import types
from typing import Dict
def add_type_ignores():
    """Backfill ``ast.TypeIgnore`` on interpreters that lack it.

    ``ast.Module(..., type_ignores=[...])`` needs the ``TypeIgnore`` node
    type; when the attribute is missing a minimal stub node class is
    installed on the ``ast`` module.  No-op when it already exists.
    """
    if hasattr(ast, "TypeIgnore"):
        return

    class TypeIgnore(ast.AST):
        # Leaf node: no child fields.
        _fields = ()

    ast.TypeIgnore = TypeIgnore
def validate_code(code):
    """Check a code string for import and definition-time errors.

    Returns a dict of the shape
    ``{"imports": {"errors": [...]}, "function": {"errors": [...]}}``:
    unresolvable top-level ``import`` statements are reported under
    "imports"; parse failures and errors raised while executing function
    definitions are reported under "function".
    """
    errors = {"imports": {"errors": []}, "function": {"errors": []}}

    # A parse failure means nothing else can be checked.
    try:
        tree = ast.parse(code)
    except Exception as exc:
        errors["function"]["errors"].append(str(exc))
        return errors

    # Make sure ast.TypeIgnore exists, then normalise the tree.
    add_type_ignores()
    tree.type_ignores = []

    # Try to actually resolve every top-level ``import`` target.
    for statement in tree.body:
        if not isinstance(statement, ast.Import):
            continue
        for alias in statement.names:
            try:
                importlib.import_module(alias.name)
            except ModuleNotFoundError as exc:
                errors["imports"]["errors"].append(str(exc))

    # Compile and execute each top-level function definition in isolation.
    for statement in tree.body:
        if not isinstance(statement, ast.FunctionDef):
            continue
        compiled = compile(
            ast.Module(body=[statement], type_ignores=[]), "<string>", "exec"
        )
        try:
            exec(compiled)
        except Exception as exc:
            errors["function"]["errors"].append(str(exc))

    return errors
def eval_function(function_string: str):
    """Execute ``function_string`` and return the function it defines.

    The code runs in a fresh namespace; the first function object whose
    code was compiled from the string itself (filename ``"<string>"``) is
    returned, so functions merely imported by the snippet are ignored.

    Raises:
        ValueError: if the string defines no function.
    """
    sandbox: Dict = {}
    exec(function_string, sandbox)
    for candidate in sandbox.values():
        if (
            isinstance(candidate, types.FunctionType)
            and candidate.__code__.co_filename == "<string>"
        ):
            return candidate
    raise ValueError("Function string does not contain a function")
def execute_function(code, function_name, *args, **kwargs):
    """Run ``function_name`` defined inside the ``code`` string.

    Top-level ``import`` statements in ``code`` are resolved first (a
    missing module raises ModuleNotFoundError with install advice), then
    the named function is compiled in isolation and called with
    ``*args``/``**kwargs``; its result is returned.

    Raises:
        ModuleNotFoundError: if an import in ``code`` cannot be resolved.
        ValueError: if ``function_name`` is not defined or fails to compile.
    """
    # Ensure ast.TypeIgnore exists (needed for ast.Module below).
    if not hasattr(ast, "TypeIgnore"):

        class TypeIgnore(ast.AST):
            _fields = ()

        ast.TypeIgnore = TypeIgnore

    module = ast.parse(code)
    exec_globals = globals().copy()

    # Resolve the snippet's imports directly into the execution namespace.
    # (The old extra ``exec(...)`` into locals() was dead code.)
    for node in module.body:
        if isinstance(node, ast.Import):
            for alias in node.names:
                try:
                    exec_globals[alias.asname or alias.name] = importlib.import_module(
                        alias.name
                    )
                except ModuleNotFoundError as e:
                    raise ModuleNotFoundError(
                        f"Module {alias.name} not found. Please install it and try again."
                    ) from e

    # Locate the requested function definition; fail with a clear error
    # instead of a bare StopIteration.
    function_code = next(
        (
            node
            for node in module.body
            if isinstance(node, ast.FunctionDef) and node.name == function_name
        ),
        None,
    )
    if function_code is None:
        raise ValueError(f"Function {function_name} not found in code string")

    # Compile only that definition and execute it into exec_globals, rather
    # than relying on CPython's exec-into-locals() behaviour.
    code_obj = compile(
        ast.Module(body=[function_code], type_ignores=[]), "<string>", "exec"
    )
    try:
        exec(code_obj, exec_globals)
    except Exception as exc:
        raise ValueError("Function string does not contain a function") from exc

    return exec_globals[function_name](*args, **kwargs)
def create_function(code, function_name):
    """Compile ``function_name`` from the ``code`` string and return a callable.

    Top-level ``import`` statements in ``code`` are resolved into the
    function's global namespace, so the returned callable can use them when
    invoked later.

    Raises:
        ModuleNotFoundError: if an import in ``code`` cannot be resolved.
        ValueError: if ``function_name`` is not defined or fails to compile.
    """
    # Ensure ast.TypeIgnore exists (needed for ast.Module below).
    if not hasattr(ast, "TypeIgnore"):

        class TypeIgnore(ast.AST):
            _fields = ()

        ast.TypeIgnore = TypeIgnore

    module = ast.parse(code)
    exec_globals = globals().copy()

    # Resolve the snippet's imports into the execution namespace.
    for node in module.body:
        if isinstance(node, ast.Import):
            for alias in node.names:
                try:
                    exec_globals[alias.asname or alias.name] = importlib.import_module(
                        alias.name
                    )
                except ModuleNotFoundError as e:
                    raise ModuleNotFoundError(
                        f"Module {alias.name} not found. Please install it and try again."
                    ) from e

    # Locate the requested function definition; fail with a clear error
    # instead of a bare StopIteration.
    function_code = next(
        (
            node
            for node in module.body
            if isinstance(node, ast.FunctionDef) and node.name == function_name
        ),
        None,
    )
    if function_code is None:
        raise ValueError(f"Function {function_name} not found in code string")

    code_obj = compile(
        ast.Module(body=[function_code], type_ignores=[]), "<string>", "exec"
    )
    # Fail loudly here instead of swallowing the error and crashing later
    # with a KeyError when the function name is looked up.
    try:
        exec(code_obj, exec_globals)
    except Exception as exc:
        raise ValueError("Function string does not contain a function") from exc

    def wrapped_function(*args, **kwargs):
        # The compiled function's __globals__ is exec_globals, which already
        # holds the resolved imports — no need to touch module globals().
        return exec_globals[function_name](*args, **kwargs)

    return wrapped_function
def extract_function_name(code):
    """Return the name of the first top-level function defined in ``code``.

    Raises:
        ValueError: when the string contains no function definition.
    """
    definitions = (
        node for node in ast.parse(code).body if isinstance(node, ast.FunctionDef)
    )
    first = next(definitions, None)
    if first is None:
        raise ValueError("No function definition found in the code string")
    return first.name

View file

@ -14,6 +14,7 @@
"@heroicons/react": "^2.0.15",
"@mui/material": "^5.11.9",
"@tailwindcss/forms": "^0.5.3",
"@tailwindcss/line-clamp": "^0.4.4",
"@testing-library/jest-dom": "^5.16.5",
"@testing-library/react": "^13.4.0",
"@testing-library/user-event": "^13.5.0",
@ -21,9 +22,12 @@
"@types/node": "^16.18.12",
"@types/react": "^18.0.27",
"@types/react-dom": "^18.0.10",
"ace-builds": "^1.16.0",
"ansi-to-html": "^0.7.2",
"axios": "^1.3.2",
"lodash": "^4.17.21",
"react": "^18.2.0",
"react-ace": "^10.1.0",
"react-cookie": "^4.1.1",
"react-dom": "^18.2.0",
"react-error-boundary": "^4.0.2",
@ -3927,6 +3931,14 @@
"tailwindcss": ">=3.0.0 || >= 3.0.0-alpha.1"
}
},
"node_modules/@tailwindcss/line-clamp": {
"version": "0.4.4",
"resolved": "https://registry.npmjs.org/@tailwindcss/line-clamp/-/line-clamp-0.4.4.tgz",
"integrity": "sha512-5U6SY5z8N42VtrCrKlsTAA35gy2VSyYtHWCsg1H87NU1SXnEfekTVlrga9fzUDrrHcGi2Lb5KenUWb4lRQT5/g==",
"peerDependencies": {
"tailwindcss": ">=2.0.0 || >=3.0.0 || >=3.0.0-alpha.1"
}
},
"node_modules/@testing-library/dom": {
"version": "8.20.0",
"resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-8.20.0.tgz",
@ -5098,6 +5110,11 @@
"node": ">= 0.6"
}
},
"node_modules/ace-builds": {
"version": "1.16.0",
"resolved": "https://registry.npmjs.org/ace-builds/-/ace-builds-1.16.0.tgz",
"integrity": "sha512-EriMhoxdfhh0zKm7icSt8EXekODAOVsYh9fpnlru9ALwf0Iw7J7bpuqLjhi3QRxvVKR7P0teQdJwTvjVMcYHuw=="
},
"node_modules/acorn": {
"version": "8.8.2",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz",
@ -5308,6 +5325,20 @@
"node": ">=4"
}
},
"node_modules/ansi-to-html": {
"version": "0.7.2",
"resolved": "https://registry.npmjs.org/ansi-to-html/-/ansi-to-html-0.7.2.tgz",
"integrity": "sha512-v6MqmEpNlxF+POuyhKkidusCHWWkaLcGRURzivcU3I9tv7k4JVhFcnukrM5Rlk2rUywdZuzYAZ+kbZqWCnfN3g==",
"dependencies": {
"entities": "^2.2.0"
},
"bin": {
"ansi-to-html": "bin/ansi-to-html"
},
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/anymatch": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
@ -7205,6 +7236,11 @@
"resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz",
"integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw=="
},
"node_modules/diff-match-patch": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/diff-match-patch/-/diff-match-patch-1.0.5.tgz",
"integrity": "sha512-IayShXAgj/QMXgB0IWmKx+rOPuGMhqm5w6jvFxmVenXKIzRqTAAsbBPT3kWQeGANj3jGgvcvv4yK6SxqYmikgw=="
},
"node_modules/diff-sequences": {
"version": "27.5.1",
"resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-27.5.1.tgz",
@ -12409,6 +12445,16 @@
"resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz",
"integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow=="
},
"node_modules/lodash.get": {
"version": "4.4.2",
"resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz",
"integrity": "sha512-z+Uw/vLuy6gQe8cfaFWD7p0wVv8fJl3mbzXh33RS+0oW2wvUqiRXiQ69gLWSLpgB5/6sU+r6BlQR0MBILadqTQ=="
},
"node_modules/lodash.isequal": {
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz",
"integrity": "sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ=="
},
"node_modules/lodash.memoize": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz",
@ -14803,6 +14849,22 @@
"node": ">=0.10.0"
}
},
"node_modules/react-ace": {
"version": "10.1.0",
"resolved": "https://registry.npmjs.org/react-ace/-/react-ace-10.1.0.tgz",
"integrity": "sha512-VkvUjZNhdYTuKOKQpMIZi7uzZZVgzCjM7cLYu6F64V0mejY8a2XTyPUIMszC6A4trbeMIHbK5fYFcT/wkP/8VA==",
"dependencies": {
"ace-builds": "^1.4.14",
"diff-match-patch": "^1.0.5",
"lodash.get": "^4.4.2",
"lodash.isequal": "^4.5.0",
"prop-types": "^15.7.2"
},
"peerDependencies": {
"react": "^0.13.0 || ^0.14.0 || ^15.0.1 || ^16.0.0 || ^17.0.0 || ^18.0.0",
"react-dom": "^0.13.0 || ^0.14.0 || ^15.0.1 || ^16.0.0 || ^17.0.0 || ^18.0.0"
}
},
"node_modules/react-app-polyfill": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/react-app-polyfill/-/react-app-polyfill-3.0.0.tgz",

View file

@ -9,6 +9,7 @@
"@heroicons/react": "^2.0.15",
"@mui/material": "^5.11.9",
"@tailwindcss/forms": "^0.5.3",
"@tailwindcss/line-clamp": "^0.4.4",
"@testing-library/jest-dom": "^5.16.5",
"@testing-library/react": "^13.4.0",
"@testing-library/user-event": "^13.5.0",
@ -16,9 +17,12 @@
"@types/node": "^16.18.12",
"@types/react": "^18.0.27",
"@types/react-dom": "^18.0.10",
"ace-builds": "^1.16.0",
"ansi-to-html": "^0.7.2",
"axios": "^1.3.2",
"lodash": "^4.17.21",
"react": "^18.2.0",
"react-ace": "^10.1.0",
"react-cookie": "^4.1.1",
"react-dom": "^18.2.0",
"react-error-boundary": "^4.0.2",

Binary file not shown.

After

Width:  |  Height:  |  Size: 102 KiB

View file

@ -4,7 +4,8 @@
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LangFLow</title>
<link rel="icon" href="%PUBLIC_URL%/favicon.ico" />
<title>LangFlow</title>
</head>
<body id='body' style="width: 100%; height:100%">
<noscript>You need to enable JavaScript to run this app.</noscript>

View file

@ -40,3 +40,8 @@
transform: rotate(360deg);
}
}
@font-face{
font-family: text-security-disc;
src: url("assets/text-security-disc.woff") format("woff");
}

View file

@ -10,6 +10,11 @@ import { typesContext } from "../../../../contexts/typesContext";
import { ParameterComponentType } from "../../../../types/components";
import FloatComponent from "../../../../components/floatComponent";
import Dropdown from "../../../../components/dropdownComponent";
import CodeAreaComponent from "../../../../components/codeAreaComponent";
import InputFileComponent from "../../../../components/inputFileComponent";
import { TabsContext } from "../../../../contexts/tabsContext";
import IntComponent from "../../../../components/intComponent";
import PromptAreaComponent from "../../../../components/promptComponent";
export default function ParameterComponent({
left,
@ -42,6 +47,7 @@ export default function ParameterComponent({
const { reactFlowInstance } = useContext(typesContext);
let disabled =
reactFlowInstance?.getEdges().some((e) => e.targetHandle === id) ?? false;
const { save } = useContext(TabsContext);
return (
<div
@ -49,11 +55,18 @@ export default function ParameterComponent({
className="w-full flex flex-wrap justify-between items-center bg-gray-50 dark:bg-gray-800 dark:text-white mt-1 px-5 py-2"
>
<>
<div className="text-sm truncate">
<div className={"text-sm truncate w-full " + (left ? "" : "text-end")}>
{title}
<span className="text-red-600">{required ? " *" : ""}</span>
</div>
{left && (type === "str" || type === "bool" || type === "float") ? (
{left &&
(type === "str" ||
type === "bool" ||
type === "float" ||
type === "code" ||
type === "prompt" ||
type === "file" ||
type === "int") ? (
<></>
) : (
<Tooltip title={tooltipTitle + (required ? " (required)" : "")}>
@ -91,6 +104,7 @@ export default function ParameterComponent({
}
onChange={(t: string[]) => {
data.node.template[name].value = t;
save();
}}
/>
) : data.node.template[name].multiline ? (
@ -99,15 +113,17 @@ export default function ParameterComponent({
value={data.node.template[name].value ?? ""}
onChange={(t: string) => {
data.node.template[name].value = t;
save();
}}
/>
) : (
<InputComponent
disabled={disabled}
password={data.node.template[name].password ?? true}
password={data.node.template[name].password ?? false}
value={data.node.template[name].value ?? ""}
onChange={(t) => {
data.node.template[name].value = t;
save();
}}
/>
)}
@ -120,6 +136,7 @@ export default function ParameterComponent({
setEnabled={(t) => {
data.node.template[name].value = t;
setEnabled(t);
save();
}}
/>
</div>
@ -129,6 +146,7 @@ export default function ParameterComponent({
value={data.node.template[name].value ?? ""}
onChange={(t) => {
data.node.template[name].value = t;
save();
}}
/>
) : left === true &&
@ -136,12 +154,51 @@ export default function ParameterComponent({
data.node.template[name].options ? (
<Dropdown
options={data.node.template[name].options}
onSelect={(newValue) => data.node.template[name].value=newValue}
value={data.node.template[name].value??"chose an option"}
onSelect={(newValue) => (data.node.template[name].value = newValue)}
value={data.node.template[name].value ?? "Choose an option"}
></Dropdown>
) : (
<></>
)}
) : left === true && type === "code" ? (
<CodeAreaComponent
disabled={disabled}
value={data.node.template[name].value ?? ""}
onChange={(t: string) => {
data.node.template[name].value = t;
save();
}}
/>
) : left === true && type === "file" ? (
<InputFileComponent
disabled={disabled}
value={data.node.template[name].value ?? ""}
onChange={(t: string) => {
data.node.template[name].value = t;
}}
fileTypes={data.node.template[name].fileTypes}
suffixes={data.node.template[name].suffixes}
onFileChange={(t: string) => {
data.node.template[name].content = t;
save();
}}
></InputFileComponent>
) : left === true && type === "int" ? (
<IntComponent
disabled={disabled}
value={data.node.template[name].value ?? ""}
onChange={(t) => {
data.node.template[name].value = t;
save();
}}
/>
) : left === true && type === "prompt" ? (
<PromptAreaComponent
disabled={disabled}
value={data.node.template[name].value ?? ""}
onChange={(t: string) => {
data.node.template[name].value = t;
save();
}}
/>
):(<></>)}
</>
</div>
);

View file

@ -23,7 +23,6 @@ export default function GenericNode({
const { types, deleteNode } = useContext(typesContext);
const Icon = nodeIcons[types[data.type]];
if (!Icon) {
console.log(data);
if (showError.current) {
setErrorData({
title: data.type
@ -32,9 +31,9 @@ export default function GenericNode({
});
showError.current = false;
}
deleteNode(data.id);
return;
}
return (
<div
className={classNames(
@ -72,8 +71,18 @@ export default function GenericNode({
.map((t: string, idx) => (
<div key={idx}>
{idx === 0 ? (
<div className="px-5 py-2 mt-2 dark:text-white text-center">
Inputs:
<div
className={classNames(
"px-5 py-2 mt-2 dark:text-white text-center",
Object.keys(data.node.template).filter(
(key) =>
!key.startsWith("_") && data.node.template[key].show
).length === 0
? "hidden"
: ""
)}
>
Inputs
</div>
) : (
<></>
@ -85,7 +94,13 @@ export default function GenericNode({
nodeColors[types[data.node.template[t].type]] ??
nodeColors.unknown
}
title={snakeToNormalCase(t)}
title={
data.node.template[t].display_name
? data.node.template[t].display_name
: data.node.template[t].name
? snakeToNormalCase(data.node.template[t].name)
: snakeToNormalCase(t)
}
name={t}
tooltipTitle={
"Type: " +
@ -103,7 +118,7 @@ export default function GenericNode({
</div>
))}
<div className="px-5 py-2 mt-2 dark:text-white text-center">
Output:
Output
</div>
<ParameterComponent
data={data}
@ -111,7 +126,7 @@ export default function GenericNode({
title={data.type}
tooltipTitle={`Type: ${data.node.base_classes.join(" | ")}`}
id={[data.type, data.id, ...data.node.base_classes].join("|")}
type={"str"}
type={data.node.base_classes.join("|")}
left={false}
/>
</>

Binary file not shown.

View file

@ -1,6 +1,7 @@
import { ReactElement } from "react";
import { LightTooltip } from "../LightTooltipComponent";
import { TooltipComponentType } from "../../types/components";
export default function Tooltip({ children, title }:{children:ReactElement,title:string}) {
return <LightTooltip title={title} arrow>{children}</LightTooltip>;
export default function Tooltip({ children, title,placement }:TooltipComponentType) {
return <LightTooltip placement={placement} title={title} arrow>{children}</LightTooltip>;
}

View file

@ -2,6 +2,8 @@ import { ChatBubbleLeftEllipsisIcon, ChatBubbleOvalLeftEllipsisIcon, PlusSmallIc
import { useState } from "react";
import { ChatMessageType } from "../../../types/chat";
import { nodeColors } from "../../../utils";
var Convert = require('ansi-to-html');
var convert = new Convert({newline:true});
export default function ChatMessage({ chat }: { chat: ChatMessageType }) {
const [hidden, setHidden] = useState(true);
@ -27,7 +29,7 @@ export default function ChatMessage({ chat }: { chat: ChatMessageType }) {
style={{ backgroundColor: nodeColors["thought"] }}
className=" text-start inline-block w-full pb-3 pt-3 px-5 cursor-pointer"
dangerouslySetInnerHTML={{
__html: chat.thought.replace(/\n/g, "<br />"),
__html: convert.toHtml(chat.thought)
}}
></div>
)}

View file

@ -5,34 +5,42 @@ import {
PaperAirplaneIcon,
XMarkIcon,
} from "@heroicons/react/24/outline";
import { MouseEventHandler, useContext, useEffect, useRef, useState } from "react";
import { sendAll } from "../../controllers/NodesServices";
import {
MouseEventHandler,
useContext,
useEffect,
useRef,
useState,
} from "react";
import { sendAll } from "../../controllers/API";
import { alertContext } from "../../contexts/alertContext";
import { classNames, nodeColors } from "../../utils";
import { classNames, nodeColors, snakeToNormalCase } from "../../utils";
import { TabsContext } from "../../contexts/tabsContext";
import { ChatType } from "../../types/chat";
import ChatMessage from "./chatMessage";
import { NodeType } from "../../types/flow";
const _ = require("lodash");
export default function Chat({ flow, reactFlowInstance }: ChatType) {
const { updateFlow,lockChat,setLockChat,flows,tabIndex } = useContext(TabsContext);
const { updateFlow, lockChat, setLockChat, flows, tabIndex } =
useContext(TabsContext);
const [saveChat, setSaveChat] = useState(false);
const [open, setOpen] = useState(true);
const [chatValue, setChatValue] = useState("");
const [chatHistory, setChatHistory] = useState(flow.chat);
const { setErrorData } = useContext(alertContext);
const { setErrorData, setNoticeData } = useContext(alertContext);
const addChatHistory = (
message: string,
isSend: boolean,
thought?: string,
thought?: string
) => {
let tabsChange = false;
setChatHistory((old) => {
let newChat = _.cloneDeep(old);
if(JSON.stringify(flow.chat) !==JSON.stringify(old)){
tabsChange = true
return old
if (JSON.stringify(flow.chat) !== JSON.stringify(old)) {
tabsChange = true;
return old;
}
if (thought) {
newChat.push({ message, isSend, thought });
@ -41,12 +49,17 @@ export default function Chat({ flow, reactFlowInstance }: ChatType) {
}
return newChat;
});
if(tabsChange){
if(thought){
updateFlow({..._.cloneDeep(flow),chat:[...flow.chat,{isSend,message,thought}]})
}
else{
updateFlow({..._.cloneDeep(flow),chat:[...flow.chat,{isSend,message}]})
if (tabsChange) {
if (thought) {
updateFlow({
..._.cloneDeep(flow),
chat: [...flow.chat, { isSend, message, thought }],
});
} else {
updateFlow({
..._.cloneDeep(flow),
chat: [...flow.chat, { isSend, message }],
});
}
}
setSaveChat((chat) => !chat);
@ -61,55 +74,93 @@ export default function Chat({ flow, reactFlowInstance }: ChatType) {
useEffect(() => {
if (ref.current) ref.current.scrollIntoView({ behavior: "smooth" });
}, [chatHistory]);
function validateNodes() {
if (
reactFlowInstance
.getNodes()
.some(
(n) =>
n.data.node &&
Object.keys(n.data.node.template).some(
(t: any) =>
n.data.node.template[t].required &&
n.data.node.template[t].value === "" &&
n.data.node.template[t].required &&
!reactFlowInstance
.getEdges()
.some(
(e) =>
e.sourceHandle.split("|")[1] === t &&
e.sourceHandle.split("|")[2] === n.id
)
)
)
) {
return false;
function validateNode(n: NodeType): Array<string> {
if (!n.data?.node?.template || !Object.keys(n.data.node.template)) {
setNoticeData({
title:
"We've noticed a potential issue with a node in the flow. Please review it and, if necessary, submit a bug report with your exported flow file. Thank you for your help!",
});
return [];
}
return true;
const {
type,
node: { template },
} = n.data;
return Object.keys(template).reduce(
(errors: Array<string>, t) =>
errors.concat(
(template[t].required && template[t].show) &&
(!template[t].value || template[t].value === "") &&
!reactFlowInstance
.getEdges()
.some(
(e) =>
e.targetHandle.split("|")[1] === t &&
e.targetHandle.split("|")[2] === n.id
)
? [
`${type} is missing ${
template.display_name
? template.display_name
: snakeToNormalCase(template[t].name)
}.`,
]
: []
),
[] as string[]
);
}
function validateNodes() {
return reactFlowInstance
.getNodes()
.flatMap((n: NodeType) => validateNode(n));
}
const ref = useRef(null);
function sendMessage() {
if (chatValue !== "") {
if (validateNodes()) {
let nodeValidationErrors = validateNodes();
if (nodeValidationErrors.length === 0) {
setLockChat(true);
let message = chatValue;
setChatValue("");
addChatHistory(message, true);
sendAll({ ...reactFlowInstance.toObject(), message, chatHistory,name:flow.name,description:flow.description})
sendAll({
...reactFlowInstance.toObject(),
message,
chatHistory,
name: flow.name,
description: flow.description,
})
.then((r) => {
addChatHistory(r.data.result, false, r.data.thought);
setLockChat(false);
})
.catch((error) => {
setErrorData({ title: error.message ?? "unknow error" });
setErrorData({
title: error.message ?? "Unknown Error",
list: [error.response.data.detail],
});
setLockChat(false);
let lastMessage;
setChatHistory((chatHistory) => {
let newChat = chatHistory;
lastMessage = newChat.pop().message;
return newChat;
});
setChatValue(lastMessage);
});
} else {
setErrorData({
title: "Error sending message",
list: [ "Oops! Looks like you missed some required information. Please fill in all the required fields before continuing."],
title: "Oops! Looks like you missed some required information:",
list: nodeValidationErrors,
});
}
} else {
@ -120,8 +171,8 @@ export default function Chat({ flow, reactFlowInstance }: ChatType) {
}
}
function clearChat() {
setChatHistory([])
updateFlow({ ..._.cloneDeep(flow), chat: []});
setChatHistory([]);
updateFlow({ ..._.cloneDeep(flow), chat: [] });
}
return (
@ -151,9 +202,10 @@ export default function Chat({ flow, reactFlowInstance }: ChatType) {
/>
Chat
</div>
<button className="hover:text-blue-500"
<button
className="hover:text-blue-500 dark:text-white"
onClick={(e) => {
e.stopPropagation()
e.stopPropagation();
clearChat();
}}
>

Some files were not shown because too many files have changed in this diff Show more