Release Langflow 1.0 (#2303)

This PR releases Langflow 1.0 in the main branch and updates the
documentation.
This commit is contained in:
Gabriel Luiz Freitas Almeida 2024-06-24 06:20:50 -07:00 committed by GitHub
commit 900e3f8006
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
1544 changed files with 114276 additions and 42486 deletions

View file

@ -3,7 +3,7 @@
{
"name": "Langflow Dev Container",
// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
"image": "mcr.microsoft.com/devcontainers/python:1-3.10-bullseye",
"image": "mcr.microsoft.com/devcontainers/python:3.10",
// Features to add to the dev container. More info: https://containers.dev/features.
"features": {

View file

@ -1,7 +0,0 @@
.venv/
**/aws
# node_modules
**/node_modules/
dist/
**/build/
src/backend/langflow/frontend

View file

@ -4,6 +4,19 @@
# Do not commit .env file to git
# Do not change .env.example file
# Config directory
# Directory where files, logs and database will be stored
# Example: LANGFLOW_CONFIG_DIR=~/.langflow
LANGFLOW_CONFIG_DIR=
# Save database in the config directory
# Values: true, false
# If false, the database will be saved in Langflow's root directory
# This means that the database will be deleted when Langflow is uninstalled
# and that the database will not be shared between different virtual environments
# Example: LANGFLOW_SAVE_DB_IN_CONFIG_DIR=true
LANGFLOW_SAVE_DB_IN_CONFIG_DIR=
# Database URL
# Postgres example: LANGFLOW_DATABASE_URL=postgresql://postgres:postgres@localhost:5432/langflow
# SQLite example:
@ -56,6 +69,12 @@ LANGFLOW_REMOVE_API_KEYS=
# LANGFLOW_REDIS_CACHE_EXPIRE (default: 3600)
LANGFLOW_CACHE_TYPE=
# Set AUTO_LOGIN to false if you want to disable auto login
# and use the login form to login. LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD
# must be set if AUTO_LOGIN is set to false
# Values: true, false
LANGFLOW_AUTO_LOGIN=
# Superuser username
# Example: LANGFLOW_SUPERUSER=admin
LANGFLOW_SUPERUSER=
@ -64,6 +83,10 @@ LANGFLOW_SUPERUSER=
# Example: LANGFLOW_SUPERUSER_PASSWORD=123456
LANGFLOW_SUPERUSER_PASSWORD=
# Should store environment variables in the database
# Values: true, false
LANGFLOW_STORE_ENVIRONMENT_VARIABLES=
# STORE_URL
# Example: LANGFLOW_STORE_URL=https://api.langflow.store
# LANGFLOW_STORE_URL=
@ -74,4 +97,8 @@ LANGFLOW_SUPERUSER_PASSWORD=
# LIKE_WEBHOOK_URL
#
# LANGFLOW_LIKE_WEBHOOK_URL=
# LANGFLOW_LIKE_WEBHOOK_URL=
# Value must finish with slash /
#BACKEND_URL=http://localhost:7860/
BACKEND_URL=

90
.eslintrc.json Normal file
View file

@ -0,0 +1,90 @@
{
"extends": [
"eslint:recommended",
"plugin:react/recommended",
"plugin:prettier/recommended"
],
"plugins": [
"react",
"import-helpers",
"prettier"
],
"parser": "@typescript-eslint/parser",
"parserOptions": {
"project": [
"./tsconfig.node.json",
"./tsconfig.json"
],
"extraFileExtensions": [
".mdx"
],
"extensions": [
".mdx"
]
},
"env": {
"browser": true,
"es2021": true
},
"settings": {
"react": {
"version": "detect"
}
},
"rules": {
"no-console": "warn",
"no-self-assign": "warn",
"no-self-compare": "warn",
"complexity": [
"error",
{
"max": 15
}
],
"indent": [
"error",
2,
{
"SwitchCase": 1
}
],
"no-dupe-keys": "error",
"no-invalid-regexp": "error",
"no-undef": "error",
"no-return-assign": "error",
"no-redeclare": "error",
"no-empty": "error",
"no-await-in-loop": "error",
"react/react-in-jsx-scope": 0,
"node/exports-style": [
"error",
"module.exports"
],
"node/file-extension-in-import": [
"error",
"always"
],
"node/prefer-global/buffer": [
"error",
"always"
],
"node/prefer-global/console": [
"error",
"always"
],
"node/prefer-global/process": [
"error",
"always"
],
"node/prefer-global/url-search-params": [
"error",
"always"
],
"node/prefer-global/url": [
"error",
"always"
],
"node/prefer-promises/dns": "error",
"node/prefer-promises/fs": "error"
}
}

1
.gitattributes vendored
View file

@ -32,3 +32,4 @@ Dockerfile text
*.mp4 binary
*.svg binary
*.csv binary

View file

@ -0,0 +1,99 @@
# An action for setting up poetry install with caching.
# Using a custom action since the default action does not
# take poetry install groups into account.
# Action code from:
# https://github.com/actions/setup-python/issues/505#issuecomment-1273013236
# Copy of https://github.com/langchain-ai/langchain/blob/2f8dd1a1619f25daa4737df4d378b1acd6ff83c4/.github/actions/poetry_setup/action.yml
name: poetry-install-with-caching
description: Poetry install with support for caching of dependency groups.
inputs:
python-version:
description: Python version, supporting MAJOR.MINOR only
required: true
poetry-version:
description: Poetry version
required: true
cache-key:
description: Cache key to use for manual handling of caching
required: true
working-directory:
description: Directory whose poetry.lock file should be cached
required: true
runs:
using: composite
steps:
- uses: actions/setup-python@v5
name: Setup python ${{ inputs.python-version }}
id: setup-python
with:
python-version: ${{ inputs.python-version }}
- uses: actions/cache@v4
id: cache-bin-poetry
name: Cache Poetry binary - Python ${{ inputs.python-version }}
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "1"
with:
path: |
/opt/pipx/venvs/poetry
# This step caches the poetry installation, so make sure it's keyed on the poetry version as well.
key: bin-poetry-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-${{ inputs.poetry-version }}
- name: Refresh shell hashtable and fixup softlinks
if: steps.cache-bin-poetry.outputs.cache-hit == 'true'
shell: bash
env:
POETRY_VERSION: ${{ inputs.poetry-version }}
PYTHON_VERSION: ${{ inputs.python-version }}
run: |
set -eux
# Refresh the shell hashtable, to ensure correct `which` output.
hash -r
# `actions/cache@v4` doesn't always seem able to correctly unpack softlinks.
# Delete and recreate the softlinks pipx expects to have.
rm /opt/pipx/venvs/poetry/bin/python
cd /opt/pipx/venvs/poetry/bin
ln -s "$(which "python$PYTHON_VERSION")" python
chmod +x python
cd /opt/pipx_bin/
ln -s /opt/pipx/venvs/poetry/bin/poetry poetry
chmod +x poetry
# Ensure everything got set up correctly.
/opt/pipx/venvs/poetry/bin/python --version
/opt/pipx_bin/poetry --version
- name: Install poetry
if: steps.cache-bin-poetry.outputs.cache-hit != 'true'
shell: bash
env:
POETRY_VERSION: ${{ inputs.poetry-version || env.POETRY_VERSION }}
PYTHON_VERSION: ${{ inputs.python-version }}
# Install poetry using the python version installed by setup-python step.
run: |
pipx install "poetry==$POETRY_VERSION" --python '${{ steps.setup-python.outputs.python-path }}' --verbose
pipx ensurepath
# Ensure the poetry binary is available in the PATH.
# Test that the poetry binary is available.
poetry --version
- name: Restore pip and poetry cached dependencies
uses: actions/cache@v4
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "4"
WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}
with:
path: |
~/.cache/pip
~/.cache/pypoetry/virtualenvs
~/.cache/pypoetry/cache
~/.cache/pypoetry/artifacts
${{ env.WORKDIR }}/.venv
key: py-deps-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-poetry-${{ inputs.poetry-version }}-${{ inputs.cache-key }}-${{ hashFiles(format('{0}/**/poetry.lock', env.WORKDIR)) }}

View file

@ -1,44 +0,0 @@
name: "Async API tests"
on:
push:
branches:
- dev
pull_request:
branches:
- dev
- main
jobs:
build-and-test:
runs-on: ubuntu-latest
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Cache Docker layers
uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Set up Docker
run: docker --version && docker-compose --version
- name: "Create env file"
working-directory: ./deploy
run: |
echo "${{ secrets.ENV_FILE }}" > .env
- name: Build and start services
working-directory: ./deploy
run: docker compose up --exit-code-from tests tests result_backend broker celeryworker db --build
continue-on-error: true
# - name: Stop services
# run: docker compose down

64
.github/workflows/create-release.yml vendored Normal file
View file

@ -0,0 +1,64 @@
name: Create Release
on:
workflow_dispatch:
inputs:
version:
description: "Version to release"
required: true
type: string
release_type:
description: "Type of release (base or main)"
required: true
type: choice
options:
- base
- main
env:
POETRY_VERSION: "1.8.2"
jobs:
release:
name: Build Langflow
runs-on: ubuntu-latest
outputs:
version: ${{ steps.check-version.outputs.version }}
steps:
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry==${{ env.POETRY_VERSION }}
- name: Set up Python 3.12
uses: actions/setup-python@v5
with:
python-version: "3.12"
cache: "poetry"
- name: Build project for distribution
run: |
if [ "${{ inputs.release_type }}" == "base" ]; then
make build base=true
else
make build main=true
fi
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: dist${{ inputs.release_type }}
path: ${{ inputs.release_type == 'base' && 'src/backend/base/dist' || 'dist' }}
create_release:
name: Create Release Job
runs-on: ubuntu-latest
needs: release
steps:
- uses: actions/download-artifact@v4
with:
name: dist${{ inputs.release_type }}
path: dist
- name: Create Release Notes
uses: ncipollo/release-action@v1
with:
artifacts: "dist/*"
token: ${{ secrets.GITHUB_TOKEN }}
draft: false
generateReleaseNotes: true
prerelease: true
tag: v${{ inputs.version }}
commit: dev

View file

@ -27,7 +27,7 @@ jobs:
# Popular action to deploy to GitHub Pages:
# Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus
- name: Deploy to GitHub Pages
uses: peaceiris/actions-gh-pages@v3
uses: peaceiris/actions-gh-pages@v4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
# Build output to publish to the `gh-pages` branch:

140
.github/workflows/docker-build.yml vendored Normal file
View file

@ -0,0 +1,140 @@
name: Docker Build and Push
on:
workflow_call:
inputs:
version:
required: true
type: string
release_type:
required: true
type: string
pre_release:
required: false
type: boolean
default: true
workflow_dispatch:
inputs:
version:
required: true
type: string
release_type:
required: true
type: choice
options:
- base
- main
pre_release:
required: false
type: boolean
default: true
env:
POETRY_VERSION: "1.8.2"
TEST_TAG: "langflowai/langflow:test"
jobs:
setup:
runs-on: ubuntu-latest
outputs:
tags: ${{ steps.set-vars.outputs.tags }}
file: ${{ steps.set-vars.outputs.file }}
steps:
- uses: actions/checkout@v4
- name: Set Dockerfile and Tags
id: set-vars
run: |
if [[ "${{ inputs.release_type }}" == "base" ]]; then
echo "tags=langflowai/langflow:base-${{ inputs.version }},langflowai/langflow:base-latest" >> $GITHUB_OUTPUT
echo "file=./docker/build_and_push_base.Dockerfile" >> $GITHUB_OUTPUT
else
if [[ "${{ inputs.pre_release }}" == "true" ]]; then
echo "tags=langflowai/langflow:${{ inputs.version }}" >> $GITHUB_OUTPUT
else
echo "tags=langflowai/langflow:${{ inputs.version }},langflowai/langflow:latest" >> $GITHUB_OUTPUT
fi
echo "file=./docker/build_and_push.Dockerfile" >> $GITHUB_OUTPUT
fi
build:
runs-on: ubuntu-latest
needs: setup
steps:
- uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and Push Docker Image
uses: docker/build-push-action@v5
with:
context: .
push: true
file: ${{ needs.setup.outputs.file }}
tags: ${{ needs.setup.outputs.tags }}
# provenance: false will result in a single manifest for all platforms which makes the image pullable from arm64 machines via the emulation (e.g. Apple Silicon machines)
provenance: false
build_components:
if: ${{ inputs.release_type == 'main' }}
runs-on: ubuntu-latest
needs: build
strategy:
matrix:
component: [backend, frontend]
include:
- component: backend
dockerfile: ./docker/build_and_push_backend.Dockerfile
tags: langflowai/langflow-backend:${{ inputs.version }},langflowai/langflow-backend:1.0-alpha
- component: frontend
dockerfile: ./docker/frontend/build_and_push_frontend.Dockerfile
tags: langflowai/langflow-frontend:${{ inputs.version }},langflowai/langflow-frontend:1.0-alpha
steps:
- uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Wait for Docker Hub to propagate (for backend)
run: sleep 120
- name: Build and push ${{ matrix.component }}
uses: docker/build-push-action@v5
with:
context: .
push: true
build-args: |
LANGFLOW_IMAGE=langflowai/langflow:${{ inputs.version }}
file: ${{ matrix.dockerfile }}
tags: ${{ matrix.tags }}
# provenance: false will result in a single manifest for all platforms which makes the image pullable from arm64 machines via the emulation (e.g. Apple Silicon machines)
provenance: false
restart-space:
name: Restart HuggingFace Spaces
if: ${{ inputs.release_type == 'main' }}
runs-on: ubuntu-latest
needs: build
strategy:
matrix:
python-version:
- "3.12"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_caching"
with:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}
- name: Install Python dependencies
run: |
poetry env use ${{ matrix.python-version }}
poetry install
- name: Restart HuggingFace Spaces Build
run: |
poetry run python ./scripts/factory_restart_space.py --space "Langflow/Langflow-Preview" --token ${{ secrets.HUGGINGFACE_API_TOKEN }}

64
.github/workflows/docker_test.yml vendored Normal file
View file

@ -0,0 +1,64 @@
name: Test Docker images
on:
push:
branches: [main]
paths:
- "docker/**"
- "poetry.lock"
- "pyproject.toml"
- "src/backend/**"
- ".github/workflows/docker_test.yml"
pull_request:
branches: [dev]
paths:
- "docker/**"
- "poetry.lock"
- "pyproject.toml"
- "src/**"
- ".github/workflows/docker_test.yml"
env:
POETRY_VERSION: "1.8.2"
jobs:
test-docker:
runs-on: ubuntu-latest
name: Test docker images
steps:
- uses: actions/checkout@v4
- name: Build image
run: |
docker build -t langflowai/langflow:latest-dev \
-f docker/build_and_push.Dockerfile \
.
- name: Test image
run: |
expected_version=$(cat pyproject.toml | grep version | head -n 1 | cut -d '"' -f 2)
version=$(docker run --rm --entrypoint bash langflowai/langflow:latest-dev -c 'python -c "from langflow.version import __version__ as langflow_version; print(langflow_version)"')
if [ "$expected_version" != "$version" ]; then
echo "Expected version: $expected_version"
echo "Actual version: $version"
exit 1
fi
- name: Build backend image
run: |
docker build -t langflowai/langflow-backend:latest-dev \
--build-arg LANGFLOW_IMAGE=langflowai/langflow:latest-dev \
-f docker/build_and_push_backend.Dockerfile \
.
- name: Test backend image
run: |
expected_version=$(cat pyproject.toml | grep version | head -n 1 | cut -d '"' -f 2)
version=$(docker run --rm --entrypoint bash langflowai/langflow-backend:latest-dev -c 'python -c "from langflow.version import __version__ as langflow_version; print(langflow_version)"')
if [ "$expected_version" != "$version" ]; then
echo "Expected version: $expected_version"
echo "Actual version: $version"
exit 1
fi
- name: Build frontend image
run: |
docker build -t langflowai/langflow-frontend:latest-dev \
-f docker/frontend/build_and_push_frontend.Dockerfile \
.

52
.github/workflows/lint-js.yml vendored Normal file
View file

@ -0,0 +1,52 @@
name: Lint Frontend
on:
pull_request:
paths:
- "src/frontend/**"
env:
NODE_VERSION: "21"
jobs:
run-linters:
name: Run Prettier
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
id: setup-node
with:
node-version: ${{ env.NODE_VERSION }}
- name: Cache Node.js dependencies
uses: actions/cache@v4
id: npm-cache
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('src/frontend/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Install Node.js dependencies
run: |
cd src/frontend
npm install
if: ${{ steps.setup-node.outputs.cache-hit != 'true' }}
- name: Run Prettier
run: |
cd src/frontend
npm run format
- name: Commit changes
uses: stefanzweifel/git-auto-commit-action@v5
with:
commit_message: Apply Prettier formatting
branch: ${{ github.head_ref }}

37
.github/workflows/lint-py.yml vendored Normal file
View file

@ -0,0 +1,37 @@
name: Lint Python
on:
pull_request:
paths:
- "poetry.lock"
- "pyproject.toml"
- "src/backend/**"
- "tests/**"
env:
POETRY_VERSION: "1.8.2"
jobs:
lint:
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.12"
- "3.11"
- "3.10"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_caching"
with:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}
- name: Install Python dependencies
run: |
poetry env use ${{ matrix.python-version }}
poetry install
make lint
env:
GITHUB_TOKEN: ${{ secrets.github_token }}

View file

@ -1,43 +0,0 @@
name: lint
on:
push:
branches: [main]
paths:
- "poetry.lock"
- "pyproject.toml"
- "src/backend/**"
pull_request:
paths:
- "poetry.lock"
- "pyproject.toml"
- "src/backend/**"
env:
POETRY_VERSION: "1.7.0"
jobs:
lint:
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.9"
- "3.10"
- "3.11"
steps:
- uses: actions/checkout@v4
- name: Install poetry
run: |
pipx install poetry==$POETRY_VERSION
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: poetry
- name: Install dependencies
run: |
poetry install
- name: Analysing the code with our lint
run: |
make lint

14
.github/workflows/matchers/ruff.json vendored Normal file
View file

@ -0,0 +1,14 @@
{
"problemMatcher": [
{
"owner": "ruff",
"pattern": [
{
"regexp": "^(Would reformat): (.+)$",
"message": 1,
"file": 2
}
]
}
]
}

26
.github/workflows/pr-checker.yml vendored Normal file
View file

@ -0,0 +1,26 @@
name: PR checker
on:
pull_request:
types: [opened, edited, labeled, unlabeled, synchronize]
jobs:
pr-checker:
name: Check PR description
runs-on: [ubuntu-latest]
steps:
- name: Run PR title check
uses: transferwise/actions-pr-checker@v3
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_TITLE_CONTAINS_PATTERN: ".{15,}" # Require at least 15 characters in the title
PR_COMMENT: |
Please provide a more meaningful PR title with at least 15 characters.
- name: Run PR description check
uses: transferwise/actions-pr-checker@v3
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_CONTAINS_PATTERN: ".{25,}" # Require at least 25 characters in the description
PR_COMMENT: |
Please provide a more meaningful PR description with at least 25 characters.

80
.github/workflows/pre-release-base.yml vendored Normal file
View file

@ -0,0 +1,80 @@
name: Langflow Base Pre-release
run-name: Langflow Base Pre-release by @${{ github.actor }}
on:
workflow_dispatch:
inputs:
release_package:
description: "Release package"
required: true
type: boolean
default: false
env:
POETRY_VERSION: "1.8.2"
jobs:
release:
name: Release Langflow Base
if: inputs.release_package == true
runs-on: ubuntu-latest
outputs:
version: ${{ steps.check-version.outputs.version }}
steps:
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry==${{ env.POETRY_VERSION }}
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
cache: "poetry"
- name: Check Version
id: check-version
# In this step, we should check the version of the package
# and see if it is a version that is already released
# echo version=$(cd src/backend/base && poetry version --short) >> $GITHUB_OUTPUT
# cd src/backend/base && poetry version --short should
# be different than the last release version in pypi
# which we can get from curl -s "https://pypi.org/pypi/langflow/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1
run: |
version=$(cd src/backend/base && poetry version --short)
last_released_version=$(curl -s "https://pypi.org/pypi/langflow-base/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
if [ "$version" = "$last_released_version" ]; then
echo "Version $version is already released. Skipping release."
exit 1
else
echo version=$version >> $GITHUB_OUTPUT
fi
- name: Build project for distribution
run: make build base=true
- name: Publish to PyPI
env:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
make publish base=true
docker_build:
name: Build Docker Image
runs-on: ubuntu-latest
needs: release
steps:
- uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
id: qemu
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
push: true
file: ./docker/build_and_push_base.Dockerfile
tags: |
langflowai/langflow:base-${{ needs.release.outputs.version }}
# provenance: false will result in a single manifest for all platforms which makes the image pullable from arm64 machines via the emulation (e.g. Apple Silicon machines)
provenance: false

View file

@ -0,0 +1,133 @@
name: Langflow Pre-release
run-name: Langflow Pre-release by @${{ github.actor }}
on:
workflow_dispatch:
inputs:
release_package:
description: "Release package"
required: true
type: boolean
default: false
workflow_run:
workflows: ["pre-release-base"]
types: [completed]
branches: [dev]
env:
POETRY_VERSION: "1.8.2"
jobs:
release:
name: Release Langflow
if: inputs.release_package == true
runs-on: ubuntu-latest
outputs:
version: ${{ steps.check-version.outputs.version }}
steps:
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry==${{ env.POETRY_VERSION }}
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
cache: "poetry"
- name: Check Version
id: check-version
run: |
version=$(poetry version --short)
last_released_version=$(curl -s "https://pypi.org/pypi/langflow/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
if [ "$version" = "$last_released_version" ]; then
echo "Version $version is already released. Skipping release."
exit 1
else
echo version=$version >> $GITHUB_OUTPUT
fi
- name: Build project for distribution
run: make build main=true
- name: Display pyproject.toml langflow-base Version
run: cat pyproject.toml | grep langflow-base
- name: Publish to PyPI
env:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
make publish main=true
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: dist
path: dist
docker_build:
name: Build Docker Image
runs-on: ubuntu-latest
needs: release
steps:
- uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
id: qemu
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
push: true
file: ./docker/build_and_push.Dockerfile
# provenance: false will result in a single manifest for all platforms which makes the image pullable from arm64 machines via the emulation (e.g. Apple Silicon machines)
provenance: false
tags: |
langflowai/langflow:${{ needs.release.outputs.version }}
langflowai/langflow:1.0-alpha
- name: Build and push (frontend)
uses: docker/build-push-action@v5
with:
context: .
push: true
file: ./docker/frontend/build_and_push_frontend.Dockerfile
# provenance: false will result in a single manifest for all platforms which makes the image pullable from arm64 machines via the emulation (e.g. Apple Silicon machines)
provenance: false
tags: |
langflowai/langflow-frontend:${{ needs.release.outputs.version }}
langflowai/langflow-frontend:1.0-alpha
- name: Wait for Docker Hub to propagate
run: sleep 120
- name: Build and push (backend)
uses: docker/build-push-action@v5
with:
context: .
push: true
file: ./docker/build_and_push_backend.Dockerfile
# provenance: false will result in a single manifest for all platforms which makes the image pullable from arm64 machines via the emulation (e.g. Apple Silicon machines)
provenance: false
build-args: |
LANGFLOW_IMAGE=langflowai/langflow:${{ needs.release.outputs.version }}
tags: |
langflowai/langflow-backend:${{ needs.release.outputs.version }}
langflowai/langflow-backend:1.0-alpha
create_release:
name: Create Release
runs-on: ubuntu-latest
needs: [docker_build, release]
steps:
- uses: actions/download-artifact@v4
with:
name: dist
path: dist
- name: Create Release
uses: ncipollo/release-action@v1
with:
artifacts: "dist/*"
token: ${{ secrets.GITHUB_TOKEN }}
draft: false
generateReleaseNotes: true
prerelease: true
tag: v${{ needs.release.outputs.version }}
commit: dev

View file

@ -1,37 +1,122 @@
name: pre-release
name: Langflow Pre-release (Unified)
run-name: Langflow (${{inputs.release_type}}) Pre-release by @${{ github.actor }}
on:
pull_request:
types:
- closed
branches:
- dev
paths:
- "pyproject.toml"
workflow_dispatch:
inputs:
release_package:
description: "Release package"
required: true
type: boolean
default: false
release_type:
description: "Type of release (base or main)"
required: true
type: choice
options:
- base
- main
env:
POETRY_VERSION: "1.5.1"
POETRY_VERSION: "1.8.2"
jobs:
if_release:
if: ${{ (github.event.pull_request.merged == true) && contains(github.event.pull_request.labels.*.name, 'pre-release') }}
release:
name: Release Langflow
if: inputs.release_package == true
runs-on: ubuntu-latest
outputs:
version: ${{ steps.check-version.outputs.version }}
steps:
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry==$POETRY_VERSION
run: pipx install poetry==${{ env.POETRY_VERSION }}
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
cache: "poetry"
- name: Build project for distribution
run: make build
- name: Set up Nodejs 20
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Check Version
id: check-version
run: |
echo version=$(poetry version --short) >> $GITHUB_OUTPUT
if [ "${{ inputs.release_type }}" == "base" ]; then
version=$(cd src/backend/base && poetry version --short)
last_released_version=$(curl -s "https://pypi.org/pypi/langflow-base/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
else
version=$(poetry version --short)
last_released_version=$(curl -s "https://pypi.org/pypi/langflow/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
fi
if [ "$version" = "$last_released_version" ]; then
echo "Version $version is already released. Skipping release."
exit 1
else
echo version=$version >> $GITHUB_OUTPUT
fi
- name: Build project for distribution
run: |
if [ "${{ inputs.release_type }}" == "base" ]; then
make build base=true
else
make build main=true
fi
- name: Test CLI
run: |
if [ "${{ inputs.release_type }}" == "base" ]; then
python -m pip install src/backend/base/dist/*.whl
else
python -m pip install dist/*.whl
fi
python -m langflow run --host 127.0.0.1 --port 7860 &
SERVER_PID=$!
# Wait for the server to start
timeout 120 bash -c 'until curl -f http://127.0.0.1:7860/health; do sleep 2; done' || (echo "Server did not start in time" && kill $SERVER_PID && exit 1)
# Terminate the server
kill $SERVER_PID || (echo "Failed to terminate the server" && exit 1)
sleep 10 # give the server some time to terminate
# Check if the server is still running
if kill -0 $SERVER_PID 2>/dev/null; then
echo "Failed to terminate the server"
exit 1
else
echo "Server terminated successfully"
fi
- name: Publish to PyPI
env:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
if [ "${{ inputs.release_type }}" == "base" ]; then
make publish base=true
else
make publish main=true
fi
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: dist${{ inputs.release_type }}
path: ${{ inputs.release_type == 'base' && 'src/backend/base/dist' || 'dist' }}
call_docker_build:
name: Call Docker Build Workflow
needs: release
uses: langflow-ai/langflow/.github/workflows/docker-build.yml@dev
with:
version: ${{ needs.release.outputs.version }}
release_type: ${{ inputs.release_type }}
secrets: inherit
create_release:
name: Create Release
runs-on: ubuntu-latest
needs: [release]
if: ${{ inputs.release_type == 'main' }}
steps:
- uses: actions/download-artifact@v4
with:
name: dist${{ inputs.release_type }}
path: dist
- name: Create Release
uses: ncipollo/release-action@v1
with:
@ -40,26 +125,5 @@ jobs:
draft: false
generateReleaseNotes: true
prerelease: true
tag: v${{ steps.check-version.outputs.version }}
tag: v${{ needs.release.outputs.version }}
commit: dev
- name: Publish to PyPI
env:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
poetry publish
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
push: true
file: ./build_and_push.Dockerfile
tags: logspace/langflow:${{ steps.check-version.outputs.version }}

View file

@ -15,7 +15,7 @@ on:
- "src/backend/**"
env:
POETRY_VERSION: "1.5.0"
POETRY_VERSION: "1.8.2"
jobs:
build:
@ -23,21 +23,23 @@ jobs:
strategy:
matrix:
python-version:
- "3.10"
- "3.12"
- "3.11"
- "3.10"
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
steps:
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry==$POETRY_VERSION
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_caching"
with:
python-version: ${{ matrix.python-version }}
cache: "poetry"
- name: Install dependencies
run: poetry install
poetry-version: ${{ env.POETRY_VERSION }}
cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}
- name: Install Python dependencies
run: |
poetry env use ${{ matrix.python-version }}
poetry install
- name: Run unit tests
run: |
make tests
make unit_tests args="-n auto"

View file

@ -1,36 +1,128 @@
name: release
name: Langflow Release
run-name: Langflow (${{inputs.release_type}}) Release by @${{ github.actor }}
on:
pull_request:
types:
- closed
branches:
- main
paths:
- "pyproject.toml"
workflow_dispatch:
inputs:
release_package:
description: "Release package"
required: true
type: boolean
default: false
release_type:
description: "Type of release (base or main)"
required: true
type: choice
options:
- base
- main
pre_release:
description: "Pre-release"
required: false
type: boolean
default: true
env:
POETRY_VERSION: "1.5.1"
POETRY_VERSION: "1.8.2"
jobs:
if_release:
if: ${{ (github.event.pull_request.merged == true) && contains(github.event.pull_request.labels.*.name, 'Release') }}
release:
name: Release Langflow
if: inputs.release_package == true
runs-on: ubuntu-latest
outputs:
version: ${{ steps.check-version.outputs.version }}
steps:
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry==$POETRY_VERSION
run: pipx install poetry==${{ env.POETRY_VERSION }}
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
cache: "poetry"
- name: Build project for distribution
run: make build
- name: Set up Nodejs 20
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Check Version
id: check-version
run: |
echo version=$(poetry version --short) >> $GITHUB_OUTPUT
if [ "${{ inputs.release_type }}" == "base" ]; then
version=$(cd src/backend/base && poetry version --short)
last_released_version=$(curl -s "https://pypi.org/pypi/langflow-base/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
else
version=$(poetry version --short)
last_released_version=$(curl -s "https://pypi.org/pypi/langflow/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
fi
if [ "$version" = "$last_released_version" ]; then
echo "Version $version is already released. Skipping release."
exit 1
else
echo version=$version >> $GITHUB_OUTPUT
fi
- name: Build project for distribution
run: |
if [ "${{ inputs.release_type }}" == "base" ]; then
make build base=true
else
make build main=true
fi
- name: Test CLI
run: |
if [ "${{ inputs.release_type }}" == "base" ]; then
python -m pip install src/backend/base/dist/*.whl
else
python -m pip install dist/*.whl
fi
python -m langflow run --host 127.0.0.1 --port 7860 &
SERVER_PID=$!
# Wait for the server to start
timeout 120 bash -c 'until curl -f http://127.0.0.1:7860/health; do sleep 2; done' || (echo "Server did not start in time" && kill $SERVER_PID && exit 1)
# Terminate the server
kill $SERVER_PID || (echo "Failed to terminate the server" && exit 1)
sleep 10 # give the server some time to terminate
# Check if the server is still running
if kill -0 $SERVER_PID 2>/dev/null; then
echo "Failed to terminate the server"
exit 1
else
echo "Server terminated successfully"
fi
- name: Publish to PyPI
env:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
if [ "${{ inputs.release_type }}" == "base" ]; then
make publish base=true
else
make publish main=true
fi
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: dist${{ inputs.release_type }}
path: ${{ inputs.release_type == 'base' && 'src/backend/base/dist' || 'dist' }}
call_docker_build:
name: Call Docker Build Workflow
needs: release
uses: langflow-ai/langflow/.github/workflows/docker-build.yml@main
with:
version: ${{ needs.release.outputs.version }}
release_type: ${{ inputs.release_type }}
pre_release: ${{ inputs.pre_release }}
secrets: inherit
create_release:
name: Create Release
runs-on: ubuntu-latest
needs: [release]
if: ${{ inputs.release_type == 'main' }}
steps:
- uses: actions/download-artifact@v4
with:
name: dist${{ inputs.release_type }}
path: dist
- name: Create Release
uses: ncipollo/release-action@v1
with:
@ -38,28 +130,6 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
draft: false
generateReleaseNotes: true
tag: v${{ steps.check-version.outputs.version }}
commit: main
- name: Publish to PyPI
env:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
poetry publish
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
push: true
file: ./build_and_push.Dockerfile
tags: |
logspace/langflow:${{ steps.check-version.outputs.version }}
logspace/langflow:latest
prerelease: false
tag: v${{ needs.release.outputs.version }}
commit: dev

46
.github/workflows/style-check-py.yml vendored Normal file
View file

@ -0,0 +1,46 @@
# Lint workflow: runs Ruff on backend Python changes and auto-commits
# any formatting fixes back to the pull-request branch.
name: Ruff Style Check

# Trigger only when Python sources or dependency manifests change.
on:
  pull_request:
    paths:
      - "poetry.lock"
      - "pyproject.toml"
      - "src/backend/**"
      - "tests/**"

env:
  POETRY_VERSION: "1.8.2"

jobs:
  lint:
    name: Ruff Style Check
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.12"
    steps:
      - uses: actions/checkout@v4
      # Repo-local composite action that installs Poetry and restores its cache.
      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_caching"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}
      - name: Install Python dependencies
        run: |
          poetry env use ${{ matrix.python-version }}
          poetry install
      # Problem matcher turns Ruff diagnostics into inline PR annotations.
      - name: Register problem matcher
        run: echo "::add-matcher::.github/workflows/matchers/ruff.json"
      - name: Run Ruff
        run: poetry run ruff check --output-format=github .
      - name: Run Ruff format
        run: poetry run ruff format .
      # Push any formatting changes produced above back to the PR head branch.
      - name: Commit changes
        uses: stefanzweifel/git-auto-commit-action@v5
        with:
          commit_message: Apply Ruff formatting
          branch: ${{ github.head_ref }}

130
.github/workflows/typescript_test.yml vendored Normal file
View file

@ -0,0 +1,130 @@
# Frontend test workflow: runs the Playwright end-to-end suite in four
# parallel shards, then merges the per-shard blob reports into one HTML report.
name: Run Frontend Tests

on:
  pull_request:
    paths:
      - "src/frontend/**"

env:
  POETRY_VERSION: "1.8.2"
  NODE_VERSION: "21"
  PYTHON_VERSION: "3.12"
  # Define the directory where Playwright browsers will be installed.
  # Adjust if your project uses a different path.
  PLAYWRIGHT_BROWSERS_PATH: "ms-playwright"

jobs:
  setup-and-test:
    runs-on: ubuntu-latest
    strategy:
      # Keep the remaining shards running even if one fails.
      fail-fast: false
      matrix:
        shardIndex: [1, 2, 3, 4]
        shardTotal: [4]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        id: setup-node
        with:
          node-version: ${{ env.NODE_VERSION }}
      - name: Cache Node.js dependencies
        uses: actions/cache@v4
        id: npm-cache
        with:
          path: ~/.npm
          key: ${{ runner.os }}-node-${{ hashFiles('src/frontend/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-
      - name: Install Node.js dependencies
        run: |
          cd src/frontend
          npm ci
        if: ${{ steps.setup-node.outputs.cache-hit != 'true' }}
      - name: Cache playwright binaries
        uses: actions/cache@v4
        id: playwright-cache
        with:
          path: |
            ~/.cache/ms-playwright
          key: ${{ runner.os }}-playwright-${{ hashFiles('src/frontend/package-lock.json') }}
      # NOTE(review): this unconditional `npm ci` duplicates the conditional
      # install step above — looks redundant; confirm before removing.
      - name: Install Frontend dependencies
        run: |
          cd src/frontend
          npm ci
      - name: Install Playwright's browser binaries
        run: |
          cd src/frontend
          npx playwright install --with-deps
        if: steps.playwright-cache.outputs.cache-hit != 'true'
      - name: Install Playwright's dependencies
        run: |
          cd src/frontend
          npx playwright install-deps
        if: steps.playwright-cache.outputs.cache-hit != 'true'
      # Repo-local composite action that installs Poetry and restores its cache.
      - name: Set up Python ${{ env.PYTHON_VERSION }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_caching"
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          poetry-version: ${{ env.POETRY_VERSION }}
          cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}
      - name: Install Python dependencies
        run: |
          poetry env use ${{ env.PYTHON_VERSION }}
          poetry install
      # Materialize repository secrets into a local .env consumed by the app.
      - name: create .env
        run: |
          touch .env
          echo "${{ secrets.ENV_VARS }}" > .env
      - name: Run Playwright Tests
        run: |
          cd src/frontend
          npx playwright test --shard ${{ matrix.shardIndex }}/${{ matrix.shardTotal }} --workers 2
      # Each shard uploads a blob report; the merge-reports job combines them.
      - name: Upload blob report to GitHub Actions Artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: blob-report-${{ matrix.shardIndex }}
          path: src/frontend/blob-report
          retention-days: 1

  merge-reports:
    # Merge all shard blob reports into a single HTML report,
    # even when some shards failed (if: always()).
    needs: setup-and-test
    runs-on: ubuntu-latest
    if: always()
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
      - name: Download blob reports from GitHub Actions Artifacts
        uses: actions/download-artifact@v4
        with:
          path: all-blob-reports
          pattern: blob-report-*
          merge-multiple: true
      - name: Merge into HTML Report
        run: |
          npx playwright merge-reports --reporter html ./all-blob-reports
      - name: Upload HTML report
        uses: actions/upload-artifact@v4
        with:
          name: html-report--attempt-${{ github.run_attempt }}
          path: playwright-report
          retention-days: 14

14
.gitignore vendored
View file

@ -180,6 +180,8 @@ coverage.xml
local_settings.py
db.sqlite3
db.sqlite3-journal
*.db-shm
*.db-wal
# Flask stuff:
instance/
@ -256,10 +258,6 @@ langflow.db
# docusaurus
.docusaurus/
/tmp/*
src/backend/langflow/frontend/
.docker
scratchpad*
/tmp/*
src/backend/langflow/frontend/
src/backend/base/langflow/frontend/
@ -267,4 +265,10 @@ src/backend/base/langflow/frontend/
scratchpad*
chroma*/*
stuff/*
src/frontend/playwright-report/index.html
src/frontend/playwright-report/index.html
*.bak
prof/*
src/frontend/temp
*-shm
*-wal

14
.pre-commit-config.yaml Normal file
View file

@ -0,0 +1,14 @@
# pre-commit configuration: basic hygiene hooks from pre-commit/pre-commit-hooks.
# fail_fast stops at the first failing hook instead of running all of them.
fail_fast: true
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.1.0
    hooks:
      - id: check-case-conflict
      - id: end-of-file-fixer
        # python, js and ts only
        files: \.(py|js|ts)$
      - id: mixed-line-ending
        files: \.(py|js|ts)$
        args:
          - --fix=lf
      - id: trailing-whitespace

View file

@ -1,31 +0,0 @@
# Read the Docs configuration file for Sphinx projects
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Set the OS, Python version and other tools you might need
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
    # You can also specify other tool versions:
    # nodejs: "19"
    # rust: "1.64"
    # golang: "1.19"

# Build documentation in the "docs/" directory with Sphinx
sphinx:
  configuration: docs/conf.py

# Optionally build your docs in additional formats such as PDF and ePub
# formats:
#   - pdf
#   - epub

# Optional but recommended, declare the Python requirements required
# to build your documentation
# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
# python:
#   install:
#     - requirements: docs/requirements.txt

34
.vscode/launch.json vendored
View file

@ -3,7 +3,7 @@
"configurations": [
{
"name": "Debug Backend",
"type": "python",
"type": "debugpy",
"request": "launch",
"module": "uvicorn",
"args": [
@ -13,15 +13,39 @@
"7860",
"--reload",
"--log-level",
"debug"
"debug",
"--loop",
"asyncio",
"--reload-include",
"src/backend/*"
],
"jinja": true,
"justMyCode": true,
"justMyCode": false,
"env": {
"LANGFLOW_LOG_LEVEL": "debug"
},
"envFile": "${workspaceFolder}/.env"
},
{
"name": "Debug CLI",
"type": "debugpy",
"request": "launch",
"module": "langflow",
"args": [
"run",
"--path",
"${workspaceFolder}/src/backend/base/langflow/frontend"
],
"jinja": true,
"justMyCode": false,
"env": {
"LANGFLOW_LOG_LEVEL": "debug"
},
"envFile": "${workspaceFolder}/.env"
},
{
"name": "Python: Remote Attach",
"type": "python",
"type": "debugpy",
"request": "attach",
"justMyCode": true,
"connect": {
@ -43,7 +67,7 @@
},
{
"name": "Python: Debug Tests",
"type": "python",
"type": "debugpy",
"request": "launch",
"program": "${file}",
"purpose": ["debug-test"],

View file

@ -5,7 +5,7 @@
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
identity and expression, level of experience, education, socioeconomic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
@ -17,23 +17,23 @@ diverse, inclusive, and healthy community.
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
- Demonstrating empathy and kindness toward other people
- Being respectful of differing opinions, viewpoints, and experiences
- Giving and gracefully accepting constructive feedback
- Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
- Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
- The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
- Trolling, insulting or derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
- Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
@ -60,7 +60,7 @@ representative at an online or offline event.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
contact@logspace.ai.
contact@langflow.org.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
@ -106,7 +106,7 @@ Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within

View file

@ -7,6 +7,19 @@ to contributions, whether it be in the form of a new feature, improved infra, or
To contribute to this project, please follow a ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow.
Please do not try to push directly to this repo unless you are a maintainer.
## Linear History
We strive to maintain a linear history in our git repository. This means that we do not accept merge commits in pull requests. To achieve this, we ask that you rebase your branch on top of the `dev` branch before opening a pull request. This can be done by running the following commands:
```bash
git checkout dev
git pull
git checkout <your-branch>
git rebase dev
# Fix any conflicts that arise
git push --force-with-lease
```
The branch structure is as follows:
- `main`: The stable version of Langflow
@ -16,12 +29,12 @@ The branch structure is as follows:
## 🚩GitHub Issues
Our [issues](https://github.com/logspace-ai/langflow/issues) page is kept up to date
Our [issues](https://github.com/langflow-ai/langflow/issues) page is kept up to date
with bugs, improvements, and feature requests. There is a taxonomy of labels to help
with sorting and discovery of issues of interest.
If you're looking for help with your code, consider posting a question on the
[GitHub Discussions board](https://github.com/logspace-ai/langflow/discussions). Please
[GitHub Discussions board](https://github.com/langflow-ai/langflow/discussions). Please
understand that we won't be able to provide individual support via email. We
also believe that help is much more valuable if it's **shared publicly**,
so that more people can benefit from it.
@ -40,7 +53,7 @@ so that more people can benefit from it.
## Issue labels
[See this page](https://github.com/logspace-ai/langflow/labels) for an overview of
[See this page](https://github.com/langflow-ai/langflow/labels) for an overview of
the system we use to tag our issues and pull requests.
## Local development

View file

@ -1,6 +1,6 @@
MIT License
Copyright (c) 2023 Logspace
Copyright (c) 2024 Langflow
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

266
Makefile
View file

@ -1,111 +1,238 @@
.PHONY: all init format lint build build_frontend install_frontend run_frontend run_backend dev help tests coverage
all: help
log_level ?= debug
host ?= 0.0.0.0
port ?= 7860
env ?= .env
open_browser ?= true
path = src/backend/base/langflow/frontend
workers ?= 1
codespell:
@poetry install --with spelling
poetry run codespell --toml pyproject.toml
fix_codespell:
@poetry install --with spelling
poetry run codespell --toml pyproject.toml --write
setup_poetry:
pipx install poetry
add:
@echo 'Adding dependencies'
ifdef devel
cd src/backend/base && poetry add --group dev $(devel)
endif
ifdef main
poetry add $(main)
endif
ifdef base
cd src/backend/base && poetry add $(base)
endif
init:
@echo 'Installing pre-commit hooks'
git config core.hooksPath .githooks
@echo 'Making pre-commit hook executable'
chmod +x .githooks/pre-commit
@echo 'Installing backend dependencies'
make install_backend
@echo 'Installing frontend dependencies'
make install_frontend
coverage:
coverage: ## run the tests and generate a coverage report
poetry run pytest --cov \
--cov-config=.coveragerc \
--cov-report xml \
--cov-report term-missing:skip-covered
--cov-report term-missing:skip-covered \
--cov-report lcov:coverage/lcov-pytest.info
# allow passing arguments to pytest
tests:
@make install_backend
unit_tests:
poetry run pytest --ignore=tests/integration --instafail -ra -n auto -m "not api_key_required" $(args)
poetry run pytest tests --instafail $(args)
# Use like:
format:
poetry run ruff . --fix
integration_tests:
poetry run pytest tests/integration --instafail -ra -n auto $(args)
format: ## run code formatters
poetry run ruff check . --fix
poetry run ruff format .
cd src/frontend && npm run format
lint:
make install_backend
poetry run mypy src/backend/langflow
poetry run ruff . --fix
install_frontend:
lint: ## run linters
poetry run mypy --namespace-packages -p "langflow"
install_frontend: ## install the frontend dependencies
cd src/frontend && npm install
install_frontendci:
cd src/frontend && npm ci
install_frontendc:
cd src/frontend && rm -rf node_modules package-lock.json && npm install
run_frontend:
@-kill -9 `lsof -t -i:3000`
cd src/frontend && npm start
tests_frontend:
ifeq ($(UI), true)
cd src/frontend && ./run-tests.sh --ui
cd src/frontend && npx playwright test --ui --project=chromium
else
cd src/frontend && ./run-tests.sh
cd src/frontend && npx playwright test --project=chromium
endif
run_cli:
poetry run langflow run --path src/frontend/build
@echo 'Running the CLI'
@make install_frontend > /dev/null
@echo 'Install backend dependencies'
@make install_backend > /dev/null
@echo 'Building the frontend'
@make build_frontend > /dev/null
ifdef env
@make start env=$(env) host=$(host) port=$(port) log_level=$(log_level)
else
@make start host=$(host) port=$(port) log_level=$(log_level)
endif
run_cli_debug:
poetry run langflow run --path src/frontend/build --log-level debug
@echo 'Running the CLI in debug mode'
@make install_frontend > /dev/null
@echo 'Building the frontend'
@make build_frontend > /dev/null
@echo 'Install backend dependencies'
@make install_backend > /dev/null
ifdef env
@make start env=$(env) host=$(host) port=$(port) log_level=debug
else
@make start host=$(host) port=$(port) log_level=debug
endif
start:
@echo 'Running the CLI'
ifeq ($(open_browser),false)
@make install_backend && poetry run langflow run --path $(path) --log-level $(log_level) --host $(host) --port $(port) --env-file $(env) --no-open-browser
else
@make install_backend && poetry run langflow run --path $(path) --log-level $(log_level) --host $(host) --port $(port) --env-file $(env)
endif
setup_devcontainer:
make init
make build_frontend
poetry run langflow --path src/frontend/build
frontend:
setup_env:
@sh ./scripts/setup/update_poetry.sh 1.8.2
@sh ./scripts/setup/setup_env.sh
frontend: ## run the frontend in development mode
make install_frontend
make run_frontend
frontendc:
make install_frontendc
make run_frontend
install_backend:
poetry install --extras deploy
backend:
install_backend:
@echo 'Installing backend dependencies'
@poetry install
@poetry run pre-commit install
backend: ## run the backend in development mode
@echo 'Setting up the environment'
@make setup_env
make install_backend
@-kill -9 `lsof -t -i:7860`
ifeq ($(login),1)
@echo "Running backend without autologin";
poetry run langflow run --backend-only --port 7860 --host 0.0.0.0 --no-open-browser --env-file .env
@-kill -9 $(lsof -t -i:7860)
ifdef login
@echo "Running backend autologin is $(login)";
LANGFLOW_AUTO_LOGIN=$(login) poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio --workers $(workers)
else
@echo "Running backend with autologin";
LANGFLOW_AUTO_LOGIN=True poetry run langflow run --backend-only --port 7860 --host 0.0.0.0 --no-open-browser --env-file .env
@echo "Running backend respecting the .env file";
poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio --workers $(workers)
endif
build_and_run:
echo 'Removing dist folder'
@echo 'Removing dist folder'
@make setup_env
rm -rf dist
make build && poetry run pip install dist/*.tar.gz && poetry run langflow run
rm -rf src/backend/base/dist
make build
poetry run pip install dist/*.tar.gz
poetry run langflow run
build_and_install:
echo 'Removing dist folder'
@echo 'Removing dist folder'
rm -rf dist
make build && poetry run pip install dist/*.tar.gz
rm -rf src/backend/base/dist
make build && poetry run pip install dist/*.whl && pip install src/backend/base/dist/*.whl --force-reinstall
build_frontend:
build_frontend: ## build the frontend static files
cd src/frontend && CI='' npm run build
cp -r src/frontend/build src/backend/langflow/frontend
rm -rf src/backend/base/langflow/frontend
cp -r src/frontend/build src/backend/base/langflow/frontend
build:
make install_frontend
build: ## build the frontend static files and package the project
@echo 'Building the project'
@make setup_env
ifdef base
make install_frontendci
make build_frontend
poetry build --format sdist
rm -rf src/backend/langflow/frontend
make build_langflow_base
endif
dev:
ifdef main
make build_langflow
endif
build_langflow_base:
cd src/backend/base && poetry build
rm -rf src/backend/base/langflow/frontend
build_langflow_backup:
poetry lock && poetry build
build_langflow:
cd ./scripts && poetry run python update_dependencies.py
poetry lock
poetry build
ifdef restore
mv pyproject.toml.bak pyproject.toml
mv poetry.lock.bak poetry.lock
endif
dev: ## run the project in development mode with docker compose
make install_frontend
ifeq ($(build),1)
@echo 'Running docker compose up with build'
@ -115,21 +242,48 @@ else
docker compose $(if $(debug),-f docker-compose.debug.yml) up
endif
publish:
make build
lock_base:
cd src/backend/base && poetry lock
lock_langflow:
poetry lock
lock:
# Run both in parallel
@echo 'Locking dependencies'
cd src/backend/base && poetry lock
poetry lock
update:
@echo 'Updating dependencies'
cd src/backend/base && poetry update
poetry update
publish_base:
cd src/backend/base && poetry publish
publish_langflow:
poetry publish
help:
publish: ## build the frontend static files and package the project and publish it to PyPI
@echo 'Publishing the project'
ifdef base
make publish_base
endif
ifdef main
make publish_langflow
endif
help: ## show this help message
@echo '----'
@echo 'format - run code formatters'
@echo 'lint - run linters'
@echo 'install_frontend - install the frontend dependencies'
@echo 'build_frontend - build the frontend static files'
@echo 'run_frontend - run the frontend in development mode'
@echo 'run_backend - run the backend in development mode'
@echo 'build - build the frontend static files and package the project'
@echo 'publish - build the frontend static files and package the project and publish it to PyPI'
@echo 'dev - run the project in development mode with docker compose'
@echo 'tests - run the tests'
@echo 'coverage - run the tests and generate a coverage report'
@echo -e "$$(grep -hE '^\S+:.*##' $(MAKEFILE_LIST) | \
sed -e 's/:.*##\s*/:/' \
-e 's/^\(.\+\):\(.*\)/\\x1b[36mmake \1\\x1b[m:\2/' | \
column -c2 -t -s :)"
@echo '----'

171
README.PT.md Normal file
View file

@ -0,0 +1,171 @@
<!-- markdownlint-disable MD030 -->
# [![Langflow](./docs/static/img/hero.png)](https://www.langflow.org)
<p align="center"><strong>
Um framework visual para criar apps de agentes autônomos e RAG
</strong></p>
<p align="center" style="font-size: 12px;">
Open-source, construído em Python, totalmente personalizável, agnóstico em relação a modelos e databases
</p>
<p align="center" style="font-size: 12px;">
<a href="https://docs.langflow.org" style="text-decoration: underline;">Docs</a> -
<a href="https://discord.com/invite/EqksyE2EX9" style="text-decoration: underline;">Junte-se ao nosso Discord</a> -
<a href="https://twitter.com/langflow_ai" style="text-decoration: underline;">Siga-nos no X</a> -
<a href="https://huggingface.co/spaces/Langflow/Langflow-Preview" style="text-decoration: underline;">Demonstração</a>
</p>
<p align="center">
<a href="https://github.com/langflow-ai/langflow">
<img src="https://img.shields.io/github/stars/langflow-ai/langflow">
</a>
<a href="https://discord.com/invite/EqksyE2EX9">
<img src="https://img.shields.io/discord/1116803230643527710?label=Discord">
</a>
</p>
<div align="center">
<a href="./README.md"><img alt="README em Inglês" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README.zh_CN.md"><img alt="README em Chinês Simplificado" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
</div>
<p align="center">
<img src="./docs/static/img/langflow_basic_howto.gif" alt="Seu GIF" style="border: 3px solid #211C43;">
</p>
# 📝 Conteúdo
- [📝 Conteúdo](#-conteúdo)
- [📦 Introdução](#-introdução)
- [🎨 Criar Fluxos](#-criar-fluxos)
- [Deploy](#deploy)
- [Deploy usando Google Cloud Platform](#deploy-usando-google-cloud-platform)
- [Deploy on Railway](#deploy-on-railway)
- [Deploy on Render](#deploy-on-render)
- [🖥️ Interface de Linha de Comando (CLI)](#-interface-de-linha-de-comando-cli)
- [Uso](#uso)
- [Variáveis de Ambiente](#variáveis-de-ambiente)
- [👋 Contribuir](#-contribuir)
- [🌟 Contribuidores](#-contribuidores)
- [📄 Licença](#-licença)
# 📦 Introdução
Você pode instalar o Langflow com pip:
```shell
# Certifique-se de ter >=Python 3.10 instalado no seu sistema.
# Instale a versão pré-lançamento (recomendada para as atualizações mais recentes)
python -m pip install langflow --pre --force-reinstall
# ou versão estável
python -m pip install langflow -U
```
Então, execute o Langflow com:
```shell
python -m langflow run
```
Você também pode visualizar o Langflow no [HuggingFace Spaces](https://huggingface.co/spaces/Langflow/Langflow-Preview). [Clone o Space usando este link](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true) para criar seu próprio workspace do Langflow em minutos.
# 🎨 Criar Fluxos
Criar fluxos com Langflow é fácil. Basta arrastar componentes da barra lateral para o workspace e conectá-los para começar a construir sua aplicação.
Explore editando os parâmetros do prompt, agrupando componentes e construindo seus próprios componentes personalizados (Custom Components).
Quando terminar, você pode exportar seu fluxo como um arquivo JSON.
Carregue o fluxo com:
```python
from langflow.load import run_flow_from_json
results = run_flow_from_json("path/to/flow.json", input_value="Hello, World!")
```
# Deploy
## Deploy usando Google Cloud Platform
Siga nosso passo a passo para fazer deploy do Langflow no Google Cloud Platform (GCP) usando o Google Cloud Shell. O guia está disponível no documento [**Langflow on Google Cloud Platform**](https://github.com/langflow-ai/langflow/blob/dev/docs/docs/deployment/gcp-deployment.md).
Alternativamente, clique no botão **"Open in Cloud Shell"** abaixo para iniciar o Google Cloud Shell, clonar o repositório do Langflow e começar um **tutorial interativo** que o guiará pelo processo de configuração dos recursos necessários e deploy do Langflow no seu projeto GCP.
[![Open on Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
## Deploy on Railway
Use este template para implantar o Langflow 1.0 Preview no Railway:
[![Deploy 1.0 Preview on Railway](https://railway.app/button.svg)](https://railway.app/template/UsJ1uB?referralCode=MnPSdg)
Ou este para implantar o Langflow 0.6.x:
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/JMXEWp?referralCode=MnPSdg)
## Deploy on Render
<a href="https://render.com/deploy?repo=https://github.com/langflow-ai/langflow/tree/dev">
<img src="https://render.com/images/deploy-to-render-button.svg" alt="Deploy to Render" />
</a>
# 🖥️ Interface de Linha de Comando (CLI)
O Langflow fornece uma interface de linha de comando (CLI) para fácil gerenciamento e configuração.
## Uso
Você pode executar o Langflow usando o seguinte comando:
```shell
langflow run [OPTIONS]
```
Cada opção é detalhada abaixo:
- `--help`: Exibe todas as opções disponíveis.
- `--host`: Define o host para vincular o servidor. Pode ser configurado usando a variável de ambiente `LANGFLOW_HOST`. O padrão é `127.0.0.1`.
- `--workers`: Define o número de processos. Pode ser configurado usando a variável de ambiente `LANGFLOW_WORKERS`. O padrão é `1`.
- `--timeout`: Define o tempo limite do worker em segundos. O padrão é `60`.
- `--port`: Define a porta para escutar. Pode ser configurado usando a variável de ambiente `LANGFLOW_PORT`. O padrão é `7860`.
- `--env-file`: Especifica o caminho para o arquivo .env contendo variáveis de ambiente. O padrão é `.env`.
- `--log-level`: Define o nível de log. Pode ser configurado usando a variável de ambiente `LANGFLOW_LOG_LEVEL`. O padrão é `critical`.
- `--components-path`: Especifica o caminho para o diretório contendo componentes personalizados. Pode ser configurado usando a variável de ambiente `LANGFLOW_COMPONENTS_PATH`. O padrão é `langflow/components`.
- `--log-file`: Especifica o caminho para o arquivo de log. Pode ser configurado usando a variável de ambiente `LANGFLOW_LOG_FILE`. O padrão é `logs/langflow.log`.
- `--cache`: Seleciona o tipo de cache a ser usado. As opções são `InMemoryCache` e `SQLiteCache`. Pode ser configurado usando a variável de ambiente `LANGFLOW_LANGCHAIN_CACHE`. O padrão é `SQLiteCache`.
- `--dev/--no-dev`: Alterna o modo de desenvolvimento. O padrão é `no-dev`.
- `--path`: Especifica o caminho para o diretório frontend contendo os arquivos de build. Esta opção é apenas para fins de desenvolvimento. Pode ser configurado usando a variável de ambiente `LANGFLOW_FRONTEND_PATH`.
- `--open-browser/--no-open-browser`: Alterna a opção de abrir o navegador após iniciar o servidor. Pode ser configurado usando a variável de ambiente `LANGFLOW_OPEN_BROWSER`. O padrão é `open-browser`.
- `--remove-api-keys/--no-remove-api-keys`: Alterna a opção de remover as chaves de API dos projetos salvos no banco de dados. Pode ser configurado usando a variável de ambiente `LANGFLOW_REMOVE_API_KEYS`. O padrão é `no-remove-api-keys`.
- `--install-completion [bash|zsh|fish|powershell|pwsh]`: Instala a conclusão para o shell especificado.
- `--show-completion [bash|zsh|fish|powershell|pwsh]`: Exibe a conclusão para o shell especificado, permitindo que você copie ou personalize a instalação.
- `--backend-only`: Este parâmetro, com valor padrão `False`, permite executar apenas o servidor backend sem o frontend. Também pode ser configurado usando a variável de ambiente `LANGFLOW_BACKEND_ONLY`.
- `--store`: Este parâmetro, com valor padrão `True`, ativa os recursos da loja, use `--no-store` para desativá-los. Pode ser configurado usando a variável de ambiente `LANGFLOW_STORE`.
Esses parâmetros são importantes para usuários que precisam personalizar o comportamento do Langflow, especialmente em cenários de desenvolvimento ou deploy especializado.
### Variáveis de Ambiente
Você pode configurar muitas das opções de CLI usando variáveis de ambiente. Estas podem ser exportadas no seu sistema operacional ou adicionadas a um arquivo `.env` e carregadas usando a opção `--env-file`.
Um arquivo de exemplo `.env` chamado `.env.example` está incluído no projeto. Copie este arquivo para um novo arquivo chamado `.env` e substitua os valores de exemplo pelas suas configurações reais. Se você estiver definindo valores tanto no seu sistema operacional quanto no arquivo `.env`, as configurações do `.env` terão precedência.
# 👋 Contribuir
Aceitamos contribuições de desenvolvedores de todos os níveis para nosso projeto open-source no GitHub. Se você deseja contribuir, por favor, confira nossas [diretrizes de contribuição](./CONTRIBUTING.md) e ajude a tornar o Langflow mais acessível.
---
[![Star History Chart](https://api.star-history.com/svg?repos=langflow-ai/langflow&type=Timeline)](https://star-history.com/#langflow-ai/langflow&Date)
# 🌟 Contribuidores
[![langflow contributors](https://contrib.rocks/image?repo=langflow-ai/langflow)](https://github.com/langflow-ai/langflow/graphs/contributors)
# 📄 Licença
O Langflow é lançado sob a licença MIT. Veja o arquivo [LICENSE](LICENSE) para detalhes.

181
README.md
View file

@ -1,68 +1,122 @@
<!-- markdownlint-disable MD030 -->
# ⛓️ Langflow
# [![Langflow](./docs/static/img/hero.png)](https://www.langflow.org)
<h3>Discover a simpler & smarter way to build around Foundation Models</h3>
<p align="center"><strong>
A visual framework for building multi-agent and RAG applications
</strong></p>
<p align="center" style="font-size: 12px;">
Open-source, Python-powered, fully customizable, LLM and vector store agnostic
</p>
[![Release Notes](https://img.shields.io/github/release/logspace-ai/langflow)](https://github.com/logspace-ai/langflow/releases)
[![Contributors](https://img.shields.io/github/contributors/logspace-ai/langflow)](https://github.com/logspace-ai/langflow/contributors)
[![Last Commit](https://img.shields.io/github/last-commit/logspace-ai/langflow)](https://github.com/logspace-ai/langflow/last-commit)
[![Open Issues](https://img.shields.io/github/issues-raw/logspace-ai/langflow)](https://github.com/logspace-ai/langflow/issues)
[![LRepo-size](https://img.shields.io/github/repo-size/logspace-ai/langflow)](https://github.com/logspace-ai/langflow/repo-size)
[![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/logspace-ai/langflow)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![GitHub star chart](https://img.shields.io/github/stars/logspace-ai/langflow?style=social)](https://star-history.com/#logspace-ai/langflow)
[![GitHub fork](https://img.shields.io/github/forks/logspace-ai/langflow?style=social)](https://github.com/logspace-ai/langflow/fork)
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langflow_ai.svg?style=social&label=Follow%20%40langflow_ai)](https://twitter.com/langflow_ai)
[![](https://dcbadge.vercel.app/api/server/EqksyE2EX9?compact=true&style=flat)](https://discord.com/invite/EqksyE2EX9)
[![HuggingFace Spaces](https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg)](https://huggingface.co/spaces/Logspace/Langflow)
[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/logspace-ai/langflow)
<p align="center" style="font-size: 12px;">
<a href="https://docs.langflow.org" style="text-decoration: underline;">Docs</a> -
<a href="https://discord.com/invite/EqksyE2EX9" style="text-decoration: underline;">Join our Discord</a> -
<a href="https://twitter.com/langflow_ai" style="text-decoration: underline;">Follow us on X</a> -
<a href="https://huggingface.co/spaces/Langflow/Langflow-Preview" style="text-decoration: underline;">Live demo</a>
</p>
The easiest way to create and customize your flow
<p align="center">
<a href="https://github.com/langflow-ai/langflow">
<img src="https://img.shields.io/github/stars/langflow-ai/langflow">
</a>
<a href="https://discord.com/invite/EqksyE2EX9">
<img src="https://img.shields.io/discord/1116803230643527710?label=Discord">
</a>
</p>
<a href="https://github.com/logspace-ai/langflow">
<img width="100%" src="https://github.com/logspace-ai/langflow/blob/dev/docs/static/img/new_langflow_demo.gif"></a>
<div align="center">
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README.PT.md"><img alt="README in Portuguese" src="https://img.shields.io/badge/Portuguese-d9d9d9"></a>
<a href="./README.zh_CN.md"><img alt="README in Simplified Chinese" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
</div>
# 📦 Installation
<p align="center">
<img src="./docs/static/img/langflow_basic_howto.gif" alt="Your GIF" style="border: 3px solid #211C43;">
</p>
### <b>Locally</b>
# 📝 Content
You can install Langflow from pip:
- [📝 Content](#-content)
- [📦 Get Started](#-get-started)
- [🎨 Create Flows](#-create-flows)
- [Deploy](#deploy)
- [Deploy Langflow on Google Cloud Platform](#deploy-langflow-on-google-cloud-platform)
- [Deploy on Railway](#deploy-on-railway)
- [Deploy on Render](#deploy-on-render)
- [🖥️ Command Line Interface (CLI)](#-command-line-interface-cli)
- [Usage](#usage)
- [Environment Variables](#environment-variables)
- [👋 Contribute](#-contribute)
- [🌟 Contributors](#-contributors)
- [📄 License](#-license)
# 📦 Get Started
You can install Langflow with pip:
```shell
# This installs the package without dependencies for local models
pip install langflow
# Make sure you have >=Python 3.10 installed on your system.
# Install the pre-release version (recommended for the latest updates)
python -m pip install langflow --pre --force-reinstall
# or stable version
python -m pip install langflow -U
```
To use local models (e.g llama-cpp-python) run:
Then, run Langflow with:
```shell
pip install langflow[local]
python -m langflow run
```
This will install the following dependencies:
You can also preview Langflow in [HuggingFace Spaces](https://huggingface.co/spaces/Langflow/Langflow-Preview). [Clone the space using this link](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true) to create your own Langflow workspace in minutes.
- [CTransformers](https://github.com/marella/ctransformers)
- [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)
- [sentence-transformers](https://github.com/UKPLab/sentence-transformers)
# 🎨 Create Flows
You can still use models from projects like LocalAI, Ollama, LM Studio, Jan and others.
Creating flows with Langflow is easy. Simply drag components from the sidebar onto the workspace and connect them to start building your application.
Next, run:
Explore by editing prompt parameters, grouping components into a single high-level component, and building your own Custom Components.
```shell
python -m langflow
Once you're done, you can export your flow as a JSON file.
Load the flow with:
```python
from langflow.load import run_flow_from_json
results = run_flow_from_json("path/to/flow.json", input_value="Hello, World!")
```
or
# Deploy
```shell
langflow run # or langflow --help
```
## Deploy Langflow on Google Cloud Platform
### HuggingFace Spaces
Follow our step-by-step guide to deploy Langflow on Google Cloud Platform (GCP) using Google Cloud Shell. The guide is available in the [**Langflow in Google Cloud Platform**](https://github.com/langflow-ai/langflow/blob/dev/docs/docs/deployment/gcp-deployment.md) document.
You can also check it out on [HuggingFace Spaces](https://huggingface.co/spaces/Logspace/Langflow) and run it in your browser! You can even clone it and have your own copy of Langflow to play with.
Alternatively, click the **"Open in Cloud Shell"** button below to launch Google Cloud Shell, clone the Langflow repository, and start an **interactive tutorial** that will guide you through the process of setting up the necessary resources and deploying Langflow on your GCP project.
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
## Deploy on Railway
Use this template to deploy Langflow 1.0 Preview on Railway:
[![Deploy 1.0 Preview on Railway](https://railway.app/button.svg)](https://railway.app/template/UsJ1uB?referralCode=MnPSdg)
Or this one to deploy Langflow 0.6.x:
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/JMXEWp?referralCode=MnPSdg)
## Deploy on Render
<a href="https://render.com/deploy?repo=https://github.com/langflow-ai/langflow/tree/dev">
<img src="https://render.com/images/deploy-to-render-button.svg" alt="Deploy to Render" />
</a>
## Deploy on Kubernetes
Follow our step-by-step guide to deploy [Langflow on Kubernetes](https://github.com/langflow-ai/langflow/blob/dev/docs/docs/deployment/kubernetes.md).
# 🖥️ Command Line Interface (CLI)
@ -83,7 +137,6 @@ Each option is detailed below:
- `--workers`: Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`.
- `--timeout`: Sets the worker timeout in seconds. The default is `60`.
- `--port`: Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`.
- `--config`: Defines the path to the configuration file. The default is `config.yaml`.
- `--env-file`: Specifies the path to the .env file containing environment variables. The default is `.env`.
- `--log-level`: Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`.
- `--components-path`: Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`.
@ -106,58 +159,18 @@ You can configure many of the CLI options using environment variables. These can
A sample `.env` file named `.env.example` is included with the project. Copy this file to a new file named `.env` and replace the example values with your actual settings. If you're setting values in both your OS and the `.env` file, the `.env` settings will take precedence.
# Deployment
## Deploy Langflow on Google Cloud Platform
Follow our step-by-step guide to deploy Langflow on Google Cloud Platform (GCP) using Google Cloud Shell. The guide is available in the [**Langflow in Google Cloud Platform**](GCP_DEPLOYMENT.md) document.
Alternatively, click the **"Open in Cloud Shell"** button below to launch Google Cloud Shell, clone the Langflow repository, and start an **interactive tutorial** that will guide you through the process of setting up the necessary resources and deploying Langflow on your GCP project.
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/logspace-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
## Deploy on Railway
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/JMXEWp?referralCode=MnPSdg)
## Deploy on Render
<a href="https://render.com/deploy?repo=https://github.com/logspace-ai/langflow/tree/main">
<img src="https://render.com/images/deploy-to-render-button.svg" alt="Deploy to Render" />
</a>
# 🎨 Creating Flows
Creating flows with Langflow is easy. Simply drag components from the sidebar onto the canvas and connect them to start building your application.
Explore by editing prompt parameters, grouping components into a single high-level component, and building your own Custom Components.
Once you're done, you can export your flow as a JSON file.
Load the flow with:
```python
from langflow import load_flow_from_json
flow = load_flow_from_json("path/to/flow.json")
# Now you can use it
flow("Hey, have you heard of Langflow?")
```
# 👋 Contributing
# 👋 Contribute
We welcome contributions from developers of all levels to our open-source project on GitHub. If you'd like to contribute, please check our [contributing guidelines](./CONTRIBUTING.md) and help make Langflow more accessible.
Join our [Discord](https://discord.com/invite/EqksyE2EX9) server to ask questions, make suggestions, and showcase your projects! 🦾
---
[![Star History Chart](https://api.star-history.com/svg?repos=logspace-ai/langflow&type=Timeline)](https://star-history.com/#logspace-ai/langflow&Date)
[![Star History Chart](https://api.star-history.com/svg?repos=langflow-ai/langflow&type=Timeline)](https://star-history.com/#langflow-ai/langflow&Date)
# 🌟 Contributors
[![langflow contributors](https://contrib.rocks/image?repo=logspace-ai/langflow)](https://github.com/logspace-ai/langflow/graphs/contributors)
[![langflow contributors](https://contrib.rocks/image?repo=langflow-ai/langflow)](https://github.com/langflow-ai/langflow/graphs/contributors)
# 📄 License
Langflow is released under the MIT License. See the LICENSE file for details.
Langflow is released under the MIT License. See the [LICENSE](LICENSE) file for details.

172
README.zh_CN.md Normal file
View file

@ -0,0 +1,172 @@
<!-- markdownlint-disable MD030 -->
# [![Langflow](./docs/static/img/hero.png)](https://www.langflow.org)
<p align="center"><strong>
一种用于构建多智能体和RAG应用的可视化框架
</strong></p>
<p align="center" style="font-size: 12px;">
开源、Python驱动、完全可定制,不依赖于特定的大模型和向量存储
</p>
<p align="center" style="font-size: 12px;">
<a href="https://docs.langflow.org" style="text-decoration: underline;">文档</a> -
<a href="https://discord.com/invite/EqksyE2EX9" style="text-decoration: underline;">加入我们的Discord社区</a> -
<a href="https://twitter.com/langflow_ai" style="text-decoration: underline;">在X上关注我们</a> -
<a href="https://huggingface.co/spaces/Langflow/Langflow-Preview" style="text-decoration: underline;">在线体验</a>
</p>
<p align="center">
<a href="https://github.com/langflow-ai/langflow">
<img src="https://img.shields.io/github/stars/langflow-ai/langflow">
</a>
<a href="https://discord.com/invite/EqksyE2EX9">
<img src="https://img.shields.io/discord/1116803230643527710?label=Discord">
</a>
</p>
<div align="center">
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/英文-d9d9d9"></a>
<a href="./README.zh_CN.md"><img alt="README in Simplified Chinese" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
</div>
<p align="center">
<img src="./docs/static/img/langflow_basic_howto.gif" alt="Your GIF" style="border: 3px solid #211C43;">
</p>
# 📝 目录
- [📝 目录](#-目录)
- [📦 快速开始](#-快速开始)
- [🎨 创建工作流](#-创建工作流)
- [部署](#部署)
- [在Google Cloud Platform上部署Langflow](#在google-cloud-platform上部署langflow)
- [在Railway上部署](#在railway上部署)
- [在Render上部署](#在render上部署)
- [🖥️ 命令行界面 (CLI)](#-命令行界面-cli)
- [用法](#用法)
- [环境变量](#环境变量)
- [👋 贡献](#-贡献)
- [🌟 贡献者](#-贡献者)
- [📄 许可证](#-许可证)
# 📦 快速开始
使用 pip 安装 Langflow
```shell
# 确保您的系统已经安装上>=Python 3.10
# 安装Langflow预发布版本
python -m pip install langflow --pre --force-reinstall
# 安装Langflow稳定版本
python -m pip install langflow -U
```
然后运行Langflow
```shell
python -m langflow run
```
您可以在[HuggingFace Spaces](https://huggingface.co/spaces/Langflow/Langflow-Preview)中在线体验 Langflow也可以使用该链接[克隆空间](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true),在几分钟内创建您自己的 Langflow 运行工作空间。
# 🎨 创建工作流
使用 Langflow 来创建工作流非常简单。只需从侧边栏拖动组件到画布上,然后连接组件即可开始构建应用程序。
您可以通过编辑提示参数、将组件分组到单个高级组件中以及构建您自己的自定义组件来展开探索。
完成后,可以将工作流导出为 JSON 文件。
然后使用以下脚本加载工作流:
```python
from langflow.load import run_flow_from_json
results = run_flow_from_json("path/to/flow.json", input_value="Hello, World!")
```
# 部署
## 在Google Cloud Platform上部署Langflow
请按照我们的分步指南使用 Google Cloud Shell 在 Google Cloud Platform (GCP) 上部署 Langflow。该指南在 [**Langflow in Google Cloud Platform**](GCP_DEPLOYMENT.md) 文档中提供。
或者,点击下面的 "Open in Cloud Shell" 按钮,启动 Google Cloud Shell,克隆 Langflow 仓库,并开始一个互动教程,该教程将指导您设置必要的资源并在 GCP 项目中部署 Langflow。
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
## 在Railway上部署
使用此模板在 Railway 上部署 Langflow 1.0 预览版:
[![Deploy 1.0 Preview on Railway](https://railway.app/button.svg)](https://railway.app/template/UsJ1uB?referralCode=MnPSdg)
或者使用此模板部署 Langflow 0.6.x
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/JMXEWp?referralCode=MnPSdg)
## 在Render上部署
<a href="https://render.com/deploy?repo=https://github.com/langflow-ai/langflow/tree/dev">
<img src="https://render.com/images/deploy-to-render-button.svg" alt="Deploy to Render" />
</a>
# 🖥️ 命令行界面 (CLI)
Langflow提供了一个命令行界面以便于平台的管理和配置。
## 用法
您可以使用以下命令运行Langflow
```shell
langflow run [OPTIONS]
```
命令行参数的详细说明:
- `--help`: 显示所有可用参数。
- `--host`: 定义绑定服务器的主机host参数可以使用 LANGFLOW_HOST 环境变量设置,默认值为 127.0.0.1。
- `--workers`: 设置工作进程的数量,可以使用 LANGFLOW_WORKERS 环境变量设置,默认值为 1。
- `--timeout`: 设置工作进程的超时时间(秒),默认值为 60。
- `--port`: 设置服务监听的端口,可以使用 LANGFLOW_PORT 环境变量设置,默认值为 7860。
- `--config`: 定义配置文件的路径,默认值为 config.yaml。
- `--env-file`: 指定包含环境变量的 .env 文件路径,默认值为 .env。
- `--log-level`: 定义日志记录级别,可以使用 LANGFLOW_LOG_LEVEL 环境变量设置,默认值为 critical。
- `--components-path`: 指定包含自定义组件的目录路径,可以使用 LANGFLOW_COMPONENTS_PATH 环境变量设置,默认值为 langflow/components。
- `--log-file`: 指定日志文件的路径,可以使用 LANGFLOW_LOG_FILE 环境变量设置,默认值为 logs/langflow.log。
- `--cache`: 选择要使用的缓存类型,可选项为 InMemoryCache 和 SQLiteCache可以使用 LANGFLOW_LANGCHAIN_CACHE 环境变量设置,默认值为 SQLiteCache。
- `--dev/--no-dev`: 切换开发/非开发模式,默认值为 no-dev,即非开发模式。
- `--path`: 指定包含前端构建文件的目录路径,此参数仅用于开发目的,可以使用 LANGFLOW_FRONTEND_PATH 环境变量设置。
- `--open-browser/--no-open-browser`: 切换启动服务器后是否打开浏览器,可以使用 LANGFLOW_OPEN_BROWSER 环境变量设置,默认值为 open-browser,即启动后打开浏览器。
- `--remove-api-keys/--no-remove-api-keys`: 切换是否从数据库中保存的项目中移除 API 密钥,可以使用 LANGFLOW_REMOVE_API_KEYS 环境变量设置,默认值为 no-remove-api-keys。
- `--install-completion [bash|zsh|fish|powershell|pwsh]`: 为指定的 shell 安装自动补全。
- `--show-completion [bash|zsh|fish|powershell|pwsh]`: 显示指定 shell 的自动补全,使您可以复制或自定义安装。
- `--backend-only`: 此参数默认为 False允许仅运行后端服务器而不运行前端也可以使用 LANGFLOW_BACKEND_ONLY 环境变量设置。
- `--store`: 此参数默认为 True启用存储功能使用 --no-store 可禁用它,可以使用 LANGFLOW_STORE 环境变量配置。
这些参数对于需要定制 Langflow 行为的用户尤其重要,特别是在开发或者特殊部署场景中。
### 环境变量
您可以使用环境变量配置许多 CLI 参数选项。这些变量可以在操作系统中导出,或添加到 .env 文件中,并使用 --env-file 参数加载。
项目中包含一个名为 .env.example 的示例 .env 文件。将此文件复制为新文件 .env并用实际设置值替换示例值。如果同时在操作系统和 .env 文件中设置值,则 .env 设置优先。
# 👋 贡献
我们欢迎各级开发者为我们的 GitHub 开源项目做出贡献,帮助 Langflow 更加易用。如果您想参与贡献,请查看我们的贡献指南 [contributing guidelines](./CONTRIBUTING.md)。
---
[![Star History Chart](https://api.star-history.com/svg?repos=langflow-ai/langflow&type=Timeline)](https://star-history.com/#langflow-ai/langflow&Date)
# 🌟 贡献者
[![langflow contributors](https://contrib.rocks/image?repo=langflow-ai/langflow)](https://github.com/langflow-ai/langflow/graphs/contributors)
# 📄 许可证
Langflow 以 MIT 许可证发布。有关详细信息,请参阅 [LICENSE](LICENSE) 文件。

View file

@ -1,97 +0,0 @@
# syntax=docker/dockerfile:1
# Keep this syntax directive! It's used to enable Docker BuildKit
# Based on https://github.com/python-poetry/poetry/discussions/1879?sort=top#discussioncomment-216865
# but I try to keep it updated (see history)
################################
# PYTHON-BASE
# Sets up all our shared environment variables
################################
FROM python:3.10-slim as python-base
# python
ENV PYTHONUNBUFFERED=1 \
# prevents python creating .pyc files
PYTHONDONTWRITEBYTECODE=1 \
\
# pip
PIP_DISABLE_PIP_VERSION_CHECK=on \
PIP_DEFAULT_TIMEOUT=100 \
\
# poetry
# https://python-poetry.org/docs/configuration/#using-environment-variables
POETRY_VERSION=1.7.1 \
# make poetry install to this location
POETRY_HOME="/opt/poetry" \
# make poetry create the virtual environment in the project's root
# it gets named `.venv`
POETRY_VIRTUALENVS_IN_PROJECT=true \
# do not ask any interactive question
POETRY_NO_INTERACTION=1 \
\
# paths
# this is where our requirements + virtual environment will live
PYSETUP_PATH="/opt/pysetup" \
VENV_PATH="/opt/pysetup/.venv"
# prepend poetry and venv to path
ENV PATH="$POETRY_HOME/bin:$VENV_PATH/bin:$PATH"
################################
# BUILDER-BASE
# Used to build deps + create our virtual environment
################################
FROM python-base as builder-base
RUN apt-get update \
&& apt-get install --no-install-recommends -y \
# deps for installing poetry
curl \
# deps for building python deps
build-essential
# install poetry - respects $POETRY_VERSION & $POETRY_HOME
# The --mount will mount the buildx cache directory to where
# Poetry and Pip store their cache so that they can re-use it
RUN --mount=type=cache,target=/root/.cache \
curl -sSL https://install.python-poetry.org | python3 -
# copy project requirement files here to ensure they will be cached.
WORKDIR $PYSETUP_PATH
COPY poetry.lock pyproject.toml ./
COPY ./src/backend/langflow/main.py ./src/backend/langflow/main.py
# Copy README.md to the build context
COPY README.md .
# install runtime deps - uses $POETRY_VIRTUALENVS_IN_PROJECT internally
RUN --mount=type=cache,target=/root/.cache \
poetry install --without dev --extras deploy
################################
# DEVELOPMENT
# Image used during development / testing
################################
FROM python-base as development
WORKDIR $PYSETUP_PATH
# copy in our built poetry + venv
COPY --from=builder-base $POETRY_HOME $POETRY_HOME
COPY --from=builder-base $PYSETUP_PATH $PYSETUP_PATH
# Copy just one file to avoid rebuilding the whole image
COPY ./src/backend/langflow/__init__.py ./src/backend/langflow/__init__.py
# quicker install as runtime deps are already installed
RUN --mount=type=cache,target=/root/.cache \
poetry install --with=dev --extras deploy
# copy in our app code
COPY ./src/backend ./src/backend
RUN --mount=type=cache,target=/root/.cache \
poetry install --with=dev --extras deploy
COPY ./tests ./tests

View file

@ -1,92 +0,0 @@
# syntax=docker/dockerfile:1
# Keep this syntax directive! It's used to enable Docker BuildKit
# Based on https://github.com/python-poetry/poetry/discussions/1879?sort=top#discussioncomment-216865
# but I try to keep it updated (see history)
################################
# PYTHON-BASE
# Sets up all our shared environment variables
################################
FROM python:3.10-slim as python-base
# python
ENV PYTHONUNBUFFERED=1 \
# prevents python creating .pyc files
PYTHONDONTWRITEBYTECODE=1 \
\
# pip
PIP_DISABLE_PIP_VERSION_CHECK=on \
PIP_DEFAULT_TIMEOUT=100 \
\
# poetry
# https://python-poetry.org/docs/configuration/#using-environment-variables
POETRY_VERSION=1.5.1 \
# make poetry install to this location
POETRY_HOME="/opt/poetry" \
# make poetry create the virtual environment in the project's root
# it gets named `.venv`
POETRY_VIRTUALENVS_IN_PROJECT=true \
# do not ask any interactive question
POETRY_NO_INTERACTION=1 \
\
# paths
# this is where our requirements + virtual environment will live
PYSETUP_PATH="/opt/pysetup" \
VENV_PATH="/opt/pysetup/.venv"
# prepend poetry and venv to path
ENV PATH="$POETRY_HOME/bin:$VENV_PATH/bin:$PATH"
################################
# BUILDER-BASE
# Used to build deps + create our virtual environment
################################
FROM python-base as builder-base
RUN apt-get update \
&& apt-get install --no-install-recommends -y \
# deps for installing poetry
curl \
# deps for building python deps
build-essential
# install poetry - respects $POETRY_VERSION & $POETRY_HOME
# The --mount will mount the buildx cache directory to where
# Poetry and Pip store their cache so that they can re-use it
RUN --mount=type=cache,target=/root/.cache \
curl -sSL https://install.python-poetry.org | python3 -
# copy project requirement files here to ensure they will be cached.
WORKDIR $PYSETUP_PATH
COPY ./poetry.lock ./pyproject.toml ./
# Copy README.md to the build context
COPY ./README.md ./
# install runtime deps - uses $POETRY_VIRTUALENVS_IN_PROJECT internally
RUN --mount=type=cache,target=/root/.cache \
poetry install --without dev --extras deploy
################################
# DEVELOPMENT
# Image used during development / testing
################################
FROM python-base as development
WORKDIR $PYSETUP_PATH
# copy in our built poetry + venv
COPY --from=builder-base $POETRY_HOME $POETRY_HOME
COPY --from=builder-base $PYSETUP_PATH $PYSETUP_PATH
# Copy just one file to avoid rebuilding the whole image
COPY ./src/backend/langflow/__init__.py ./src/backend/langflow/__init__.py
# quicker install as runtime deps are already installed
RUN --mount=type=cache,target=/root/.cache \
poetry install --with=dev --extras deploy
# copy in our app code
COPY ./src/backend ./src/backend
COPY ./tests ./tests

View file

@ -69,10 +69,7 @@ services:
- traefik.http.routers.${STACK_NAME?Variable not set}-proxy-http.middlewares=${STACK_NAME?Variable not set}-www-redirect,${STACK_NAME?Variable not set}-https-redirect
backend: &backend
image: "ogabrielluiz/langflow:latest"
build:
context: ../
dockerfile: base.Dockerfile
image: "langflowai/langflow:latest"
depends_on:
- db
- broker
@ -143,9 +140,6 @@ services:
<<: *backend
env_file:
- .env
build:
context: ../
dockerfile: base.Dockerfile
command: celery -A langflow.worker.celery_app worker --loglevel=INFO --concurrency=1 -n lf-worker@%h -P eventlet
healthcheck:
test: "exit 0"
@ -158,9 +152,6 @@ services:
- .env
networks:
- default
build:
context: ../
dockerfile: base.Dockerfile
environment:
- FLOWER_PORT=5555

View file

@ -1,33 +0,0 @@
version: "3.4"
services:
backend:
volumes:
- ./:/app
build:
context: ./
dockerfile: ./dev.Dockerfile
command:
[
"sh",
"-c",
"pip install debugpy -t /tmp && python /tmp/debugpy --wait-for-client --listen 0.0.0.0:5678 -m uvicorn --factory src.backend.langflow.main:create_app --host 0.0.0.0 --port 7860 --reload",
]
ports:
- 7860:7860
- 5678:5678
restart: on-failure
frontend:
build:
context: ./src/frontend
dockerfile: ./dev.Dockerfile
args:
- BACKEND_URL=http://backend:7860
ports:
- "3000:3000"
volumes:
- ./src/frontend/public:/home/node/app/public
- ./src/frontend/src:/home/node/app/src
- ./src/frontend/package.json:/home/node/app/package.json
restart: on-failure

9
docker/.dockerignore Normal file
View file

@ -0,0 +1,9 @@
.venv/
**/aws
node_modules
**/node_modules/
dist/
**/build/
src/backend/langflow/frontend
**/langflow-pre.db
**/langflow.db

View file

@ -0,0 +1,97 @@
# syntax=docker/dockerfile:1
# Keep this syntax directive! It's used to enable Docker BuildKit
################################
# BUILDER-BASE
# Used to build deps + create our virtual environment
################################
# 1. use python:3.12.3-slim as the base image until https://github.com/pydantic/pydantic-core/issues/1292 gets resolved
# 2. do not add --platform=$BUILDPLATFORM because the pydantic binaries must be resolved for the final architecture
FROM python:3.12.3-slim as builder-base
ENV PYTHONDONTWRITEBYTECODE=1 \
\
# pip
PIP_DISABLE_PIP_VERSION_CHECK=on \
PIP_DEFAULT_TIMEOUT=100 \
\
# poetry
# https://python-poetry.org/docs/configuration/#using-environment-variables
POETRY_VERSION=1.8.2 \
# make poetry install to this location
POETRY_HOME="/opt/poetry" \
# make poetry create the virtual environment in the project's root
# it gets named `.venv`
POETRY_VIRTUALENVS_IN_PROJECT=true \
# do not ask any interactive question
POETRY_NO_INTERACTION=1 \
\
# paths
# this is where our requirements + virtual environment will live
PYSETUP_PATH="/opt/pysetup" \
VENV_PATH="/opt/pysetup/.venv"
RUN apt-get update \
&& apt-get install --no-install-recommends -y \
# deps for installing poetry
curl \
# deps for building python deps
build-essential npm \
# gcc
gcc \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
RUN --mount=type=cache,target=/root/.cache \
curl -sSL https://install.python-poetry.org | python3 -
WORKDIR /app
COPY pyproject.toml poetry.lock README.md ./
COPY src/ ./src
COPY scripts/ ./scripts
RUN python -m pip install requests --user && cd ./scripts && python update_dependencies.py
# 1. Install the dependencies using the current poetry.lock file to create reproducible builds
# 2. Do not install dev dependencies
# 3. Install all the extras to ensure all optionals are installed as well
# 4. --sync to ensure nothing else is in the environment
# 5. Build the wheel and install "langflow" package (mainly for version)
# Note: moving to build and installing the wheel will make the docker images not reproducible.
RUN $POETRY_HOME/bin/poetry lock --no-update \
# install current lock file with fixed dependencies versions \
# do not install dev dependencies \
&& $POETRY_HOME/bin/poetry install --without dev --sync -E deploy -E couchbase -E cassio \
&& $POETRY_HOME/bin/poetry build -f wheel \
&& $POETRY_HOME/bin/poetry run pip install dist/*.whl
################################
# RUNTIME
# Setup user, utilities and copy the virtual environment only
################################
# 1. use python:3.12.3-slim as the base image until https://github.com/pydantic/pydantic-core/issues/1292 gets resolved
FROM python:3.12.3-slim as runtime
RUN apt-get -y update \
&& apt-get install --no-install-recommends -y \
curl \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
LABEL org.opencontainers.image.title=langflow
LABEL org.opencontainers.image.authors=['Langflow']
LABEL org.opencontainers.image.licenses=MIT
LABEL org.opencontainers.image.url=https://github.com/langflow-ai/langflow
LABEL org.opencontainers.image.source=https://github.com/langflow-ai/langflow
RUN useradd user -u 1000 -g 0 --no-create-home --home-dir /app/data
COPY --from=builder-base --chown=1000 /app/.venv /app/.venv
ENV PATH="/app/.venv/bin:${PATH}"
USER user
WORKDIR /app
ENTRYPOINT ["python", "-m", "langflow", "run"]
CMD ["--host", "0.0.0.0", "--port", "7860"]

View file

@ -0,0 +1,8 @@
# syntax=docker/dockerfile:1
# Keep this syntax directive! It's used to enable Docker BuildKit
ARG LANGFLOW_IMAGE
FROM $LANGFLOW_IMAGE
RUN rm -rf /app/.venv/langflow/frontend
CMD ["--host", "0.0.0.0", "--port", "7860", "--backend-only"]

View file

@ -10,7 +10,9 @@
# PYTHON-BASE
# Sets up all our shared environment variables
################################
FROM python:3.10-slim as python-base
# use python:3.12.3-slim as the base image until https://github.com/pydantic/pydantic-core/issues/1292 gets resolved
FROM python:3.12.3-slim as python-base
# python
ENV PYTHONUNBUFFERED=1 \
@ -23,7 +25,7 @@ ENV PYTHONUNBUFFERED=1 \
\
# poetry
# https://python-poetry.org/docs/configuration/#using-environment-variables
POETRY_VERSION=1.7.1 \
POETRY_VERSION=1.8.2 \
# make poetry install to this location
POETRY_HOME="/opt/poetry" \
# make poetry create the virtual environment in the project's root
@ -47,7 +49,6 @@ ENV PATH="$POETRY_HOME/bin:$VENV_PATH/bin:$PATH"
# Used to build deps + create our virtual environment
################################
FROM python-base as builder-base
RUN
RUN apt-get update \
&& apt-get install --no-install-recommends -y \
# deps for installing poetry
@ -55,25 +56,45 @@ RUN apt-get update \
# deps for building python deps
build-essential \
# npm
npm
npm \
# gcc
gcc \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
RUN --mount=type=cache,target=/root/.cache \
curl -sSL https://install.python-poetry.org | python3 -
# Now we need to copy the entire project into the image
WORKDIR /app
COPY pyproject.toml poetry.lock ./
COPY src ./src
COPY src/frontend/package.json /tmp/package.json
RUN cd /tmp && npm install
WORKDIR /app
COPY src/frontend ./src/frontend
RUN rm -rf src/frontend/node_modules
RUN cp -a /tmp/node_modules /app/src/frontend
COPY scripts ./scripts
COPY Makefile ./
COPY README.md ./
RUN curl -sSL https://install.python-poetry.org | python3 - && make build
RUN cd src/frontend && npm run build
COPY src/backend ./src/backend
RUN cp -r src/frontend/build src/backend/base/langflow/frontend
RUN rm -rf src/backend/base/dist
RUN useradd -m -u 1000 user && \
mkdir -p /app/langflow && \
chown -R user:user /app && \
chmod -R u+w /app/langflow
# Final stage for the application
FROM python-base as final
# Update PATH with home/user/.local/bin
ENV PATH="/home/user/.local/bin:${PATH}"
RUN cd src/backend/base && $POETRY_HOME/bin/poetry build
# Copy virtual environment and built .tar.gz from builder base
COPY --from=builder-base /app/dist/*.tar.gz ./
USER user
# Install the package from the .tar.gz
RUN pip install *.tar.gz
RUN python -m pip install /app/src/backend/base/dist/*.tar.gz --user
WORKDIR /app
CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860"]
ENTRYPOINT ["python", "-m", "langflow", "run"]
CMD ["--host", "0.0.0.0", "--port", "7860"]

View file

@ -13,7 +13,7 @@ services:
- "7860:7860"
volumes:
- ./:/app
command: bash -c "uvicorn --factory src.backend.langflow.main:create_app --host 0.0.0.0 --port 7860 --reload"
command: bash -c "uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --loop asyncio"
networks:
- langflow
frontend:
@ -23,7 +23,7 @@ services:
args:
- BACKEND_URL=http://backend:7860
depends_on:
- backend
- backend
environment:
- VITE_PROXY_TARGET=http://backend:7860
ports:

View file

@ -1,5 +1,5 @@
export LANGFLOW_DATABASE_URL="mysql+pymysql://${username}:${password}@${host}:3306/${dbname}"
# echo $LANGFLOW_DATABASE_URL
uvicorn --factory src.backend.langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --log-level debug
uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --log-level debug --loop asyncio
# python -m langflow run --host 0.0.0.0 --port 7860

View file

@ -15,4 +15,4 @@ COPY ./ ./
# Install dependencies
RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi
CMD ["uvicorn", "--factory", "src.backend.langflow.main:create_app", "--host", "0.0.0.0", "--port", "7860", "--reload", "--log-level", "debug"]
CMD ["uvicorn", "--factory", "langflow.main:create_app", "--host", "0.0.0.0", "--port", "7860", "--reload", "--log-level", "debug", "--loop", "asyncio"]

View file

@ -0,0 +1,29 @@
# syntax=docker/dockerfile:1
# Keep this syntax directive! It's used to enable Docker BuildKit
################################
# BUILDER-BASE
################################
# 1. force platform to the current architecture to increase build speed time on multi-platform builds
FROM --platform=$BUILDPLATFORM node:lts-bookworm-slim as builder-base
# Build the static frontend bundle; output lands in /frontend/build
COPY src/frontend /frontend
RUN cd /frontend && npm install && npm run build
################################
# RUNTIME
################################
# Unprivileged nginx image runs as the non-root "nginx" user
FROM nginxinc/nginx-unprivileged:stable-bookworm-perl as runtime
LABEL org.opencontainers.image.title=langflow-frontend
LABEL org.opencontainers.image.authors=['Langflow']
LABEL org.opencontainers.image.licenses=MIT
LABEL org.opencontainers.image.url=https://github.com/langflow-ai/langflow
LABEL org.opencontainers.image.source=https://github.com/langflow-ai/langflow
# Serve the pre-built static assets; chown so the unprivileged nginx user can read them
COPY --from=builder-base --chown=nginx /frontend/build /usr/share/nginx/html
# The nginx config contains __BACKEND_URL__ / __FRONTEND_PORT__ placeholders
# that start-nginx.sh substitutes at container startup
COPY --chown=nginx ./docker/frontend/nginx.conf /etc/nginx/conf.d/default.conf
COPY --chown=nginx ./docker/frontend/start-nginx.sh /start-nginx.sh
RUN chmod +x /start-nginx.sh
# start-nginx.sh fills in the placeholders, then execs nginx in the foreground
ENTRYPOINT ["/start-nginx.sh"]

View file

@ -0,0 +1,22 @@
# Serves the Langflow frontend static bundle and proxies API traffic to the backend.
# __FRONTEND_PORT__ and __BACKEND_URL__ are placeholders replaced at container
# startup by start-nginx.sh (sed substitution).
server {
# Compress common text responses to cut transfer size
gzip on;
gzip_comp_level 2;
gzip_min_length 1000;
gzip_types text/xml text/css;
gzip_http_version 1.1;
gzip_vary on;
# Old MSIE versions had broken gzip support
gzip_disable "MSIE [4-6] \.";
listen __FRONTEND_PORT__;
# Single-page app: unknown paths fall back to index.html so client-side routing works
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html =404;
}
# Forward API calls to the Langflow backend
location /api {
proxy_pass __BACKEND_URL__;
}
# Allow extra drop-in configuration without rebuilding the image
include /etc/nginx/extra-conf.d/*.conf;
}

View file

@ -0,0 +1,25 @@
#!/bin/sh
# Entrypoint for the Langflow frontend container.
# Substitutes the __BACKEND_URL__ and __FRONTEND_PORT__ placeholders in the
# nginx config, then execs nginx in the foreground. Because of `exec`, nginx
# becomes PID 1 and receives container signals directly, so no trap/forwarding
# is needed. (The previous `trap 'kill -TERM $PID' TERM INT` was dead code:
# $PID was never assigned, and the shell is replaced by `exec` anyway.)
set -e

# BACKEND_URL may come from the environment or the first positional argument.
if [ -z "$BACKEND_URL" ]; then
  BACKEND_URL="$1"
fi
# FRONTEND_PORT may come from the environment or the second positional
# argument; defaults to 80 when neither is provided.
if [ -z "$FRONTEND_PORT" ]; then
  FRONTEND_PORT="$2"
fi
if [ -z "$FRONTEND_PORT" ]; then
  FRONTEND_PORT="80"
fi
# BACKEND_URL is mandatory — fail fast with a usage hint.
if [ -z "$BACKEND_URL" ]; then
  echo "BACKEND_URL must be set as an environment variable or as first parameter. (e.g. http://localhost:7860)"
  exit 1
fi
echo "BACKEND_URL: $BACKEND_URL"
echo "FRONTEND_PORT: $FRONTEND_PORT"
# Substitute the placeholders in place (| delimiter because URLs contain slashes).
sed -i "s|__BACKEND_URL__|$BACKEND_URL|g" /etc/nginx/conf.d/default.conf
sed -i "s|__FRONTEND_PORT__|$FRONTEND_PORT|g" /etc/nginx/conf.d/default.conf
# Echo the final config for debugging container startup.
cat /etc/nginx/conf.d/default.conf
# Start nginx in the foreground so the container stays alive.
exec nginx -g 'daemon off;'

View file

@ -0,0 +1 @@
FROM langflowai/langflow:1.0-alpha

View file

@ -1,15 +1,3 @@
FROM python:3.10-slim
FROM langflowai/langflow:latest
RUN apt-get update && apt-get install gcc g++ git make -y && apt-get clean \
&& rm -rf /var/lib/apt/lists/*
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app
COPY --chown=user . $HOME/app
RUN pip install langflow>==0.5.0 -U --user
CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860"]

View file

@ -1,9 +1,65 @@
# LangFlow Docker Running
# Running LangFlow with Docker
```sh
git clone git@github.com:logspace-ai/langflow.git
cd langflow/docker_example
docker compose up
```
This guide will help you get LangFlow up and running using Docker and Docker Compose.
The web UI will be accessible on port [7860](http://localhost:7860/)
## Prerequisites
- Docker
- Docker Compose
## Steps
1. Clone the LangFlow repository:
```sh
git clone https://github.com/langflow-ai/langflow.git
```
2. Navigate to the `docker_example` directory:
```sh
cd langflow/docker_example
```
3. Run the Docker Compose file:
```sh
docker compose up
```
LangFlow will now be accessible at [http://localhost:7860/](http://localhost:7860/).
## Docker Compose Configuration
The Docker Compose configuration spins up two services: `langflow` and `postgres`.
### LangFlow Service
The `langflow` service uses the `langflowai/langflow:latest` Docker image and exposes port 7860. It depends on the `postgres` service.
Environment variables:
- `LANGFLOW_DATABASE_URL`: The connection string for the PostgreSQL database.
- `LANGFLOW_CONFIG_DIR`: The directory where LangFlow stores logs, file storage, monitor data, and secret keys.
Volumes:
- `langflow-data`: This volume is mapped to `/var/lib/langflow` in the container.
### PostgreSQL Service
The `postgres` service uses the `postgres:16` Docker image and exposes port 5432.
Environment variables:
- `POSTGRES_USER`: The username for the PostgreSQL database.
- `POSTGRES_PASSWORD`: The password for the PostgreSQL database.
- `POSTGRES_DB`: The name of the PostgreSQL database.
Volumes:
- `langflow-postgres`: This volume is mapped to `/var/lib/postgresql/data` in the container.
## Switching to a Specific LangFlow Version
If you want to use a specific version of LangFlow, you can modify the `image` field under the `langflow` service in the Docker Compose file. For example, to use version 1.0-alpha, change `langflowai/langflow:latest` to `langflowai/langflow:1.0-alpha`.

View file

@ -1,10 +1,30 @@
version: '3'
version: "3.8"
services:
langflow:
build:
context: .
dockerfile: Dockerfile
image: langflowai/langflow:latest
ports:
- "7860:7860"
command: langflow run --host 0.0.0.0
depends_on:
- postgres
environment:
- LANGFLOW_DATABASE_URL=postgresql://langflow:langflow@postgres:5432/langflow
# This variable defines where the logs, file storage, monitor data and secret keys are stored.
- LANGFLOW_CONFIG_DIR=/var/lib/langflow
volumes:
- langflow-data:/var/lib/langflow
postgres:
image: postgres:16
environment:
POSTGRES_USER: langflow
POSTGRES_PASSWORD: langflow
POSTGRES_DB: langflow
ports:
- "5432:5432"
volumes:
- langflow-postgres:/var/lib/postgresql/data
volumes:
langflow-postgres:
langflow-data:

View file

@ -0,0 +1,3 @@
# Pin the Langflow 1.0 alpha image for a simple single-container deployment
FROM langflowai/langflow:1.0-alpha
# Serve Langflow on all interfaces at the default port 7860
CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860"]

View file

@ -0,0 +1,30 @@
version: "3.8"

services:
  # Langflow application server, backed by the postgres service below.
  langflow:
    image: langflowai/langflow:1.0-alpha
    ports:
      - "7860:7860"
    depends_on:
      - postgres
    environment:
      - LANGFLOW_DATABASE_URL=postgresql://langflow:langflow@postgres:5432/langflow
      # This variable defines where the logs, file storage, monitor data and secret keys are stored.
      # Must be an absolute path that matches the volume mount below; the previous
      # relative value "app/langflow" did not match "/app/langflow", so data would
      # be written into the container layer and lost on recreation.
      - LANGFLOW_CONFIG_DIR=/app/langflow
    volumes:
      - langflow-data:/app/langflow

  # PostgreSQL database for Langflow.
  postgres:
    image: postgres:16
    environment:
      POSTGRES_USER: langflow
      POSTGRES_PASSWORD: langflow
      POSTGRES_DB: langflow
    ports:
      - "5432:5432"
    volumes:
      - langflow-postgres:/var/lib/postgresql/data

# Named volumes so application and database data survive container recreation.
volumes:
  langflow-postgres:
  langflow-data:

View file

@ -1,17 +1,28 @@
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";
# API Keys
## Introduction
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Langflow offers an API Key functionality that allows users to access their individual components and flows without going through traditional login authentication. The API Key is a user-specific token that can be included in the request's header or query parameter to authenticate API calls. The following documentation outlines how to generate, use, and manage these API Keys in Langflow.
Langflow provides an API key functionality that allows users to access their individual components and flows without traditional login authentication. The API key is a user-specific token that can be included in the request header or query parameter to authenticate API calls. This documentation outlines how to generate, use, and manage API keys in Langflow.
## Generating an API Key
<Admonition type="warning">
The default user and password are set using the LANGFLOW_SUPERUSER and
LANGFLOW_SUPERUSER_PASSWORD environment variables.
### Through Langflow UI
The default values are `langflow` and `langflow`, respectively.
{/* add image img/api-key.png */}
</Admonition>
## Generate an API key
Generate a user-specific token to use with Langflow.
### Generate an API key with the Langflow UI
<ZoomableImage
alt="Docusaurus themed image"
@ -28,15 +39,35 @@ Langflow offers an API Key functionality that allows users to access their indiv
4. Click on "Create secret key".
5. Copy the API key and store it in a secure location.
## Using the API Key
### Generate an API key with the Langflow CLI
### Using the `x-api-key` Header
```bash
langflow api-key
# or
python -m langflow api-key
╭─────────────────────────────────────────────────────────────────────╮
│ API Key Created Successfully: │
│ │
│ sk-O0elzoWID1izAH8RUKrnnvyyMwIzHi2Wk-uXWoNJ2Ro │
│ │
│ This is the only time the API key will be displayed. │
│ Make sure to store it in a secure location. │
│ │
│ The API key has been copied to your clipboard. Cmd + V to paste it. │
╰─────────────────────────────────────────────────────────────────────╯
```
## Use the Langflow API key
Include your API key in API requests to authenticate requests to Langflow.
### Use the `x-api-key` header
Include the `x-api-key` in the HTTP header when making API requests:
```bash
curl -X POST \
http://localhost:3000/api/v1/process/<your_flow_id> \
http://localhost:3000/api/v1/run/<your_flow_id> \
-H 'Content-Type: application/json'\
-H 'x-api-key: <your api key>'\
-d '{"inputs": {"text":""}, "tweaks": {}}'
@ -85,9 +116,9 @@ api_key = "<your api key>"
print(run_flow(inputs, flow_id=FLOW_ID, tweaks=TWEAKS, apiKey=api_key))
```
### Using the Query Parameter
### Use the query parameter
Alternatively, you can include the API key as a query parameter in the URL:
Include the API key as a query parameter in the URL:
```bash
curl -X POST \
@ -96,7 +127,7 @@ curl -X POST \
-d '{"inputs": {"text":""}, "tweaks": {}}'
```
Or with Python:
With Python using `requests`:
```python
import requests
@ -140,9 +171,13 @@ print(run_flow(inputs, flow_id=FLOW_ID, tweaks=TWEAKS, apiKey=api_key))
## Security Considerations
- **Visibility**: The API key won't be retrievable again through the UI for security reasons.
- **Scope**: The key only allows access to the flows and components of the specific user to whom it was issued.
- **Visibility**: For security reasons, the API key cannot be retrieved again through the UI.
- **Scope**: The key allows access only to the flows and components of the specific user to whom it was issued.
## Revoking an API Key
## Custom API endpoint
To revoke an API key, simply delete it from the UI. This will immediately invalidate the key and prevent it from being used again.
Under **Project Settings** > **Endpoint Name**, you can pick a custom name for the endpoint used to call your flow from the API.
## Revoke an API Key
To revoke an API key, delete it from the UI. This action immediately invalidates the key and prevents it from being used again.

View file

@ -6,6 +6,10 @@ import Admonition from "@theme/Admonition";
# Chat Widget
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
<div style={{ marginBottom: "20px" }}>
The <b>Langflow Chat Widget</b> is a powerful web component that enables
communication with a Langflow project. This widget allows for a chat interface
@ -61,7 +65,7 @@ import Admonition from "@theme/Admonition";
---
### HTML
### Embed your flow into HTML
The Chat Widget can be embedded into any HTML page, inside a _`<body>`_ tag, as demonstrated in the video below.
@ -73,15 +77,15 @@ The Chat Widget can be embedded into any HTML page, inside a _`<body>`_ tag, as
---
### React
### Embed your flow with React
To embed the Chat Widget using React, you'll need to insert this _`<script>`_ tag into the React _index.html_ file, inside the _`<body>`_ tag:
1. To embed the Chat Widget using React, insert this _`<script>`_ tag into the React _index.html_ file, inside the _`<body>`_ tag:
```html
<script src="https://cdn.jsdelivr.net/gh/logspace-ai/langflow-embedded-chat@main/dist/build/static/js/bundle.min.js"></script>
<script src="https://cdn.jsdelivr.net/gh/langflow-ai/langflow-embedded-chat@main/dist/build/static/js/bundle.min.js"></script>
```
Then, declare your Web Component and encapsulate it in a React component.
2. Declare your Web Component and encapsulate it in a React component.
```jsx
declare global {
@ -106,28 +110,27 @@ export default function ChatWidget({ className }) {
}
```
Finally, you can place the component anywhere in your code to display the Chat Widget.
3. Finally, you can place the component anywhere in your code to display the Chat Widget.
---
### Angular
### Embed your flow with Angular
To use it in Angular, first add this _`<script>`_ tag into the Angular _index.html_ file, inside the _`<body>`_ tag.
1. To use the chat widget in Angular, first add this _`<script>`_ tag into the Angular _index.html_ file, inside the _`<body>`_ tag.
```html
<script src="https://cdn.jsdelivr.net/gh/logspace-ai/langflow-embedded-chat@main/dist/build/static/js/bundle.min.js"></script>
<script src="https://cdn.jsdelivr.net/gh/langflow-ai/langflow-embedded-chat@main/dist/build/static/js/bundle.min.js"></script>
```
When you use a custom web component in an Angular template, the Angular compiler might show a warning when it doesn't recognize the custom elements by default. To suppress this warning, add _`CUSTOM_ELEMENTS_SCHEMA`_ to the module's _`@NgModule.schemas`_.
- Open the module file (it typically ends with _.module.ts_) where you'd add the _`langflow-chat`_ web component.
- Import _`CUSTOM_ELEMENTS_SCHEMA`_ at the top of the file:
2. When you use a custom web component in an Angular template, the Angular compiler might show a warning when it doesn't recognize the custom elements by default. To suppress this warning, add _`CUSTOM_ELEMENTS_SCHEMA`_ to the module's _`@NgModule.schemas`_.
3. Open the module file (it typically ends with _.module.ts_) where you'd add the _`langflow-chat`_ web component.
4. Import _`CUSTOM_ELEMENTS_SCHEMA`_ at the top of the file:
```ts
import { NgModule, CUSTOM_ELEMENTS_SCHEMA } from "@angular/core";
```
- Add _`CUSTOM_ELEMENTS_SCHEMA`_ to the 'schemas' array inside the '@NgModule' decorator:
5. Add _`CUSTOM_ELEMENTS_SCHEMA`_ to the 'schemas' array inside the '@NgModule' decorator:
```ts
@NgModule({
@ -142,9 +145,7 @@ import { NgModule, CUSTOM_ELEMENTS_SCHEMA } from "@angular/core";
export class YourModule {}
```
In your Angular project, find the component belonging to the module where _`CUSTOM_ELEMENTS_SCHEMA`_ was added.
- Inside the template, add the _`langflow-chat`_ tag to include the Chat Widget in your component's view:
6. In your Angular project, find the component belonging to the module where _`CUSTOM_ELEMENTS_SCHEMA`_ was added. Inside the template, add the _`langflow-chat`_ tag to include the Chat Widget in your component's view:
```jsx
<langflow-chat
@ -175,7 +176,7 @@ In your Angular project, find the component belonging to the module where _`CUST
---
## Configuration
## Chat widget configuration
Use the widget API to customize your Chat Widget:

View file

@ -0,0 +1,159 @@
import Admonition from "@theme/Admonition";
# Command Line Interface (CLI)
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Langflow's Command Line Interface (CLI) is a powerful tool that allows you to interact with the Langflow server from the command line. The CLI provides a wide range of commands to help you shape Langflow to your needs.
The available commands are below. Navigate to their individual sections of this page to see the parameters.
- [langflow](#overview)
- [langflow api-key](#langflow-api-key)
- [langflow copy-db](#langflow-copy-db)
- [langflow migration](#langflow-migration)
- [langflow run](#langflow-run)
- [langflow superuser](#langflow-superuser)
## Overview
Running the CLI without any arguments displays a list of available options and commands.
```bash
langflow
# or
langflow --help
# or
python -m langflow
```
| Command | Description |
| ----------- | ---------------------------------------------------------------------- |
| `api-key` | Creates an API key for the default superuser if AUTO_LOGIN is enabled. |
| `copy-db` | Copy the database files to the current directory (`which langflow`). |
| `migration` | Run or test migrations. |
| `run` | Run Langflow. |
| `superuser` | Create a superuser. |
### Options
| Option | Description |
| ---------------------- | -------------------------------------------------------------------------------- |
| `--install-completion` | Install completion for the current shell. |
| `--show-completion` | Show completion for the current shell, to copy it or customize the installation. |
| `--help` | Show this message and exit. |
## langflow api-key
Run the `api-key` command to create an API key for the default superuser if `LANGFLOW_AUTO_LOGIN` is set to `True`.
```bash
langflow api-key
# or
python -m langflow api-key
╭─────────────────────────────────────────────────────────────────────╮
│ API Key Created Successfully: │
│ │
│ sk-O0elzoWID1izAH8RUKrnnvyyMwIzHi2Wk-uXWoNJ2Ro │
│ │
│ This is the only time the API key will be displayed. │
│ Make sure to store it in a secure location. │
│ │
│ The API key has been copied to your clipboard. Cmd + V to paste it. │
╰─────────────────────────────────────────────────────────────────────╯
```
### Options
| Option | Type | Description |
| ----------- | ---- | ------------------------------------------------------------- |
| --log-level | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] |
| --help | | Show this message and exit. |
## langflow copy-db
Run the `copy-db` command to copy the cached `langflow.db` and `langflow-pre.db` database files to the current directory.
If the files exist in the cache directory, they will be copied to the same directory as `__main__.py`, which can be found with `which langflow`.
### Options
None.
## langflow migration
Run or test migrations with the [Alembic](https://pypi.org/project/alembic/) database tool.
```bash
langflow migration
# or
python -m langflow migration
```
### Options
| Option | Description |
| ------------------- | -------------------------------------------------------------------------------------------------------------------------- |
| `--test, --no-test` | Run migrations in test mode. [default: test] |
| `--fix, --no-fix` | Fix migrations. This is a destructive operation, and should only be used if you know what you are doing. [default: no-fix] |
| `--help` | Show this message and exit. |
## langflow run
Run Langflow.
```bash
langflow run
# or
python -m langflow run
```
### Options
| Option | Description |
| ---------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `--help` | Displays all available options. |
| `--host` | Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`. |
| `--workers` | Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`. |
| `--timeout` | Sets the worker timeout in seconds. The default is `60`. |
| `--port` | Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`. |
| `--env-file` | Specifies the path to the .env file containing environment variables. The default is `.env`. |
| `--log-level` | Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`. |
| `--components-path` | Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`. |
| `--log-file` | Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`. |
| `--cache` | Select the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`. |
| `--dev`/`--no-dev` | Toggles the development mode. The default is `no-dev`. |
| `--path` | Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable. |
| `--open-browser`/`--no-open-browser` | Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`. |
| `--remove-api-keys`/`--no-remove-api-keys` | Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`. |
| `--install-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Installs completion for the specified shell. |
| `--show-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Shows completion for the specified shell, allowing you to copy it or customize the installation. |
| `--backend-only` | This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable. For more, see [Backend-only](../deployment/backend-only). |
| `--store` | This parameter, with a default value of `True`, enables the store features, use `--no-store` to deactivate it. It can be configured using the `LANGFLOW_STORE` environment variable. |
#### CLI environment variables
You can configure many of the CLI options using environment variables. These can be exported in your operating system or added to a `.env` file and loaded using the `--env-file` option.
A sample `.env` file named `.env.example` is included with the project. Copy this file to a new file named `.env` and replace the example values with your actual settings. If you're setting values in both your OS and the `.env` file, the `.env` settings will take precedence.
## langflow superuser
Create a superuser for Langflow.
```bash
langflow superuser
# or
python -m langflow superuser
```
### Options
| Option | Type | Description |
| ------------- | ---- | ------------------------------------------------------------- |
| `--username` | TEXT | Username for the superuser. [default: None] [required] |
| `--password` | TEXT | Password for the superuser. [default: None] [required] |
| `--log-level` | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] |
| `--help` | | Show this message and exit. |

View file

@ -0,0 +1,105 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Collections and Projects
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
My Collection is a space in Langflow where users can manage, organize, and access their flows and components.
Flows and components are displayed as individual cards that provide relevant information.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/my-collection.png"),
dark: useBaseUrl("img/my-collection.png"),
}}
style={{ width: "30%", margin: "20px auto" }}
/>
* **Folders**: Users can organize their projects into folders. Default folders include "My Projects" and the ability to create new folders. Hover over a folder to access options to download or delete it.
* **Search Bar**: Enables users to quickly search through their flows and components.
* **Select All**: This feature allows users to select all projects displayed on the page for batch actions like moving, deleting, or exporting.
Click on a flow card to open it in Langflow Workspace or use the **Playground Button** for direct access to execute and interact with the flow's chatbot interface.
## Collections
Components created or imported by the user are also displayed in **My Collection** and can be directly removed from here.
A collection is a snapshot of flows available in a database.
Collections can be downloaded to local storage and uploaded for future use.
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
<ReactPlayer playing controls url="/videos/langflow_collection.mp4" />
</div>
## Project
A **Project** can be a flow or a component. To view your saved projects, select **My Collection**.
Your **Projects** are displayed.
Click the **![Playground icon](/logos/botmessage.svg) Playground** button to run a flow from the **My Collection** screen.
In the top left corner of the screen are options for **Download Collection**, **Upload Collection**, and **New Project**.
Select **Download Collection** to save your project to your local machine. This downloads all flows and components as a `.json` file.
Select **Upload Collection** to upload a flow or component `.json` file from your local machine.
Select **New Project** to create a new project. In addition to a blank workspace, [starter projects](../starter-projects/basic-prompting) are also available.
## Project options menu
To see options for your project, in the upper left corner of the workspace, select the dropdown menu.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/project-options-menu-light.png"),
dark: useBaseUrl("img/project-options-menu-dark.png"),
}}
style={{ width: "30%", margin: "20px auto" }}
/>
* **New** - Start a new project.
* **Duplicate** - Duplicate the current flow as a new project.
* **Settings** - Modify the project's **Name** or **Description**.
* **Import** - Upload a flow `.json` file from your local machine.
* **Export** - Download your current project to your local machine as a `.json` file.
* **Undo** or **Redo** - Undo or redo your last action.
## Project folders
Multiple projects can be stored in **folders**.
Folders allow you to categorize flows and components into manageable groups. This makes it easier to find and access specific projects quickly.
**My Projects** is a default folder where all new projects and components are initially stored unless specified otherwise. Users can create custom folders to better organize their work according to specific needs.
Hovering over a folder in Langflow provides options to either remove or download the entire folder, allowing you to keep an offline copy or migrate projects between environments.
Create new folders with the **New folder** button. One folder can store multiple projects (as the default My Projects folder does).
You can download folders of projects as a single JSON file, and upload files and flows to your folder.
Click the **Trash** icon to delete a folder.

View file

@ -0,0 +1,109 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Global Variables
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Global Variables are a useful feature of Langflow, allowing you to define reusable variables accessed from any Text field in your project.
**TL;DR**
- Global Variables are reusable variables accessible from any Text field in your project.
- To create one, click the 🌐 button in a Text field and then **+ Add New Variable**.
- Define the **Name**, **Type**, and **Value** of the variable.
- Click **Save Variable** to create it.
- All Credential Global Variables are encrypted and accessible only by you.
- Set _`LANGFLOW_STORE_ENVIRONMENT_VARIABLES`_ to _`true`_ in your `.env` file to add all variables in _`LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT`_ to your user's Global Variables.
## Create and Add a Global Variable
To create and add a global variable, click the 🌐 button in a Text field, and then click **+ Add New Variable**.
Text fields are where you write text without opening a Text area, and are identified with the 🌐 icon.
For example, to create an environment variable for the **OpenAI** component:
1. In the **OpenAI API Key** text field, click the 🌐 button, then **Add New Variable**.
2. Enter `openai_api_key` in the **Variable Name** field.
3. Paste your OpenAI API Key (`sk-...`) in the **Value** field.
4. Select **Credential** for the **Type**.
5. Choose **OpenAI API Key** in the **Apply to Fields** field to apply this variable to all fields named **OpenAI API Key**.
6. Click **Save Variable**.
You now have an `openai_api_key` global environment variable for your Langflow project.
Subsequently, clicking the 🌐 button in a Text field will display the new variable in the dropdown.
<Admonition type="tip">
You can also create global variables in **Settings** > **Variables and
Secrets**.
</Admonition>
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/global-env.png",
dark: "img/global-env.png",
}}
style={{ width: "40%", margin: "20px auto" }}
/>
To view and manage your project's global environment variables, visit **Settings** > **Variables and Secrets**.
For more on variables in HuggingFace Spaces, see [Managing Secrets](https://huggingface.co/docs/hub/spaces-overview#managing-secrets).
{/* All variables are encrypted */}
<Admonition type="warning">
All Credential Global Variables are encrypted and accessible only by you.
</Admonition>
## Configure Environment Variables in your .env file
Setting `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` to `true` in your `.env` file (default) adds all variables in `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` to your user's Global Variables.
These variables are accessible like any other Global Variable.
<Admonition type="tip">
To prevent this behavior, set `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` to
`false` in your `.env` file.
</Admonition>
You can specify variables to get from the environment by listing them in `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT`.
Specify variables as a comma-separated list (e.g., _`"VARIABLE1, VARIABLE2"`_) or a JSON-encoded string (e.g., _`'["VARIABLE1", "VARIABLE2"]'`_).
The default list of variables includes:
- ANTHROPIC_API_KEY
- ASTRA_DB_API_ENDPOINT
- ASTRA_DB_APPLICATION_TOKEN
- AZURE_OPENAI_API_KEY
- AZURE_OPENAI_API_DEPLOYMENT_NAME
- AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME
- AZURE_OPENAI_API_INSTANCE_NAME
- AZURE_OPENAI_API_VERSION
- COHERE_API_KEY
- GOOGLE_API_KEY
- GROQ_API_KEY
- HUGGINGFACEHUB_API_TOKEN
- OPENAI_API_KEY
- PINECONE_API_KEY
- SEARCHAPI_API_KEY
- SERPAPI_API_KEY
- UPSTASH_VECTOR_REST_URL
- UPSTASH_VECTOR_REST_TOKEN
- VECTARA_CUSTOMER_ID
- VECTARA_CORPUS_ID
- VECTARA_API_KEY
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
<ReactPlayer playing controls url="/videos/langflow_global_variables.mp4" />
</div>

View file

@ -4,13 +4,15 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Sign up and Sign in
# Sign Up and Sign In
## Introduction
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
The login functionality in Langflow serves to authenticate users and protect sensitive routes in the application. Starting from version 0.5, Langflow introduces an enhanced login mechanism that is governed by a few environment variables. This allows new secure features.
## Environment Variables
## Environment variables
The following environment variables are crucial in configuring the login settings:
@ -68,7 +70,7 @@ export LANGFLOW_SECRET_KEY=randomly_generated_secure_key
By default, this variable is set to `False`. When enabled (`True`), new users are automatically activated and can log in without requiring explicit activation by the superuser.
## Command-Line Interface
## Manage superusers with the CLI
Langflow provides a command-line utility for managing superusers:
@ -78,7 +80,7 @@ langflow superuser
This command prompts you to enter the username and password for the superuser, unless they are already set using environment variables.
## Sign-up
## Sign in
With _`LANGFLOW_AUTO_LOGIN`_ set to _`False`_, Langflow requires users to sign up before they can log in. The sign-up page is the default landing page when a user visits Langflow for the first time.
@ -88,12 +90,12 @@ With _`LANGFLOW_AUTO_LOGIN`_ set to _`False`_, Langflow requires users to sign u
light: useBaseUrl("img/sign-up.png"),
dark: useBaseUrl("img/sign-up.png"),
}}
style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
style={{ width: "40%", margin: "20px auto" }}
/>
## Profile settings
Users can change their profile settings by clicking on the profile icon in the top right corner of the application. This opens a dropdown menu with the following options:
Once signed in, you can change your profile settings by clicking on the profile icon in the top right corner of the Langflow dashboard. This opens a dropdown menu with the following options:
- **Admin Page**: Opens the admin page, which is only accessible to the superuser.
- **Profile Settings**: Opens the profile settings page.
@ -105,10 +107,10 @@ Users can change their profile settings by clicking on the profile icon in the t
light: useBaseUrl("img/my-account.png"),
dark: useBaseUrl("img/my-account.png"),
}}
style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
style={{ maxWidth: "600px", margin: "20px auto" }}
/>
By clicking on **Profile Settings**, the user is taken to the profile settings page, where they can change their password and their profile picture.
Select **Profile Settings** to change your password and your profile picture.
<ZoomableImage
alt="Docusaurus themed image"
@ -116,10 +118,10 @@ By clicking on **Profile Settings**, the user is taken to the profile settings p
light: useBaseUrl("img/profile-settings.png"),
dark: useBaseUrl("img/profile-settings.png"),
}}
style={{ maxWidth: "600px", margin: "0 auto" }}
style={{ maxWidth: "600px", margin: "20px auto" }}
/>
By clicking on **Admin Page**, the superuser is taken to the admin page, where they can manage users and groups.
Select **Admin Page** to manage users and groups as the superuser.
<ZoomableImage
alt="Docusaurus themed image"

View file

@ -0,0 +1,28 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Logs
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
To view logs, go to **Project Options** > **Logs**.
The **Logs** page provides a detailed record of all component executions within a workspace.
It is designed to help you track actions, debug issues, and understand the flow of data through various components.
Each log entry includes an execution with source and target components, and displays the data and parameters passed from one component to another. The status of each execution is indicated and errors encountered are easily detected.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/playground-logs.png",
dark: "img/playground-logs.png",
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

View file

@ -0,0 +1,38 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";
# Chat Memory
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Langflow allows every chat message to be stored, and a single flow can have multiple memory sessions. This enables you to create multiple “memories” for agents to store and recall specific information as needed. You can edit and remove previous messages to inspect and validate a model's response behavior. Control, explore, and manage conversation histories to get your models acting just right.
The **Chat Memory** component retrieves message histories by session ID. Users can change the session ID in the advanced settings, with the default session ID set to match the flow ID. These memories are accessible and manageable directly from the Playground; modifications to them directly affect the behavior of chatbot responses. Users can remove or edit previous messages to manipulate and explore model responses further.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/chat-memory-component.png",
dark: "img/chat-memory-component.png",
}}
style={{ width: "40%", margin: "20px auto" }}
/>
By default, chat conversations store Message objects categorized by session ID. A single flow can host multiple session IDs, and different flows can also share the same session ID.
Memories can be visualized and managed directly from the Playground. Modifying these memories will influence the behavior of the chatbot responses, as long as an agent uses them. Here you can remove or edit previous messages, allowing you to manipulate and explore how these changes affect model responses.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/playground-memories.png",
dark: "img/playground-memories.png",
}}
style={{ width: "40%", margin: "20px auto" }}
/>
You can also display all messages stored across every flow in your workspace by going to **Settings** > **Messages**.

View file

@ -0,0 +1,69 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Playground
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
<Admonition type="info">
From the **My Collection** page, click the Playground button in one of your flow cards.
It will directly open up a window with that project's Playground, without even showing the flow (this also works for flows hosted on the Langflow Store!).
</Admonition>
The **Playground** is a dynamic interface designed for real-time interaction with agents, allowing users to access and manage memories and monitor the inputs and outputs. Here, users can directly prototype and experiment with their configured components or AI models, making adjustments and observing different outcomes in real-time.
It even works for flows hosted on the Langflow store!
As long as you have a flow properly working, you can interact with it by clicking the Playground button.
1. From your **Collections** page, click the **![Playground icon](/logos/botmessage.svg)Playground** in one of your flows.
The **Playground** window opens.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/playground-chat.png"),
dark: useBaseUrl("img/playground-chat.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
2. Chat with your bot as you normally would, all without having to open the editor.
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
<ReactPlayer playing controls url="/videos/langflow_playground.mp4" />
</div>
## Playground I/O
The Playground's window arrangement changes depending on what components are being used.
Adding or removing any of the below components modifies your Playground so you can monitor the inputs and outputs.
- Chat Input
- Text Input
- Chat Output
- Text Output
- Data Output
- Inspect Memory
You can also select **Options** > **Logs** to see your flow's logs.
For more information, see [Inputs and Outputs](../components/inputs-and-outputs).
## Memory Management
When you send a message, under **Memories**, you can view a table of previous interactions for this session.
Langflow allows every chat message to be stored, and a single flow can have multiple memory sessions. This enables you to create multiple “memories” for agents to store and recall specific information as needed.
You can edit and remove previous messages to inspect and validate a model's response behavior.
For more information, see [Memories](./memories).

View file

@ -5,6 +5,10 @@ import ReactPlayer from "react-player";
# Prompt Customization
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
The prompt template allows users to create prompts and define variables that provide control over instructing the model.
{" "}

View file

@ -0,0 +1,60 @@
import Admonition from "@theme/Admonition";
# Settings
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Change the **Project Settings** or **General Settings** for Langflow.
## Project Settings
Click **Project Name** > **Settings** to view your **Project Settings**.
* **Name** - the name of your project.
* **Description** - the description for your project.
Visible on the Langflow Store.
* **Endpoint name** - the custom endpoint name for your project's API endpoint.
To use the default value, leave this field blank.
## General Settings
Select your **Profile Picture** > **Settings** to view your **General Settings**.
### Profile Picture
Select a profile picture.
### Store API Key
Add your **Langflow Store** API key.
To get a Store key, go to the [Langflow store](https://www.langflow.store/).
### Global Variables
Select **Add New** to add a key to Langflow.
Select the **trash icon** to delete a key.
For more information, see [Global Variables](./global-env).
### Langflow API
Create a Langflow API key.
Click **Add New** > **Create Secret Key** and copy the key somewhere safe and accessible.
For more information, see [Langflow API](./api).
### Shortcuts
A list of keyboard shortcuts for Langflow.
### Messages
Inspect, edit, and remove messages in your flow for testing and debugging purposes.
For more information, see the [Playground](./playground).

View file

@ -1,91 +1,88 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Agents
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Agents are components that use reasoning to make decisions and take actions, designed to autonomously perform tasks or provide services with some degree of “freedom” (or agency). They combine the power of LLM chaining processes with access to external tools such as APIs to interact with applications and accomplish tasks.
Agents are components that use reasoning to make decisions and take actions, designed to autonomously perform tasks or provide services with some degree of agency. LLM chains can only perform hardcoded sequences of actions, while agents use LLMs to reason through which actions to take, and in which order.
---
### AgentInitializer
The `AgentInitializer` component is a quick way to construct a zero-shot agent from a language model (LLM) and tools.
The `AgentInitializer` constructs a zero-shot agent from a language model (LLM) and additional tools.
**Params**
**Parameters**:
- **LLM:** Language Model to use in the `AgentInitializer`.
- **Memory:** Used to add memory functionality to an agent. It allows the agent to store and retrieve information from previous conversations.
- **Tools:** Tools that the agent will have access to.
- **Agent:** The type of agent to be instantiated. Current supported: `zero-shot-react-description`, `react-docstore`, `self-ask-with-search,conversational-react-description` and `openai-functions`.
- **LLM:** The language model used by the `AgentInitializer`.
- **Memory:** Enables memory functionality, allowing the agent to recall and use information from previous interactions.
- **Tools:** The tools available to the agent.
- **Agent:** Specifies the type of agent to instantiate. Currently supported types include `zero-shot-react-description`, `react-docstore`, `self-ask-with-search`, `conversational-react-description`, and `openai-functions`.
---
### CSVAgent
A `CSVAgent` is an agent that is designed to interact with CSV (Comma-Separated Values) files. CSV files are a common format for storing tabular data, where each row represents a record and each column represents a field. The CSV agent can perform various tasks, such as reading and writing CSV files, processing the data, and generating tables. It can extract information from the CSV file, manipulate the data, and perform operations like filtering, sorting, and aggregating.
The `CSVAgent` interacts with CSV (Comma-Separated Values) files, commonly used to store tabular data. Each row in a CSV file represents a record, and each column represents a field. The CSV agent can read and write CSV files, process data, and perform tasks such as filtering, sorting, and aggregating.
**Params**
**Parameters**:
- **LLM:** Language Model to use in the `CSVAgent`.
- **path:** The file path to the CSV data.
- **LLM:** The language model used by the `CSVAgent`.
- **Path:** The file path to the CSV data.
---
### JSONAgent
The `JSONAgent` deals with JSON (JavaScript Object Notation) data. Similar to the CSVAgent, it works with a language model (LLM) and a toolkit designed for JSON manipulation. This agent can iteratively explore a JSON blob to find the information needed to answer the user's question. It can list keys, get values, and navigate through the structure of the JSON object.
The `JSONAgent` manages JSON (JavaScript Object Notation) data. This agent, like the CSVAgent, uses a language model (LLM) and a toolkit for JSON manipulation. It can explore a JSON blob to extract needed information, list keys, retrieve values, and navigate through the JSON structure.
**Params**
**Parameters**:
- **LLM:** Language Model to use in the `JSONAgent`.
- **Toolkit:** Toolkit that the agent will have access to.
- **LLM:** The language model used by the `JSONAgent`.
- **Toolkit:** The toolkit available to the agent.
---
### SQLAgent
A `SQLAgent` is an agent that is designed to interact with SQL databases. It is capable of performing various tasks, such as querying the database, retrieving data, and executing SQL statements. The agent can provide information about the structure of the database, including the tables and their schemas. It can also perform operations like inserting, updating, and deleting data in the database. The SQL agent is a helpful tool for managing and working with SQL databases efficiently.
The `SQLAgent` interacts with SQL databases, capable of querying, retrieving data, and executing SQL statements. It provides insights into the database structure, including tables and schemas, and can perform operations such as insertions, updates, and deletions.
**Params**
**Parameters**:
- **LLM:** Language Model to use in the `SQLAgent`.
- **database_uri:** A string representing the connection URI for the SQL database.
- **LLM:** The language model used by the `SQLAgent`.
- **Database URI:** The connection URI for the SQL database.
---
### VectorStoreAgent
The `VectorStoreAgent` is designed to work with a vector store a data structure used for storing and querying vector-based representations of data. The `VectorStoreAgent` can query the vector store to find relevant information based on user inputs.
The `VectorStoreAgent` operates with a vector store, which is a data structure for storing and querying vector-based data representations. This agent can query the vector store to find information relevant to user inputs.
**Params**
**Parameters**:
- **LLM:** Language Model to use in the `VectorStoreAgent`.
- **Vector Store Info:** `VectorStoreInfo` to use in the `VectorStoreAgent`.
- **LLM:** The language model used by the `VectorStoreAgent`.
- **Vector Store Info:** The `VectorStoreInfo` used by the agent.
---
### VectorStoreRouterAgent
The `VectorStoreRouterAgent` is a custom agent that takes a vector store router as input. It is typically used when theres a need to retrieve information from multiple vector stores. These can be connected through a `VectorStoreRouterToolkit` and sent over to the `VectorStoreRouterAgent`. An agent configured with multiple vector stores can route queries to the appropriate store based on the context.
The `VectorStoreRouterAgent` is a custom agent that uses a vector store router. It is typically used to retrieve information from multiple vector stores connected through a `VectorStoreRouterToolkit`.
**Params**
**Parameters**:
- **LLM:** Language Model to use in the `VectorStoreRouterAgent`.
- **Vector Store Router Toolkit:** `VectorStoreRouterToolkit` to use in the `VectorStoreRouterAgent`.
- **LLM:** The language model used by the `VectorStoreRouterAgent`.
- **Vector Store Router Toolkit:** The toolkit used by the agent.
---
### ZeroShotAgent
The `ZeroShotAgent` is an agent that uses the ReAct framework to determine which tool to use based solely on the tool's description. It can be configured with any number of tools and requires a description for each tool. The agent is designed to be the most general-purpose action agent. It uses an `LLMChain` to determine which actions to take and in what order.
The `ZeroShotAgent` uses the ReAct framework to decide which tool to use based on the tool's description. It is the most general-purpose action agent, capable of determining the necessary actions and their sequence through an `LLMChain`.
**Params**
**Parameters**:
- **Allowed Tools:** Tools that the agent will have access to.
- **LLM Chain:** LLM Chain to be used by the agent.
- **Allowed Tools:** The tools accessible to the agent.
- **LLM Chain:** The LLM Chain used by the agent.

View file

@ -5,144 +5,62 @@ import Admonition from "@theme/Admonition";
# Chains
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation it may
contain some rough edges. Share your feedback or report issues to help us
improve! 🛠️📝
</p>
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Chains, in the context of language models, refer to a series of calls made to a language model. It allows for the output of one call to be used as the input for another call. Different types of chains allow for different levels of complexity. Chains are useful for creating pipelines and executing specific scenarios.
Chains, in the context of language models, refer to a series of calls made to a language model. This approach allows for using the output of one call as the input for another. Different chain types facilitate varying complexity levels, making them useful for creating pipelines and executing specific scenarios.
---
### CombineDocsChain
## CombineDocsChain
The `CombineDocsChain` incorporates methods to combine or aggregate loaded documents for question-answering functionality.
`CombineDocsChain` includes methods to combine or aggregate loaded documents for question-answering functionality.
<Admonition type="info">
Acts as a proxy for LangChains [documents](https://python.langchain.com/docs/modules/chains/document/) chains produced by the `load_qa_chain` function.
Works as a proxy of LangChains [documents](https://python.langchain.com/docs/modules/chains/document/) chains generated by the `load_qa_chain` function.
</Admonition>
**Params**
**Parameters**:
- **LLM:** Language Model to use in the chain.
- **chain_type:** The chain type to be used. Each one of them applies a different “combination strategy”.
- **stuff**: The stuff [documents](https://python.langchain.com/docs/modules/chains/document/stuff) chain (“stuff" as in "to stuff" or "to fill") is the most straightforward of _the_ document chains. It takes a list of documents, inserts them all into a prompt, and passes that prompt to an LLM. This chain is well-suited for applications where documents are small and only a few are passed in for most calls.
- **map_reduce**: The map-reduce [documents](https://python.langchain.com/docs/modules/chains/document/map_reduce) chain first applies an LLM chain to each document individually (the Map step), treating the chain output as a new document. It then passes all the new documents to a separate combined documents chain to get a single output (the Reduce step). It can optionally first compress or collapse the mapped documents to make sure that they fit in the combined documents chain (which will often pass them to an LLM). This compression step is performed recursively if necessary.
- **map_rerank**: The map re-rank [documents](https://python.langchain.com/docs/modules/chains/document/map_rerank) chain runs an initial prompt on each document that not only tries to complete a task but also gives a score for how certain it is in its answer. The highest-scoring response is returned.
- **refine**: The refine [documents](https://python.langchain.com/docs/modules/chains/document/refine) chain constructs a response by looping over the input documents and iteratively updating its answer. For each document, it passes all non-document inputs, the current document, and the latest intermediate answer to an LLM chain to get a new answer.
Since the Refine chain only passes a single document to the LLM at a time, it is well-suited for tasks that require analyzing more documents than can fit in the model's context. The obvious tradeoff is that this chain will make far more LLM calls than, for example, the Stuff documents chain. There are also certain tasks that are difficult to accomplish iteratively. For example, the Refine chain can perform poorly when documents frequently cross-reference one another or when a task requires detailed information from many documents.
- **chain_type:** Type of chain to be used, each applying a different combination strategy:
- **stuff**: Most straightforward document chain. It takes a list of documents, inserts them all into a prompt, and passes that prompt to an LLM. Suitable for cases where documents are small and few.
- **map_reduce**: Applies an LLM to each document individually (the `Map` step), treating the output as a new document. It then combines these documents to get a single output (the `Reduce` step). Compression may occur to ensure documents fit in the final chain.
- **map_rerank**: Runs an initial prompt on each document to complete a task and score its certainty. Returns the highest-scoring response.
- **refine**: Iteratively updates its answer by looping over the input documents. Each document, along with the latest intermediate answer, is passed to an LLM to generate a new response. This method suits tasks requiring analysis of more documents than the model's context can handle, though it can be less effective for tasks requiring detailed cross-referencing or comprehensive information.
---
### ConversationChain
## ConversationChain
The `ConversationChain` is a straightforward chain for interactive conversations with a language model, making it ideal for chatbots or virtual assistants. It allows for dynamic conversations, question-answering, and complex dialogues.
`ConversationChain` facilitates dynamic, interactive conversations with a language model, ideal for chatbots or virtual assistants.
**Params**
**Parameters**:
- **LLM:** Language Model to use in the chain.
- **Memory:** Default memory store.
- **input_key:** Used to specify the key under which the user input will be stored in the conversation memory. It allows you to provide the user's input to the chain for processing and generating a response.
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can be helpful for debugging and understanding the chain's behavior. If set to False, it will suppress the verbose output — defaults to `False`.
- **input_key:** Specifies the key under which user input is stored in the conversation memory, enabling the chain to process and generate responses.
- **output_key:** Specifies the key under which the generated response is stored, allowing retrieval of the response using this key.
- **verbose:** Controls the verbosity of the chain's output. Set to `True` to enable detailed internal state outputs, useful for debugging and understanding the chain's behavior. Defaults to `False`.
---
### ConversationalRetrievalChain
## ConversationalRetrievalChain
The `ConversationalRetrievalChain` extracts information and provides answers by combining document search and question-answering abilities.
`ConversationalRetrievalChain` combines document search with question-answering capabilities, extracting information and providing answers.
<Admonition type="info">
A retriever finds documents based on a query but doesnt store them; it returns the documents matching the query.
A retriever is a component that finds documents based on a query. It doesn't store the documents themselves, but it returns the ones that match the query.
</Admonition >
**Params**
**Parameters**:
- **LLM:** Language Model to use in the chain.
- **Memory:** Default memory store.
- **Retriever:** The retriever used to fetch relevant documents.
- **chain_type:** The chain type to be used. Each one of them applies a different “combination strategy”.
- **stuff**: The stuff [documents](https://python.langchain.com/docs/modules/chains/document/stuff) chain (“stuff" as in "to stuff" or "to fill") is the most straightforward of _the_ document chains. It takes a list of documents, inserts them all into a prompt, and passes that prompt to an LLM. This chain is well-suited for applications where documents are small and only a few are passed in for most calls.
- **map_reduce**: The map-reduce [documents](https://python.langchain.com/docs/modules/chains/document/map_reduce) chain first applies an LLM chain to each document individually (the Map step), treating the chain output as a new document. It then passes all the new documents to a separate combined documents chain to get a single output (the Reduce step). It can optionally first compress or collapse the mapped documents to make sure that they fit in the combined documents chain (which will often pass them to an LLM). This compression step is performed recursively if necessary.
- **map_rerank**: The map re-rank [documents](https://python.langchain.com/docs/modules/chains/document/map_rerank) chain runs an initial prompt on each document that not only tries to complete a task but also gives a score for how certain it is in its answer. The highest-scoring response is returned.
- **refine**: The refine [documents](https://python.langchain.com/docs/modules/chains/document/refine) chain constructs a response by looping over the input documents and iteratively updating its answer. For each document, it passes all non-document inputs, the current document, and the latest intermediate answer to an LLM chain to get a new answer.
Since the Refine chain only passes a single document to the LLM at a time, it is well-suited for tasks that require analyzing more documents than can fit in the model's context. The obvious tradeoff is that this chain will make far more LLM calls than, for example, the Stuff documents chain. There are also certain tasks that are difficult to accomplish iteratively. For example, the Refine chain can perform poorly when documents frequently cross-reference one another or when a task requires detailed information from many documents.
- **return_source_documents:** Used to specify whether or not to include the source documents that were used to answer the question in the output. When set to `True`, source documents will be included in the output along with the generated answer. This can be useful for providing additional context or references to the user — defaults to `True`.
- **verbose:** Whether or not to run in verbose mode. In verbose mode, intermediate logs will be printed to the console — defaults to `False`.
- **chain_type:** Type of chain to be used, each applying a different combination strategy:
- **stuff**: Inserts a list of documents into a prompt and passes it to an LLM. Suitable for cases where documents are small and few.
- **map_reduce**: Processes each document with an LLM separately, combines them for a single output. Compressions may occur to fit documents into the final chain.
- **map_rerank**: Scores responses based on certainty from each document, returns the highest.
- **refine**: Updates answers iteratively by looping through documents, passing each with intermediate answers to an LLM for a new response. This method is beneficial for tasks that involve extensive document analysis.
- **return_source_documents:** Specifies whether to include source documents used in the output. Useful for providing context or references to the user. Defaults to `True`.
- **verbose:** Controls verbosity of output. Set to `True` for detailed logs, useful for debugging. Defaults to `False`.
---
### LLMChain
The `LLMChain` is a straightforward chain that adds functionality around language models. It combines a prompt template with a language model. To use it, create input variables to format the prompt template. The formatted prompt is then sent to the language model, and the generated output is returned as the result of the `LLMChain`.
**Params**
- **LLM:** Language Model to use in the chain.
- **Memory:** Default memory store.
- **Prompt**: Prompt template object to use in the chain.
- **output_key:** This parameter is used to specify which key in the LLM output dictionary should be returned as the final output. By default, the `LLMChain` returns both the input and output key values — defaults to `text`.
- **verbose:** Whether or not to run in verbose mode. In verbose mode, intermediate logs will be printed to the console — defaults to `False`.
---
### LLMMathChain
The `LLMMathChain` combines a language model (LLM) and a math calculation component. It allows the user to input math problems and get the corresponding solutions.
The `LLMMathChain` works by using the language model with an `LLMChain` to understand the input math problem and generate a math expression. It then passes this expression to the math component, which evaluates it and returns the result.
**Params**
- **LLM:** Language Model to use in the chain.
- **LLMChain:** LLM Chain to use in the chain.
- **Memory:** Default memory store.
- **input_key:** Used to specify the input value for the mathematical calculation. It allows you to provide the specific values or variables that you want to use in the calculation — defaults to `question`.
- **output_key:** Used to specify the key under which the output of the mathematical calculation will be stored. It allows you to retrieve the result of the calculation using the specified key — defaults to `answer`.
- **verbose:** Whether or not to run in verbose mode. In verbose mode, intermediate logs will be printed to the console — defaults to `False`.
---
### RetrievalQA
`RetrievalQA` is a chain used to find relevant documents or information to answer a given query. The retriever is responsible for returning the relevant documents based on the query, and the QA component then extracts the answer from those documents. The retrieval QA system combines the capabilities of both the retriever and the QA component to provide accurate and relevant answers to user queries.
<Admonition type="info">
A retriever is a component that finds documents based on a query. It doesn't store the documents themselves, but it returns the ones that match the query.
</Admonition >
**Params**
- **Combine Documents Chain:** Chain to use to combine the documents.
- **Memory:** Default memory store.
- **Retriever:** The retriever used to fetch relevant documents.
- **input_key:** This parameter is used to specify the key in the input data that contains the question. It is used to retrieve the question from the input data and pass it to the question-answering model for generating the answer — defaults to `query`.
- **output_key:** This parameter is used to specify the key in the output data where the generated answer will be stored. It is used to retrieve the answer from the output data after the question-answering model has generated it — defaults to `result`.
- **return_source_documents:** Used to specify whether or not to include the source documents that were used to answer the question in the output. When set to `True`, source documents will be included in the output along with the generated answer. This can be useful for providing additional context or references to the user — defaults to `True`.
- **verbose:** Whether or not to run in verbose mode. In verbose mode, intermediate logs will be printed to the console — defaults to `False`.
---
### SQLDatabaseChain
The `SQLDatabaseChain` finds answers to questions using a SQL database. It works by using the language model to understand the SQL query and generate the corresponding SQL code. It then passes the SQL code to the SQL database component, which executes the query on the database and returns the result.
**Params**
- **Db:** SQL Database to connect to.
- **LLM:** Language Model to use in the chain.
- **Prompt:** Prompt template to translate natural language to SQL.

View file

@ -1,116 +1,227 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Custom Components
Used to create a custom component, a special type of Langflow component that allows users to extend the functionality of the platform by creating their own reusable and configurable components from a Python script.
To use a custom component, follow these steps:
- Create a class that inherits from _`langflow.CustomComponent`_ and contains a _`build`_ method.
- Use arguments with [Type Annotations (or Type Hints)](https://docs.python.org/3/library/typing.html) of the _`build`_ method to create component fields.
- If applicable, use the _`build_config`_ method to customize how these fields look and behave.
<Admonition type="info" label="Tip">
For an in-depth explanation of custom components, their rules, and applications, make sure to read [Custom Component guidelines](../guidelines/custom-component).
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
**Params**
Build custom components in Langflow for various data processing and transformation tasks.
- **Code:** The Python code to define the component.
This guide provides a comprehensive overview of how to create custom components using Langflow.
## The CustomComponent Class
## Basic Structure of a Custom Component
The CustomComponent class serves as the foundation for creating custom components. By inheriting this class, users can create new, configurable components, tailored to their specific requirements.
A custom component in Langflow typically includes the following parts:
**Methods**
1. **Class Definition**: Inherits from the `Component` class.
2. **Component Metadata**: Defines display name, description, and icon.
3. **Inputs and Outputs**: Specifies the inputs and outputs for the component.
4. **Processing Logic**: Implements the logic for processing data within the component.
- **build**: This method is required within a Custom Component class. It defines the component's functionality and specifies how it processes input data to produce output data. This method is called when the component is built (i.e., when you click the _Build_ ⚡ button in the canvas).
A custom component in Python looks like this:
The type annotations of the _`build`_ instance method are used to create the fields of the component.
```python
from langflow.custom import Component
from langflow.inputs import MessageTextInput, IntInput, BoolInput, DropdownInput, HandleInput
from langflow.template import Output
from langflow.schema import Data, Message
from typing import List, Optional
| Supported Types |
| --------------------------------------------------------- |
| _`str`_, _`int`_, _`float`_, _`bool`_, _`list`_, _`dict`_ |
| _`langflow.field_typing.NestedDict`_ |
| _`langflow.field_typing.Prompt`_ |
| _`langchain.chains.base.Chain`_ |
| _`langchain.PromptTemplate`_ |
| _`langchain.llms.base.BaseLLM`_ |
| _`langchain.Tool`_ |
| _`langchain.document_loaders.base.BaseLoader`_ |
| _`langchain.schema.Document`_ |
| _`langchain.text_splitters.TextSplitter`_ |
| _`langchain.vectorstores.base.VectorStore`_ |
| _`langchain.embeddings.base.Embeddings`_ |
| _`langchain.schema.BaseRetriever`_ |
class ExampleComponent(Component):
display_name = "Example Component"
description = "A template for creating custom components."
icon = "icon-name"
The difference between _`dict`_ and _`langflow.field_typing.NestedDict`_ is that one adds a simple key-value pair field, while the other opens a more robust dictionary editor.
inputs = [
MessageTextInput(
name="input_text",
display_name="Input Text",
info="Text input for the component.",
),
IntInput(
name="input_number",
display_name="Input Number",
info="Numeric input for the component.",
),
BoolInput(
name="input_boolean",
display_name="Input Boolean",
info="Boolean input for the component.",
),
DropdownInput(
name="input_choice",
display_name="Input Choice",
options=["Option1", "Option2", "Option3"],
info="Dropdown input for the component.",
),
]
<Admonition type="info">
To use the _`Prompt`_ type, you must also add _`**kwargs`_ to the _`build`_ method. This is because the _`Prompt`_ type passes new arbitrary keyword arguments to it.
outputs = [
Output(display_name="Output Data", name="output_data", method="process_data"),
]
If you want to add the values of the variables to the template you defined, you must format the PromptTemplate inside the CustomComponent class.
</Admonition>
def process_data(self) -> Data:
input_text = self.input_text
input_number = self.input_number
input_boolean = self.input_boolean
input_choice = self.input_choice
# Implement your processing logic here
result = f"Processed: {input_text}, {input_number}, {input_boolean}, {input_choice}"
<Admonition type="info">
Unlike Langchain types, base Python types do not add a
[handle](../guidelines/components) to the field by default. To add handles,
use the _`input_types`_ key in the _`build_config`_ method.
</Admonition>
self.status = result
return Data(data={"result": result})
- **build_config**: Used to define the configuration fields of the component (if applicable). It should always return a dictionary with specific keys representing the field names and corresponding configurations. This method is called when the code is processed (i.e., when you click _Check and Save_ in the code editor). It must follow the format described below:
```
- Top-level keys are field names.
- Their values can be of type _`langflow.field_typing.TemplateField`_ or _`dict`_. They specify the behavior of the generated fields.
## Create a Custom Component Step-by-Step
Below are the available keys used to configure component fields:
1. Create a class that inherits from the `Component` class.
| Key | Description |
| -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| _`field_type: str`_ | The type of the field (can be any of the types supported by the _`build`_ method). |
| _`is_list: bool`_ | If the field can be a list of values, meaning that the user can manually add more inputs to the same field. |
| _`options: List[str]`_ | When defined, the field becomes a dropdown menu where a list of strings defines the options to be displayed. If the _`value`_ attribute is set to one of the options, that option becomes default. For this parameter to work, _`field_type`_ should invariably be _`str`_. |
| _`multiline: bool`_ | Defines if a string field opens a text editor. Useful for longer texts. |
| _`input_types: List[str]`_ | Used when you want a _`str`_ field to have connectable handles. |
| _`display_name: str`_ | Defines the name of the field. |
| _`advanced: bool`_ | Hide the field in the canvas view (displayed component settings only). Useful when a field is for advanced users. |
| _`password: bool`_ | To mask the input text. Useful to hide sensitive text (e.g. API keys). |
| _`required: bool`_ | Makes the field required. |
| _`info: str`_ | Adds a tooltip to the field. |
| _`file_types: List[str]`_ | This is a requirement if the _`field_type`_ is _file_. Defines which file types will be accepted. For example, _json_, _yaml_ or _yml_. |
| _`range_spec: langflow.field_typing.RangeSpec`_ | This is a requirement if the _`field_type`_ is _`float`_. Defines the range of values accepted and the step size. If none is defined, the default is _`[-1, 1, 0.1]`_. |
| _`title_case: bool`_ | Formats the name of the field when _`display_name`_ is not defined. Set it to False to keep the name as you set it in the _`build`_ method. |
```python
class ExampleComponent(Component):
# Class content
```
<Admonition type="info" label="Tip">
2. Define metadata such as `display_name`, `description`, and `icon`.
Keys _`options`_ and _`value`_ can receive a method or function that returns a list of strings or a string, respectively. This is useful when you want to dynamically generate the options or the default value of a field. A refresh button will appear next to the field in the component, allowing the user to update the options or the default value.
```python
display_name = "Example Component"
description = "A template for creating custom components."
icon = "icon-name"
```
</Admonition>
3. Define the inputs and outputs for the component using the `inputs` and `outputs` lists.
**Inputs** can be of various types such as `TextInput`, `IntInput`, `BoolInput`, `DropdownInput`, etc.
```python
inputs = [
MessageTextInput(
name="input_text",
display_name="Input Text",
info="Text input for the component.",
),
IntInput(
name="input_number",
display_name="Input Number",
info="Numeric input for the component.",
),
BoolInput(
name="input_boolean",
display_name="Input Boolean",
info="Boolean input for the component.",
),
DropdownInput(
name="input_choice",
display_name="Input Choice",
options=["Option1", "Option2", "Option3"],
info="Dropdown input for the component.",
),
]
```
- The CustomComponent class also provides helpful methods for specific tasks (e.g., to load and use other flows from the Langflow platform):
**Outputs** define the output methods for the component.
| Method Name | Description |
| -------------- | ------------------------------------------------------------------- |
| _`list_flows`_ | Returns a list of Flow objects with an _`id`_ and a _`name`_. |
| _`get_flow`_ | Returns a Flow object. Parameters are _`flow_name`_ or _`flow_id`_. |
| _`load_flow`_ | Loads a flow from a given _`id`_. |
```python
outputs = [
Output(display_name="Output Data", name="output_data", method="process_data"),
]
```
- Useful attributes:
4. Implement the logic for processing data within the component. Define methods for processing data and returning results.
| Attribute Name | Description |
| -------------- | ----------------------------------------------------------------------------- |
| _`status`_ | Displays the value it receives in the _`build`_ method. Useful for debugging. |
| _`field_order`_ | Defines the order the fields will be displayed in the canvas. |
| _`icon`_ | Defines the emoji (for example, _`:rocket:`_) that will be displayed in the canvas. |
```python
def process_data(self) -> Data:
input_text = self.input_text
input_number = self.input_number
input_boolean = self.input_boolean
input_choice = self.input_choice
<Admonition type="info" label="Tip">
# Implement your processing logic here
result = f"Processed: {input_text}, {input_number}, {input_boolean}, {input_choice}"
Check out the [FlowRunner](../examples/flow-runner) example to understand how to call a flow from a custom component.
self.status = result
return Data(data={"result": result})
</Admonition>
```
## Advanced Example: Create a Conditional Router Component
This example demonstrates a more complex component that routes data based on a condition.
Notice that this component has two outputs associated with the methods `true_response` and `false_response`.
These methods trigger `self.stop` to block the transmission for the selected output, allowing for logic operations to be implemented visually.
```python
from langflow.custom import Component
from langflow.inputs import MessageTextInput, DropdownInput, BoolInput
from langflow.template import Output
from langflow.field_typing import Text
class ConditionalRouterComponent(Component):
display_name = "Conditional Router"
description = "Routes input based on a specified condition."
icon = "router"
inputs = [
MessageTextInput(
name="input_value",
display_name="Input Value",
info="Value to be evaluated.",
),
MessageTextInput(
name="comparison_value",
display_name="Comparison Value",
info="Value to compare against.",
),
DropdownInput(
name="operator",
display_name="Operator",
options=["equals", "not equals", "contains"],
info="Comparison operator.",
),
]
outputs = [
Output(display_name="True Output", name="true_output", method="true_response"),
Output(display_name="False Output", name="false_response", method="false_response"),
]
def evaluate_condition(self, input_value: str, comparison_value: str, operator: str) -> bool:
if operator == "equals":
return input_value == comparison_value
elif operator == "not equals":
return input_value != comparison_value
elif operator == "contains":
return comparison_value in input_value
return False
def true_response(self) -> Text:
if self.evaluate_condition(self.input_value, self.comparison_value, self.operator):
self.stop("false_response")
return self.input_value
else:
self.stop("true_response")
return ""
def false_response(self) -> Text:
if not self.evaluate_condition(self.input_value, self.comparison_value, self.operator):
self.stop("true_response")
return self.input_value
else:
self.stop("false_response")
return ""
```
By following these steps and examples, you can create custom components in Langflow tailored to your specific needs. The modular structure of Custom Components allows for flexible and reusable components that can be easily integrated into your workflows.
---

View file

@ -0,0 +1,64 @@
import Admonition from "@theme/Admonition";
# Data
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
## API Request
This component sends HTTP requests to the specified URLs.
Use this component to interact with external APIs or services and retrieve data. Ensure that the URLs are valid and that you configure the method, headers, body, and timeout correctly.
**Parameters:**
- **URLs:** The URLs to target.
- **Method:** The HTTP method, such as GET or POST.
- **Headers:** The headers to include with the request.
- **Body:** The data to send with the request (for methods like POST, PATCH, PUT).
- **Timeout:** The maximum time to wait for a response.
---
## Directory
This component recursively retrieves files from a specified directory.
Use this component to retrieve various file types, such as text or JSON files, from a directory. Make sure to provide the correct path and configure the other parameters as needed.
**Parameters:**
- **Path:** The directory path.
- **Types:** The types of files to retrieve. Leave this blank to retrieve all file types.
- **Depth:** The level of directory depth to search.
- **Max Concurrency:** The maximum number of simultaneous file loading operations.
- **Load Hidden:** Set to true to include hidden files.
- **Recursive:** Set to true to enable recursive search.
- **Silent Errors:** Set to true to suppress exceptions on errors.
- **Use Multithreading:** Set to true to use multithreading in file loading.
---
## File
This component loads a file.
Use this component to load files, such as text or JSON files. Ensure you specify the correct path and configure other parameters as necessary.
**Parameters:**
- **Path:** The file path.
- **Silent Errors:** Set to true to prevent exceptions on errors.
---
## URL
This component retrieves content from specified URLs.
Ensure the URLs are valid and adjust other parameters as needed.
**Parameters:**
- **URLs:** The URLs to retrieve content from.

View file

@ -2,122 +2,133 @@ import Admonition from "@theme/Admonition";
# Embeddings
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation — it may
contain some rough edges. Share your feedback or report issues to help us
improve! 🛠️📝
</p>
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Embeddings are vector representations of text that capture the semantic meaning of the text. They are created using text embedding models and allow us to think about the text in a vector space, enabling us to perform tasks like semantic search, where we look for pieces of text that are most similar in the vector space.
## Amazon Bedrock Embeddings
---
Used to load embedding models from [Amazon Bedrock](https://aws.amazon.com/bedrock/).
### BedrockEmbeddings
| **Parameter** | **Type** | **Description** | **Default** |
| -------------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
| `credentials_profile_name` | `str` | Name of the AWS credentials profile in ~/.aws/credentials or ~/.aws/config, which has access keys or role information. | |
| `model_id` | `str` | ID of the model to call, e.g., `amazon.titan-embed-text-v1`. This is equivalent to the `modelId` property in the `list-foundation-models` API. | |
| `endpoint_url` | `str` | URL to set a specific service endpoint other than the default AWS endpoint. | |
| `region_name` | `str` | AWS region to use, e.g., `us-west-2`. Falls back to `AWS_DEFAULT_REGION` environment variable or region specified in ~/.aws/config if not provided. | |
Used to load [Amazon Bedrock's](https://aws.amazon.com/bedrock/) embedding models.
## Astra vectorize
**Params**
Used to generate server-side embeddings using [DataStax Astra](https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html).
- **credentials_profile_name:** The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See [the AWS documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html) for more details.
| **Parameter** | **Type** | **Description** | **Default** |
|--------------------|----------|-----------------------------------------------------------------------------------------------------------------------|-------------|
| `provider` | `str` | The embedding provider to use. | |
| `model_name` | `str` | The embedding model to use. | |
| `authentication` | `dict` | Authentication parameters. Use the Astra Portal to add the embedding provider integration to your Astra organization. | |
| `provider_api_key` | `str` | An alternative to the Astra Authentication that let you use directly the API key of the provider. | |
| `model_parameters` | `dict` | Additional model parameters. | |
- **model_id:** Id of the model to call, e.g., amazon.titan-embed-text-v1, this is equivalent to the modelId property in the list-foundation-models api.
## Cohere Embeddings
- **endpoint_url:** Needed if you don't want to use the default us-east-1 endpoint.
Used to load embedding models from [Cohere](https://cohere.com/).
- **region_name:** The AWS region, e.g., us-west-2. Falls back to the AWS_DEFAULT_REGION environment variable or the region specified in ~/.aws/config if it is not provided here.
| **Parameter** | **Type** | **Description** | **Default** |
| ---------------- | -------- | ------------------------------------------------------------------------- | -------------------- |
| `cohere_api_key` | `str` | API key required to authenticate with the Cohere service. | |
| `model` | `str` | Language model used for embedding text documents and performing queries. | `embed-english-v2.0` |
| `truncate` | `bool` | Whether to truncate the input text to fit within the model's constraints. | `False` |
---
## Azure OpenAI Embeddings
### CohereEmbeddings
Generate embeddings using Azure OpenAI models.
Used to load [Cohere's](https://cohere.com/) embedding models.
| **Parameter** | **Type** | **Description** | **Default** |
| ----------------- | -------- | -------------------------------------------------------------------------------------------------- | ----------- |
| `Azure Endpoint` | `str` | Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/` | |
| `Deployment Name` | `str` | The name of the deployment. | |
| `API Version` | `str` | The API version to use, options include various dates. | |
| `API Key` | `str` | The API key to access the Azure OpenAI service. | |
**Params**
## Hugging Face API Embeddings
- **cohere_api_key:** Holds the API key required to authenticate with the Cohere service.
Generate embeddings using Hugging Face Inference API models.
- **model:** The language model used for embedding text documents and performing queries — defaults to `embed-english-v2.0`.
| **Parameter** | **Type** | **Description** | **Default** |
| --------------- | -------- | ----------------------------------------------------- | ------------------------ |
| `API Key` | `str` | API key for accessing the Hugging Face Inference API. | |
| `API URL` | `str` | URL of the Hugging Face Inference API. | `http://localhost:8080` |
| `Model Name` | `str` | Name of the model to use for embeddings. | `BAAI/bge-large-en-v1.5` |
| `Cache Folder` | `str` | Folder path to cache Hugging Face models. | |
| `Encode Kwargs` | `dict` | Additional arguments for the encoding process. | |
| `Model Kwargs` | `dict` | Additional arguments for the model. | |
| `Multi Process` | `bool` | Whether to use multiple processes. | `False` |
- **truncate:** Used to specify whether or not to truncate the input text. Truncation is useful when dealing with long texts that exceed the model's maximum input length. By truncating the text, the user can ensure that it fits within the model's constraints.
## Hugging Face Embeddings
---
Used to load embedding models from [HuggingFace](https://huggingface.co).
### HuggingFaceEmbeddings
| **Parameter** | **Type** | **Description** | **Default** |
| --------------- | -------- | ---------------------------------------------- | ----------------------------------------- |
| `Cache Folder` | `str` | Folder path to cache HuggingFace models. | |
| `Encode Kwargs` | `dict` | Additional arguments for the encoding process. | |
| `Model Kwargs` | `dict` | Additional arguments for the model. | |
| `Model Name` | `str` | Name of the HuggingFace model to use. | `sentence-transformers/all-mpnet-base-v2` |
| `Multi Process` | `bool` | Whether to use multiple processes. | `False` |
Used to load [HuggingFace's](https://huggingface.co) embedding models.
## OpenAI Embeddings
**Params**
Used to load embedding models from [OpenAI](https://openai.com/).
- **cache_folder:** Used to specify the folder where the embeddings will be cached. When embeddings are computed for a text, they can be stored in the cache folder so that they can be reused later without the need to recompute them. This can improve the performance of the application by avoiding redundant computations.
| **Parameter** | **Type** | **Description** | **Default** |
| -------------------------- | ---------------- | ------------------------------------------------ | ------------------------ |
| `OpenAI API Key` | `str` | The API key to use for accessing the OpenAI API. | |
| `Default Headers` | `Dict[str, str]` | Default headers for the HTTP requests. | |
| `Default Query` | `NestedDict` | Default query parameters for the HTTP requests. | |
| `Allowed Special` | `List[str]` | Special tokens allowed for processing. | `[]` |
| `Disallowed Special` | `List[str]` | Special tokens disallowed for processing. | `["all"]` |
| `Chunk Size` | `int` | Chunk size for processing. | `1000` |
| `Client` | `Any` | HTTP client for making requests. | |
| `Deployment` | `str` | Deployment name for the model. | `text-embedding-3-small` |
| `Embedding Context Length` | `int` | Length of embedding context. | `8191` |
| `Max Retries` | `int` | Maximum number of retries for failed requests. | `6` |
| `Model` | `str` | Name of the model to use. | `text-embedding-3-small` |
| `Model Kwargs` | `NestedDict` | Additional keyword arguments for the model. | |
| `OpenAI API Base` | `str` | Base URL of the OpenAI API. | |
| `OpenAI API Type` | `str` | Type of the OpenAI API. | |
| `OpenAI API Version` | `str` | Version of the OpenAI API. | |
| `OpenAI Organization` | `str` | Organization associated with the API key. | |
| `OpenAI Proxy` | `str` | Proxy server for the requests. | |
| `Request Timeout` | `float` | Timeout for the HTTP requests. | |
| `Show Progress Bar` | `bool` | Whether to show a progress bar for processing. | `False` |
| `Skip Empty` | `bool` | Whether to skip empty inputs. | `False` |
| `TikToken Enable` | `bool` | Whether to enable TikToken. | `True` |
| `TikToken Model Name` | `str` | Name of the TikToken model. | |
- **encode_kwargs:** Used to pass additional keyword arguments to the encoding method of the underlying HuggingFace model. These keyword arguments can be used to customize the encoding process, such as specifying the maximum length of the input sequence or enabling truncation or padding.
## Ollama Embeddings
- **model_kwargs:** Used to customize the behavior of the model, such as specifying the model architecture, the tokenizer, or any other model-specific configuration options. By using `model_kwargs`, the user can configure the HuggingFace model according to specific needs and preferences.
Generate embeddings using Ollama models.
- **model_name:** Used to specify the name or identifier of the HuggingFace model that will be used for generating embeddings. It allows users to choose a specific pre-trained model from the Hugging Face model hub — defaults to `sentence-transformers/all-mpnet-base-v2`.
| **Parameter** | **Type** | **Description** | **Default** |
| ------------------- | -------- | ---------------------------------------------------------------------------------------- | ------------------------ |
| `Ollama Model` | `str` | Name of the Ollama model to use. | `llama2` |
| `Ollama Base URL` | `str` | Base URL of the Ollama API. | `http://localhost:11434` |
| `Model Temperature` | `float` | Temperature parameter for the model. Adjusts the randomness in the generated embeddings. | |
---
### OpenAIEmbeddings
Used to load [OpenAI's](https://openai.com/) embedding models.
**Params**
- **chunk_size:** Determines the maximum size of each chunk of text that is processed for embedding. If any of the incoming text chunks exceeds `chunk_size` characters, it will be split into multiple chunks of size `chunk_size` or less before being embedded — defaults to `1000`.
- **deployment:** Used to specify the deployment name or identifier of the text embedding model. It allows the user to choose a specific deployment of the model to use for embedding, which can be useful when the user has multiple deployments of the same model with different configurations or versions — defaults to `text-embedding-ada-002`.
- **embedding_ctx_length:** This parameter determines the maximum context length for the text embedding model. It specifies the number of tokens that the model considers when generating embeddings for a piece of text — defaults to `8191` (this means that the model will consider up to 8191 tokens when generating embeddings).
- **max_retries:** Determines the maximum number of times to retry a request if the model provider returns an error from their API — defaults to `6`.
- **model:** Defines which pre-trained text embedding model to use — defaults to `text-embedding-ada-002`.
- **openai_api_base:** Refers to the base URL for the Azure OpenAI resource. It is used to configure the API to connect to the Azure OpenAI service. The base URL can be found in the Azure portal under the user Azure OpenAI resource.
- **openai_api_key:** Is used to authenticate and authorize access to the OpenAI service.
- **openai_api_type:** Is used to specify the type of OpenAI API being used, either the regular OpenAI API or the Azure OpenAI API. This parameter allows the `OpenAIEmbeddings` class to connect to the appropriate API service.
- **openai_api_version:** Is used to specify the version of the OpenAI API being used. This parameter allows the `OpenAIEmbeddings` class to connect to the appropriate version of the OpenAI API service.
- **openai_organization:** Is used to specify the organization associated with the OpenAI API key. If not provided, the default organization associated with the API key will be used.
- **openai_proxy:** Proxy enables better budgeting and cost management for making OpenAI API calls, including more transparency into pricing.
- **request_timeout:** Used to specify the maximum amount of time, in milliseconds, to wait for a response from the OpenAI API when generating embeddings for a given text.
- **tiktoken_model_name:** Used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name.
---
### VertexAIEmbeddings
## VertexAI Embeddings
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embeddings API](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings).
<Admonition type="info">
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
</Admonition>
- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
- **location:** The default location to use when making API calls — defaults to `us-central1`.
- **max_output_tokens:** Token limit determines the maximum amount of text output from one prompt — defaults to `128`.
- **model_name:** The name of the Vertex AI large language model — defaults to `text-bison`.
- **project:** The default GCP project to use when making Vertex API calls.
- **request_parallelism:** The amount of parallelism allowed for requests issued to VertexAI models — defaults to `5`.
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value — defaults to `0`.
- **top_k:** How the model selects tokens for output; the next token is selected from the top `k` tokens — defaults to `40`.
- **top_p:** Tokens are selected from most probable to least until the sum of their probabilities exceeds the top `p` value — defaults to `0.95`.
- **tuned_model_name:** The name of a tuned model. If provided, model_name is ignored.
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output — defaults to `False`.
### OllamaEmbeddings
Used to load [Ollamas](https://ollama.ai/) embedding models. Wrapper around LangChain's [Ollama API](https://python.langchain.com/docs/integrations/text_embedding/ollama).
- **model:** The name of the Ollama model to use — defaults to `llama2`.
- **base_url:** The base URL for the Ollama API — defaults to `http://localhost:11434`.
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value — defaults to `0`.
| **Parameter** | **Type** | **Description** | **Default** |
| --------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------------- |
| `credentials` | `Credentials` | The default custom credentials to use. | |
| `location` | `str` | The default location to use when making API calls. | `us-central1` |
| `max_output_tokens` | `int` | Token limit determines the maximum amount of text output from one prompt. | `128` |
| `model_name` | `str` | The name of the Vertex AI large language model. | `text-bison` |
| `project` | `str` | The default GCP project to use when making Vertex API calls. | |
| `request_parallelism` | `int` | The amount of parallelism allowed for requests issued to VertexAI models. | `5` |
| `temperature` | `float` | Tunes the degree of randomness in text generations. Should be a non-negative value. | `0` |
| `top_k` | `int` | How the model selects tokens for output, the next token is selected from the top `k` tokens. | `40` |
| `top_p` | `float` | Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value. | `0.95` |
| `tuned_model_name` | `str` | The name of a tuned model. If provided, `model_name` is ignored. | |
| `verbose` | `bool` | This parameter controls the level of detail in the output. When set to `True`, it prints internal states of the chain to help debug. | `False` |

View file

@ -0,0 +1,275 @@
import Admonition from "@theme/Admonition";
# Experimental
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Components in the experimental phase are currently in beta. They have been initially developed and tested but haven't yet achieved a stable or fully supported status. We encourage users to explore these components, provide feedback, and report any issues encountered.
---
## Clear Message History Component
This component clears the message history for a specified session ID.
**Beta:** This component is in beta.
**Parameters**
- **Session ID:**
- **Display Name:** Session ID
- **Info:** Clears the message history for this ID.
**Usage**
Provide the session ID to clear its message history.
---
## Extract Key From Data
This component extracts specified keys from a record.
**Parameters**
- **Data:**
- **Display Name:** Data
- **Info:** The record from which to extract keys.
- **Keys:**
- **Display Name:** Keys
- **Info:** The keys to be extracted.
- **Silent Errors:**
- **Display Name:** Silent Errors
- **Info:** Set to true to suppress errors.
- **Advanced:** True
**Usage**
Provide the record and specify the keys you want to extract. Optionally, enable silent errors for missing keys.
---
## Flow as Tool
This component turns a function running a flow into a Tool.
**Parameters**
- **Flow Name:**
- **Display Name:** Flow Name
- **Info:** Select the flow to run.
- **Options:** List of available flows.
- **Real-time Refresh:** True
- **Refresh Button:** True
- **Name:**
- **Display Name:** Name
- **Description:** The tool's name.
- **Description:**
- **Display Name:** Description
- **Description:** Describes the tool.
- **Return Direct:**
- **Display Name:** Return Direct
- **Description:** Returns the result directly.
- **Advanced:** True
**Usage**
Select a flow, name and describe the tool, and decide if you want to return the result directly.
---
## Listen
This component listens for a specified notification.
**Parameters**
- **Name:**
- **Display Name:** Name
- **Info:** The notification to listen for.
**Usage**
Specify the notification to listen for.
---
## List Flows
This component lists all available flows.
**Usage**
Call this component without parameters to list all flows.
---
## Merge Data
This component merges a list of Data.
**Parameters**
- **Data:**
- **Display Name:** Data
**Usage**
Provide the Data you want to merge.
---
## Notify
This component generates a notification.
**Parameters**
- **Name:**
- **Display Name:** Name
- **Info:** The notification's name.
- **Data:**
- **Display Name:** Data
- **Info:** Optionally, a record to store in the notification.
- **Append:**
- **Display Name:** Append
- **Info:** Set to true to append the record to the notification.
**Usage**
Specify the notification name, provide a record if necessary, and indicate whether to append it.
---
## Run Flow
This component runs a specified flow.
**Parameters**
- **Input Value:**
- **Display Name:** Input Value
- **Multiline:** True
- **Flow Name:**
- **Display Name:** Flow Name
- **Info:** Select the flow to run.
- **Options:** List of available flows.
- **Refresh Button:** True
- **Tweaks:**
- **Display Name:** Tweaks
- **Info:** Modifications to apply to the flow.
**Usage**
Provide the input value, select the flow, and apply any tweaks.
---
## Runnable Executor
This component executes a specified runnable.
**Parameters**
- **Input Key:**
- **Display Name:** Input Key
- **Info:** The input key.
- **Inputs:**
- **Display Name:** Inputs
- **Info:** Inputs for the runnable.
- **Runnable:**
- **Display Name:** Runnable
- **Info:** The runnable to execute.
- **Output Key:**
- **Display Name:** Output Key
- **Info:** The output key.
**Usage**
Specify the input key, provide inputs, select the runnable, and optionally define the output key.
---
## SQL Executor
This component executes an SQL query.
**Parameters**
- **Database URL:**
- **Display Name:** Database URL
- **Info:** The database's URL.
- **Include Columns:**
- **Display Name:** Include Columns
- **Info:** Whether to include columns in the result.
- **Passthrough:**
- **Display Name:** Passthrough
- **Info:** Returns the query instead of raising an exception if an error occurs.
- **Add Error:**
- **Display Name:** Add Error
- **Info:** Includes the error in the result.
**Usage**
Provide the SQL query, specify the database URL, and configure settings for columns, error handling, and passthrough.
---
## SubFlow
This component dynamically generates a tool from a flow.
**Parameters**
- **Input Value:**
- **Display Name:** Input Value
- **Multiline:** True
- **Flow Name:**
- **Display Name:** Flow Name
- **Info:** Select the flow to run.
- **Options:** List of available flows.
- **Real Time Refresh:** True
- **Refresh Button:** True
- **Tweaks:**
- **Display Name:** Tweaks
- **Info:** Modifications to apply to the flow.
**Usage**
Select a flow, apply any necessary tweaks, and generate a tool.

View file

@ -0,0 +1,132 @@
import Admonition from "@theme/Admonition";
# Helpers
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
### Chat memory
This component retrieves stored chat messages based on a specific session ID.
#### Parameters
- **Sender type:** Choose the sender type from options like "Machine", "User", or "Both".
- **Sender name:** (Optional) The name of the sender.
- **Number of messages:** Number of messages to retrieve.
- **Session ID:** The session ID of the chat history.
- **Order:** Choose the message order, either "Ascending" or "Descending".
- **Data template:** (Optional) Template to convert a record to text. If left empty, the system dynamically sets it to the record's text key.
---
### Combine text
This component concatenates two text sources into a single text chunk using a specified delimiter.
#### Parameters
- **First text:** The first text input to concatenate.
- **Second text:** The second text input to concatenate.
- **Delimiter:** A string used to separate the two text inputs. Defaults to a space.
---
### Create record
This component dynamically creates a record with a specified number of fields.
#### Parameters
- **Number of fields:** Number of fields to be added to the record.
- **Text key:** Key used as text.
---
### Custom component
Use this component as a template to create your custom component.
#### Parameters
- **Parameter:** Describe the purpose of this parameter.
<Admonition type="info" title="Info">
<p>
Customize the <code>build_config</code> and <code>build</code> methods
according to your requirements.
</p>
</Admonition>
Learn more about creating custom components at [Custom Component](http://docs.langflow.org/components/custom).
---
### Documents to Data
Convert LangChain documents into Data.
#### Parameters
- **Documents:** Documents to be converted into Data.
---
### ID generator
Generates a unique ID.
#### Parameters
- **Value:** Unique ID generated.
---
### Message history
Retrieves stored chat messages based on a specific session ID.
#### Parameters
- **Sender type:** Options for the sender type.
- **Sender name:** Sender name.
- **Number of messages:** Number of messages to retrieve.
- **Session ID:** Session ID of the chat history.
- **Order:** Order of the messages.
---
### Data to text
Convert Data into plain text following a specified template.
#### Parameters
- **Data:** The Data to convert to text.
- **Template:** The template used for formatting the Data. It can contain keys like `{text}`, `{data}`, or any other key in the record.
---
### Split text
Split text into chunks of a specified length.
#### Parameters
- **Texts:** Texts to split.
- **Separators:** Characters to split on. Defaults to a space.
- **Max chunk size:** The maximum length (in characters) of each chunk.
- **Chunk overlap:** The amount of character overlap between chunks.
- **Recursive:** Whether to split recursively.
---
### Update record
Update a record with text-based key/value pairs, similar to updating a Python dictionary.
#### Parameters
- **Data:** The record to update.
- **New data:** The new data to update the record with.

View file

@ -0,0 +1,162 @@
import Admonition from "@theme/Admonition";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import useBaseUrl from "@docusaurus/useBaseUrl";
# Inputs and Outputs
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Inputs and Outputs are a category of components that are used to define where data comes in and out of your flow.
They also dynamically change the Playground and can be renamed to facilitate building and maintaining your flows.
## Inputs
Inputs are components used to define where data enters your flow. They can receive data from the user, a database, or any other source that can be converted to Text or Data.
The difference between Chat Input and other Input components is the output format, the number of configurable fields, and the way they are displayed in the Playground.
Chat Input components can output `Text` or `Data`. When you want to pass the sender name or sender to the next component, use the `Data` output. To pass only the message, use the `Text` output, useful when saving the message to a database or memory system like Zep.
You can find out more about Chat Input and other Inputs [here](#chat-input).
### Chat Input
This component collects user input from the chat.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/chat-input-expanded.png",
dark: "img/chat-input-expanded.png",
}}
style={{ width: "40%", margin: "20px auto" }}
/>
**Parameters**
- **Sender Type:** Specifies the sender type. Defaults to `User`. Options are `Machine` and `User`.
- **Sender Name:** Specifies the name of the sender. Defaults to `User`.
- **Message:** Specifies the message text. It is a multiline text input.
- **Session ID:** Specifies the session ID of the chat history. If provided, the message will be saved in the Message History.
<Admonition type="note" title="Note">
<p>
If `As Data` is `true` and the `Message` is a `Data`, the data of the `Data`
will be updated with the `Sender`, `Sender Name`, and `Session ID`.
</p>
</Admonition>
One significant capability of the Chat Input component is its ability to transform the Playground into a chat window. This feature is particularly valuable for scenarios requiring user input to initiate or influence the flow.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/playground-chat.png"),
dark: useBaseUrl("img/playground-chat.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
### Text Input
The **Text Input** component adds an **Input** field on the Playground. This enables you to define parameters while running and testing your flow.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/text-input-expanded.png",
dark: "img/text-input-expanded.png",
}}
style={{ width: "50%", margin: "20px auto" }}
/>
**Parameters**
- **Value:** Specifies the text input value. This is where the user inputs text data that will be passed to the next component in the sequence. If no value is provided, it defaults to an empty string.
- **Data Template:** Specifies how a `Data` should be converted into `Text`.
The **Data Template** field is used to specify how a `Data` should be converted into `Text`. This is particularly useful when you want to extract specific information from a `Data` and pass it as text to the next component in the sequence.
For example, if you have a `Data` with the following structure:
```json
{
"name": "John Doe",
"age": 30,
"email": "johndoe@email.com"
}
```
A template with `Name: {name}, Age: {age}` will convert the `Data` into a text string of `Name: John Doe, Age: 30`.
If you pass more than one `Data`, the text will be concatenated with a new line separator.
## Outputs
Outputs are components that are used to define where data comes out of your flow. They can be used to send data to the user, to the Playground, or to define how the data will be displayed in the Playground.
The Chat Output works similarly to the Chat Input but does not have a field that allows for written input. It is used as an Output definition and can be used to send data to the user.
You can find out more about it and the other Outputs [here](#chat-output).
### Chat Output
This component sends a message to the chat.
**Parameters**
- **Sender Type:** Specifies the sender type. Default is `"Machine"`. Options are `"Machine"` and `"User"`.
- **Sender Name:** Specifies the sender's name. Default is `"AI"`.
- **Session ID:** Specifies the session ID of the chat history. If provided, messages are saved in the Message History.
- **Message:** Specifies the text of the message.
<Admonition type="note" title="Note">
<p>
If `As Data` is `true` and the `Message` is a `Data`, the data in the `Data`
is updated with the `Sender`, `Sender Name`, and `Session ID`.
</p>
</Admonition>
### Text Output
This component displays text data to the user. It is useful when you want to show text without sending it to the chat.
**Parameters**
- **Value:** Specifies the text data to be displayed. Defaults to an empty string.
The `TextOutput` component provides a simple way to display text data. It allows textual data to be visible in the chat window during your interaction flow.
## Prompts
A prompt is the input provided to a language model, consisting of multiple components and can be parameterized using prompt templates. A prompt template offers a reproducible method for generating prompts, enabling easy customization through input variables.
### Prompt
This component creates a prompt template with dynamic variables. This is useful for structuring prompts and passing dynamic data to a language model.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/prompt-with-template.png",
dark: "img/prompt-with-template.png",
}}
style={{ width: "50%", margin: "20px auto" }}
/>
**Parameters**
- **Template:** The template for the prompt. This field allows you to create other fields dynamically by using curly brackets `{}`. For example, if you have a template like `Hello {name}, how are you?`, a new field called `name` will be created. Prompt variables can be created with any name inside curly brackets, e.g. `{variable_name}`.
### PromptTemplate
The `PromptTemplate` component enables users to create prompts and define variables that control how the model is instructed. Users can input a set of variables which the template uses to generate the prompt when a conversation starts.
After defining a variable in the prompt template, it acts as its own component input. See [Prompt Customization](../administration/prompt-customization) for more details.
- **template:** The template used to format an individual request.

View file

@ -1,221 +0,0 @@
import Admonition from '@theme/Admonition';
# LLMs
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation — it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
</Admonition>
An LLM stands for Large Language Model. It is a core component of Langflow and provides a standard interface for interacting with different LLMs from various providers such as OpenAI, Cohere, and HuggingFace. LLMs are used widely throughout Langflow, including in chains and agents. They can be used to generate text based on a given prompt (or input).
---
### Anthropic
Wrapper around Anthropic's large language models. Find out more at [Anthropic](https://www.anthropic.com).
- **anthropic_api_key:** Used to authenticate and authorize access to the Anthropic API.
- **anthropic_api_url:** Specifies the URL of the Anthropic API to connect to.
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value.
---
### ChatAnthropic
Wrapper around Anthropic's large language model used for chat-based interactions. Find out more at [Anthropic](https://www.anthropic.com).
- **anthropic_api_key:** Used to authenticate and authorize access to the Anthropic API.
- **anthropic_api_url:** Specifies the URL of the Anthropic API to connect to.
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value.
---
### CTransformers
The `CTransformers` component provides access to the Transformer models implemented in C/C++ using the [GGML](https://github.com/ggerganov/ggml) library.
<Admonition type="info">
Make sure to have the `ctransformers` python package installed. Learn more about installation, supported models, and usage [here](https://github.com/marella/ctransformers).
</Admonition>
**config:** Configuration for the Transformer models. Check out [config](https://github.com/marella/ctransformers#config). Defaults to:
```
{
"top_k": 40,
"top_p": 0.95,
"temperature": 0.8,
"repetition_penalty": 1.1,
"last_n_tokens": 64,
"seed": -1,
"max_new_tokens": 256,
"stop": null,
"stream": false,
"reset": true,
"batch_size": 8,
"threads": -1,
"context_length": -1,
"gpu_layers": 0
}
```
**model:** The path to a model file or directory or the name of a Hugging Face Hub model repo.
**model_file:** The name of the model file in the repo or directory.
**model_type:** Transformer model to be used. Learn more [here](https://github.com/marella/ctransformers).
---
### ChatOpenAI
Wrapper around [OpenAI's](https://openai.com) chat large language models. This component supports some of the LLMs (Large Language Models) available by OpenAI and is used for tasks such as chatbots, Generative Question-Answering (GQA), and summarization.
- **max_tokens:** The maximum number of tokens to generate in the completion. `-1` returns as many tokens as possible, given the prompt and the model's maximal context size. Defaults to `256`.
- **model_kwargs:** Holds any model parameters valid for creating non-specified calls.
- **model_name:** Defines the OpenAI chat model to be used.
- **openai_api_base:** Used to specify the base URL for the OpenAI API. It is typically set to the API endpoint provided by the OpenAI service.
- **openai_api_key:** Key used to authenticate and access the OpenAI API.
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value. Defaults to `0.7`.
---
### Cohere
Wrapper around [Cohere's](https://cohere.com) large language models.
- **cohere_api_key:** Holds the API key required to authenticate with the Cohere service.
- **max_tokens:** Maximum number of tokens to predict per generation. Defaults to `256`.
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value. Defaults to `0.75`.
---
### HuggingFaceHub
Wrapper around [HuggingFace](https://www.huggingface.co/models) models.
<Admonition type="info">
The HuggingFace Hub is an online platform that hosts over 120k models, 20k datasets, and 50k demo apps, all of which are open-source and publicly available. Discover more at [HuggingFace](http://www.huggingface.co).
</Admonition>
- **huggingfacehub_api_token:** Token needed to authenticate the API.
- **model_kwargs:** Keyword arguments to pass to the model.
- **repo_id:** Model name to use. Defaults to `gpt2`.
- **task:** Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.
---
### LlamaCpp
The `LlamaCpp` component provides access to the `llama.cpp` models.
<Admonition type="info">
Make sure to have the `llama.cpp` python package installed. Learn more about installation, supported models, and usage [here](https://github.com/ggerganov/llama.cpp).
</Admonition>
- **echo:** Whether to echo the prompt. Defaults to `False`.
- **f16_kv:** Use half-precision for key/value cache. Defaults to `True`.
- **last_n_tokens_size:** The number of tokens to look back at when applying the repeat_penalty. Defaults to `64`.
- **logits_all:** Return logits for all tokens, not just the last token. Defaults to `False`.
- **logprobs:** The number of logprobs to return. If None, no logprobs are returned.
- **lora_base:** The path to the Llama LoRA base model.
- **lora_path:** The path to the Llama LoRA. If None, no LoRa is loaded.
- **max_tokens:** The maximum number of tokens to generate. Defaults to `256`.
- **model_path:** The path to the Llama model file.
- **n_batch:** Number of tokens to process in parallel. Should be a number between 1 and n_ctx. Defaults to `8`.
- **n_ctx:** Token context window. Defaults to `512`.
- **n_gpu_layers:** Number of layers to be loaded into GPU memory. Defaults to `None`.
- **n_parts:** Number of parts to split the model into. If -1, the number of parts is automatically determined. Defaults to `-1`.
- **n_threads:** Number of threads to use. If None, the number of threads is automatically determined.
- **repeat_penalty:** The penalty to apply to repeated tokens. Defaults to `1.1`.
- **seed:** Seed. If -1, a random seed is used. Defaults to `-1`.
- **stop:** A list of strings to stop generation when encountered.
- **streaming:** Whether to stream the results, token by token. Defaults to `True`.
- **suffix:** A suffix to append to the generated text. If None, no suffix is appended.
- **tags:** Tags to add to the run trace.
- **temperature:** The temperature to use for sampling. Defaults to `0.8`.
- **top_k:** The top-k value to use for sampling. Defaults to `40`.
- **top_p:** The top-p value to use for sampling. Defaults to `0.95`.
- **use_mlock:** Force the system to keep the model in RAM. Defaults to `False`.
- **use_mmap:** Whether to keep the model loaded in RAM. Defaults to `True`.
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output. Defaults to `False`.
- **vocab_only:** Only load the vocabulary, no weights. Defaults to `False`.
---
### OpenAI
Wrapper around [OpenAI's](https://openai.com) large language models.
- **max_tokens:** The maximum number of tokens to generate in the completion. `-1` returns as many tokens as possible, given the prompt and the model's maximal context size. Defaults to `256`.
- **model_kwargs:** Holds any model parameters valid for creating non-specified calls.
- **model_name:** Defines the OpenAI model to be used.
- **openai_api_base:** Used to specify the base URL for the OpenAI API. It is typically set to the API endpoint provided by the OpenAI service.
- **openai_api_key:** Key used to authenticate and access the OpenAI API.
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value. Defaults to `0.7`.
---
### VertexAI
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) large language models.
<Admonition type="info">
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
</Admonition>
- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
- **location:** The default location to use when making API calls. Defaults to `us-central1`.
- **max_output_tokens:** Token limit determines the maximum amount of text output from one prompt. Defaults to `128`.
- **model_name:** The name of the Vertex AI large language model. Defaults to `text-bison`.
- **project:** The default GCP project to use when making Vertex API calls.
- **request_parallelism:** The amount of parallelism allowed for requests issued to VertexAI models. Defaults to `5`.
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value. Defaults to `0`.
- **top_k:** How the model selects tokens for output; the next token is selected from the top `k` tokens. Defaults to `40`.
- **top_p:** Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value. Defaults to `0.95`.
- **tuned_model_name:** The name of a tuned model. If provided, `model_name` is ignored.
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output. Defaults to `False`.
---
### ChatVertexAI
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) large language models.
<Admonition type="info">
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
</Admonition>
- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
- **location:** The default location to use when making API calls. Defaults to `us-central1`.
- **max_output_tokens:** Token limit determines the maximum amount of text output from one prompt. Defaults to `128`.
- **model_name:** The name of the Vertex AI large language model. Defaults to `text-bison`.
- **project:** The default GCP project to use when making Vertex API calls.
- **request_parallelism:** The amount of parallelism allowed for requests issued to VertexAI models. Defaults to `5`.
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value. Defaults to `0`.
- **top_k:** How the model selects tokens for output; the next token is selected from the top `k` tokens. Defaults to `40`.
- **top_p:** Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value. Defaults to `0.95`.
- **tuned_model_name:** The name of a tuned model. If provided, `model_name` is ignored.
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output. Defaults to `False`.

View file

@ -2,6 +2,10 @@ import Admonition from '@theme/Admonition';
# Loaders
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation — it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝

View file

@ -1,108 +1,138 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Memories
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation — it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Memory is a concept in chat-based applications that allows the system to remember previous interactions. It helps in maintaining the context of the conversation and enables the system to understand new messages in relation to past messages.
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
Thanks for your patience as we improve our documentation—it might have some
rough edges. Share your feedback or report issues to help us enhance it!
🛠️📝
</p>
</Admonition>
Memory is a concept in chat-based applications that allows the system to remember previous interactions. This capability helps maintain the context of the conversation and enables the system to understand new messages in light of past messages.
---
### MessageHistory
This component retrieves stored messages using various filters such as sender type, sender name, session ID, and the specific file path where messages are stored. It offers flexible retrieval of chat history, providing insights into past interactions.
**Parameters**
- **sender_type** (optional): Specifies the sender's type. Options include `"Machine"`, `"User"`, or `"Machine and User"`. Filters messages by the sender type.
- **sender_name** (optional): Specifies the sender's name. Filters messages by the sender's name.
- **session_id** (optional): Specifies the session ID of the chat history. Filters messages by session.
- **number_of_messages**: Specifies the number of messages to retrieve. Defaults to `5`. Determines the number of recent messages from the chat history to fetch.
<Admonition type="note" title="Note">
<p>
The component retrieves messages based on the provided criteria, including
the specific file path for stored messages. If no specific criteria are
provided, it returns the most recent messages up to the specified limit.
This component can be used to review past interactions and analyze
conversation flows.
</p>
</Admonition>
### ConversationBufferMemory
The `ConversationBufferMemory` component is a type of memory system that plainly stores the last few inputs and outputs of a conversation.
The `ConversationBufferMemory` component stores the last few inputs and outputs of a conversation.
**Params**
**Parameters**
- **input_key:** Used to specify the key under which the user input will be stored in the conversation memory. It allows you to provide the user's input to the chain for processing and generating a response.
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model. Defaults to `chat_history`.
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
- **input_key**: Specifies the key under which the user input will be stored in the conversation memory.
- **memory_key**: Specifies the prompt variable name where the memory will store and retrieve chat messages. Defaults to `chat_history`.
- **output_key**: Specifies the key under which the generated response will be stored.
- **return_messages**: Determines whether the history should be returned as a string or as a list of messages. The default is `False`.
---
### ConversationBufferWindowMemory
`ConversationBufferWindowMemory` is a variation of the `ConversationBufferMemory` that maintains a list of the recent interactions in a conversation. It only keeps the last K interactions in memory, which can be useful for maintaining a sliding window of the most recent interactions without letting the buffer get too large.
`ConversationBufferWindowMemory` is a variant of the `ConversationBufferMemory` that keeps only the last K interactions in memory. It's useful for maintaining a sliding window of recent interactions without letting the buffer get too large.
**Params**
**Parameters**
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model. Defaults to `chat_history`.
- **k:** Used to specify the number of interactions or messages that should be stored in the conversation buffer. It determines the size of the sliding window that keeps track of the most recent interactions.
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
- **input_key**: Specifies the keys in the memory object where input messages are stored.
- **memory_key**: Specifies the prompt variable name for storing and retrieving chat messages. Defaults to `chat_history`.
- **k**: Specifies the number of interactions or messages to be stored in the conversation buffer.
- **output_key**: Specifies the key under which the generated response will be stored.
- **return_messages**: Determines whether the history should be returned as a string or as a list of messages. The default is `False`.
---
### ConversationEntityMemory
The `ConversationEntityMemory` component incorporates intricate memory structures, specifically a key-value store, for entities referenced in a conversation. This facilitates the storage and retrieval of information related to entities that have been mentioned throughout the conversation.
The `ConversationEntityMemory` component uses a key-value store to manage entities mentioned in conversations. This structure enhances the storage and retrieval of information about specific entities.
**Params**
**Parameters**
- **Entity Store:** Structure that stores information about specific entities mentioned in a conversation.
- **LLM:** Language Model to use in the `ConversationEntityMemory`.
- **chat_history_key:** Specifies a unique identifier for the chat history data associated with a particular entity. This allows for organizing and accessing the chat history data for each entity within the conversation entity memory. Defaults to `history`.
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
- **k:** Refers to the number of entities that can be stored in the memory. It determines the maximum number of entities that can be stored and retrieved from the memory object. Defaults to `10`.
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
- **entity_store**: A structure that stores information about entities mentioned in a conversation.
- **LLM**: Specifies the language model used in the `ConversationEntityMemory`.
- **chat_history_key**: A unique identifier for the chat history data associated with a particular entity. This key helps organize and access chat history data for each entity within the memory. Defaults to `history`.
- **input_key**: Identifies where input messages are stored in the memory object, allowing for their retrieval and manipulation.
- **k**: Specifies the maximum number of entities that can be stored and retrieved from the memory. Defaults to `10`.
- **output_key**: Identifies the key under which the generated response is stored, enabling retrieval using this key.
- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`.
---
### ConversationKGMemory
`ConversationKGMemory` is a type of memory that uses a knowledge graph to recreate memory. It allows the extraction of entities and knowledge triplets from a new message, using previous messages as context.
The `ConversationKGMemory` utilizes a knowledge graph to enhance memory capabilities. It extracts entities and knowledge triplets from new messages, using previous messages as context.
**Params**
**Parameters**
- **LLM:** Language Model to use in the `ConversationKGMemory`.
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
- **k:** Represents the number of previous conversation turns that will be stored in the memory. For example, setting "k" to 2 means that the memory will retain the previous 2 conversation turns, allowing the model to access and utilize the information from those turns during the conversation. Defaults to `10`.
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model. Defaults to `chat_history`.
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
- **LLM**: Specifies the language model used in the `ConversationKGMemory`.
- **input_key**: Identifies where input messages are stored in the memory object, facilitating their retrieval and manipulation.
- **k**: Indicates the number of previous conversation turns stored in memory, allowing the model to utilize information from these turns. Defaults to `10`.
- **memory_key**: Specifies the prompt variable name where the memory stores and retrieves chat messages. Defaults to `chat_history`.
- **output_key**: Identifies the key under which the generated response is stored, enabling retrieval using this key.
- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`.
---
### ConversationSummaryMemory
The `ConversationSummaryMemory` is a memory component that creates a summary of the conversation over time. It condenses information from the conversation and stores the current summary in memory. It is particularly useful for longer conversations where keeping the entire message history in the prompt would take up too many tokens.
The `ConversationSummaryMemory` summarizes conversations over time, condensing information and storing it efficiently. It's particularly useful for long conversations.
**Params**
**Parameters**
- **LLM:** Language Model to use in the `ConversationSummaryMemory`.
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model. Defaults to `chat_history`.
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
- **LLM**: Specifies the language model used in the `ConversationSummaryMemory`.
- **input_key**: Identifies where input messages are stored in the memory object, facilitating their retrieval and manipulation.
- **memory_key**: Specifies the prompt variable name where the memory stores and retrieves chat messages. Defaults to `chat_history`.
- **output_key**: Identifies the key under which the generated response is stored, enabling retrieval using this key.
- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`.
---
### PostgresChatMessageHistory
The `PostgresChatMessageHistory` is a memory component that allows for the storage and retrieval of chat message history using a PostgreSQL database. The connection to the PostgreSQL database is established using a connection string, which includes the necessary authentication and database information.
The `PostgresChatMessageHistory` component uses a PostgreSQL database to store and retrieve chat message history.
**Params**
**Parameters**
- **connection_string:** Refers to a string that contains the necessary information to establish a connection to a PostgreSQL database. The `connection_string` typically includes details such as the username, password, host, port, and database name required to connect to the PostgreSQL database. Defaults to `postgresql://postgres:mypassword@localhost/chat_history`.
- **session_id:** It is a unique identifier that is used to associate chat message history with a specific session or conversation.
- **table_name:** Refers to the name of the table in the PostgreSQL database where the chat message history will be stored. Defaults to `message_store`.
- **connection_string**: Specifies the details needed to connect to the PostgreSQL database, including username, password, host, port, and database name. Defaults to `postgresql://postgres:mypassword@localhost/chat_history`.
- **session_id**: A unique identifier used to link chat message history with a specific session or conversation.
- **table_name**: The name of the PostgreSQL database table where chat message history is stored. Defaults to `message_store`.
---
### VectorRetrieverMemory
The `VectorRetrieverMemory` is a memory component that allows for the retrieval of vectors based on a given query. It is used to perform vector-based searches and retrievals.
The `VectorRetrieverMemory` retrieves vectors based on queries, facilitating vector-based searches and retrievals.
**Params**
**Parameters**
- **Retriever:** The retriever used to fetch documents.
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model. Defaults to `chat_history`.
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. Defaults to `False`.
- **Retriever**: The tool used to fetch documents.
- **input_key**: Identifies where input messages are stored in the memory object, facilitating their retrieval and manipulation.
- **memory_key**: Specifies the prompt variable name where the memory stores and retrieves chat messages. Defaults to `chat_history`.
- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`.

View file

@ -0,0 +1,144 @@
import Admonition from "@theme/Admonition";
# Large Language Models (LLMs)
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
A Large Language Model (LLM) is a foundational component of Langflow. It provides a uniform interface for interacting with LLMs from various providers, including OpenAI, Cohere, and HuggingFace. Langflow extensively uses LLMs across its chains and agents, employing them to generate text based on specific prompts or inputs.
---
## Anthropic
This is a wrapper for Anthropic's large language models. Learn more at [Anthropic](https://www.anthropic.com).
- **anthropic_api_key:** This key authenticates and authorizes access to the Anthropic API.
- **anthropic_api_url:** This URL connects to the Anthropic API.
- **temperature:** This parameter adjusts the randomness level in text generation. Set this to a non-negative number.
---
## ChatAnthropic
This is a wrapper for Anthropic's large language model designed for chat-based interactions. Learn more at [Anthropic](https://www.anthropic.com).
- **anthropic_api_key:** This key authenticates and authorizes access to the Anthropic API.
- **anthropic_api_url:** This URL connects to the Anthropic API.
- **temperature:** This parameter adjusts the randomness level in text generation. Set this to a non-negative number.
---
## CTransformers
`CTransformers` provides access to Transformer models implemented in C/C++ using the [GGML](https://github.com/ggerganov/ggml) library.
<Admonition type="info">
Ensure the `ctransformers` Python package is installed. Discover more about
installation, supported models, and usage
[here](https://github.com/marella/ctransformers).
</Admonition>
- **config:** This configuration is for the Transformer models. Check the default settings and possible configurations at [config](https://github.com/marella/ctransformers#config).
```json
{
"top_k": 40,
"top_p": 0.95,
"temperature": 0.8,
"repetition_penalty": 1.1,
"last_n_tokens": 64,
"seed": -1,
"max_new_tokens": 256,
"stop": null,
"stream": false,
"reset": true,
"batch_size": 8,
"threads": -1,
"context_length": -1,
"gpu_layers": 0
}
```
- **model**: The file path, directory, or Hugging Face Hub model repository name.
- **model_file**: The specific model file name within the repository or directory.
- **model_type**: The type of transformer model used. For further information, visit [ctransformers](https://github.com/marella/ctransformers).
## ChatOpenAI Component
This component interfaces with [OpenAI's](https://openai.com) large language models, supporting a variety of tasks such as chatbots, generative question-answering, and summarization.
- **max_tokens**: The maximum number of tokens to generate for each completion. Set to `-1` to generate as many tokens as possible, based on the model's context size. The default is `256`.
- **model_kwargs**: A dictionary containing any additional model parameters for undefined calls.
- **model_name**: Specifies the OpenAI chat model in use.
- **openai_api_base**: The base URL for accessing the OpenAI API.
- **openai_api_key**: The API key required for authentication with the OpenAI API.
- **temperature**: Adjusts the randomness level of the text generation. This should be a non-negative number, defaulting to `0.7`.
## Cohere Component
A wrapper for accessing [Cohere's](https://cohere.com) large language models.
- **cohere_api_key**: The API key needed for Cohere service authentication.
- **max_tokens**: The limit on the number of tokens to generate per request, defaulting to `256`.
- **temperature**: Adjusts the randomness level in text generations. This should be a non-negative number, defaulting to `0.75`.
## HuggingFaceHub Component
A component facilitating access to models hosted on the [HuggingFace Hub](https://www.huggingface.co/models).
- **huggingfacehub_api_token**: The token required for API authentication.
- **model_kwargs**: Parameters passed to the model.
- **repo_id**: Specifies the model repository, defaulting to `gpt2`.
- **task**: The specific task to execute with the model, returning either `generated_text` or `summary_text`.
## LlamaCpp Component
This component provides access to `llama.cpp` models, ensuring high performance and flexibility.
- **echo**: Whether to echo the input prompt, defaulting to `False`.
- **f16_kv**: Indicates if half-precision should be used for the key/value cache, defaulting to `True`.
- **last_n_tokens_size**: The lookback size for applying repeat penalties, defaulting to `64`.
- **logits_all**: Whether to return logits for all tokens or just the last one, defaulting to `False`.
- **logprobs**: The number of log probabilities to return. If set to None, no probabilities are returned.
- **lora_base**: The path to the base Llama LoRA model.
- **lora_path**: The specific path to the Llama LoRA model. If set to None, no LoRA model is loaded.
- **max_tokens**: The maximum number of tokens to generate in one session, defaulting to `256`.
- **model_path**: The file path to the Llama model.
- **n_batch**: The number of tokens processed in parallel, defaulting to `8`.
- **n_ctx**: The context window size for tokens, defaulting to `512`.
- **repeat_penalty**: The penalty applied to repeated tokens, defaulting to `1.1`.
- **seed**: The seed for random number generation. If set to `-1`, a random seed is used.
- **stop**: A list of stop strings that terminate generation when encountered.
- **streaming**: Indicates whether to stream results token by token, defaulting to `True`.
- **suffix**: A suffix appended to generated text. If None, no suffix is appended.
- **tags**: Tags added to the execution trace for monitoring.
- **temperature**: The sampling temperature, defaulting to `0.8`.
- **top_k**: The top-k sampling setting, defaulting to `40`.
- **top_p**: The cumulative probability threshold for top-p sampling, defaulting to `0.95`.
- **use_mlock**: Forces the system to retain the model in RAM, defaulting to `False`.
- **use_mmap**: Indicates whether to keep the model loaded in RAM via memory mapping, defaulting to `True`.
- **verbose**: Controls the verbosity of output details. When enabled, it provides insights into internal states to aid debugging and understanding, defaulting to `False`.
- **vocab_only**: Loads only the vocabulary without model weights, defaulting to `False`.
## VertexAI Component
This component integrates with [Google Vertex AI](https://cloud.google.com/vertex-ai) large language models to enhance AI capabilities.
- **credentials**: Custom credentials used for API interactions.
- **location**: The default location for API calls, defaulting to `us-central1`.
- **max_output_tokens**: Limits the output tokens per prompt, defaulting to `128`.
- **model_name**: The name of the Vertex AI model in use, defaulting to `text-bison`.
- **project**: The default Google Cloud Platform project for API calls.
- **request_parallelism**: The level of request parallelism for VertexAI model interactions, defaulting to `5`.
- **temperature**: Adjusts the randomness level in text generations, defaulting to `0`.
- **top_k**: The setting for selecting the top-k tokens for outputs.
- **top_p**: The threshold for summing probabilities of the most likely tokens, defaulting to `0.95`.
- **tuned_model_name**: Specifies a tuned model name, which overrides the default model name if provided.
- **verbose**: Controls the output verbosity to assist in debugging and understanding the operational details, defaulting to `False`.
---

View file

@ -0,0 +1,354 @@
import Admonition from "@theme/Admonition";
# Models
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
## Amazon Bedrock
This component facilitates the generation of text using the LLM (Large Language Model) model from Amazon Bedrock.
**Params**
- **Input Value:** Specifies the input text for text generation.
- **System Message (Optional):** A system message to pass to the model.
- **Model ID (Optional):** Specifies the model ID to be used for text generation. Defaults to _`"anthropic.claude-instant-v1"`_. Available options include:
- _`"ai21.j2-grande-instruct"`_
- _`"ai21.j2-jumbo-instruct"`_
- _`"ai21.j2-mid"`_
- _`"ai21.j2-mid-v1"`_
- _`"ai21.j2-ultra"`_
- _`"ai21.j2-ultra-v1"`_
- _`"anthropic.claude-instant-v1"`_
- _`"anthropic.claude-v1"`_
- _`"anthropic.claude-v2"`_
- _`"cohere.command-text-v14"`_
- **Credentials Profile Name (Optional):** Specifies the name of the credentials profile.
- **Region Name (Optional):** Specifies the region name.
- **Model Kwargs (Optional):** Additional keyword arguments for the model.
- **Endpoint URL (Optional):** Specifies the endpoint URL.
- **Streaming (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **Cache (Optional):** Specifies whether to cache the response.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
<Admonition type="note" title="Note">
<p>
Ensure that necessary credentials are provided to connect to the Amazon
Bedrock API. If connection fails, a ValueError will be raised.
</p>
</Admonition>
---
## Anthropic
This component allows the generation of text using Anthropic Chat & Completion large language models.
**Params**
- **Model Name:** Specifies the name of the Anthropic model to be used for text generation. Available options include:
- _`"claude-2.1"`_
- _`"claude-2.0"`_
- _`"claude-instant-1.2"`_
- _`"claude-instant-1"`_
- **Anthropic API Key:** Your Anthropic API key.
- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to _`256`_.
- **Temperature (Optional):** Specifies the sampling temperature. Defaults to _`0.7`_.
- **API Endpoint (Optional):** Specifies the endpoint of the Anthropic API. Defaults to _`"https://api.anthropic.com"`_ if not specified.
- **Input Value:** Specifies the input text for text generation.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **System Message (Optional):** A system message to pass to the model.
For detailed documentation and integration guides, please refer to the [Anthropic Component Documentation](https://python.langchain.com/docs/integrations/chat/anthropic).
---
## Azure OpenAI
This component allows the generation of text using the LLM (Large Language Model) model from Azure OpenAI.
**Params**
- **Model Name:** Specifies the name of the Azure OpenAI model to be used for text generation. Available options include:
- _`"gpt-35-turbo"`_
- _`"gpt-35-turbo-16k"`_
- _`"gpt-35-turbo-instruct"`_
- _`"gpt-4"`_
- _`"gpt-4-32k"`_
- _`"gpt-4-vision"`_
- **Azure Endpoint:** Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/`.
- **Deployment Name:** Specifies the name of the deployment.
- **API Version:** Specifies the version of the Azure OpenAI API to be used. Available options include:
- _`"2023-03-15-preview"`_
- _`"2023-05-15"`_
- _`"2023-06-01-preview"`_
- _`"2023-07-01-preview"`_
- _`"2023-08-01-preview"`_
- _`"2023-09-01-preview"`_
- _`"2023-12-01-preview"`_
- **API Key:** Your Azure OpenAI API key.
- **Temperature (Optional):** Specifies the sampling temperature. Defaults to _`0.7`_.
- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to _`1000`_.
- **Input Value:** Specifies the input text for text generation.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **System Message (Optional):** A system message to pass to the model.
For detailed documentation and integration guides, please refer to the [Azure OpenAI Component Documentation](https://python.langchain.com/docs/integrations/llms/azure_openai).
---
## Cohere
This component enables text generation using Cohere large language models.
**Params**
- **Cohere API Key:** Your Cohere API key.
- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to _`256`_.
- **Temperature (Optional):** Specifies the sampling temperature. Defaults to _`0.75`_.
- **Input Value:** Specifies the input text for text generation.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **System Message (Optional):** A system message to pass to the model.
---
## Google Generative AI
This component enables text generation using Google Generative AI.
**Params**
- **Google API Key:** Your Google API key to use for the Google Generative AI.
- **Model:** The name of the model to use. Supported examples are _`"gemini-pro"`_ and _`"gemini-pro-vision"`_.
- **Max Output Tokens (Optional):** The maximum number of tokens to generate.
- **Temperature:** Run inference with this temperature. Must be in the closed interval [0.0, 1.0].
- **Top K (Optional):** Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.
- **Top P (Optional):** The maximum cumulative probability of tokens to consider when sampling.
- **N (Optional):** Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.
- **Input Value:** The input to the model.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **System Message (Optional):** A system message to pass to the model.
---
## Hugging Face API
This component facilitates text generation using LLM models from the Hugging Face Inference API.
**Params**
- **Endpoint URL:** The URL of the Hugging Face Inference API endpoint. Should be provided along with necessary authentication credentials.
- **Task:** Specifies the task for text generation. Options include _`"text2text-generation"`_, _`"text-generation"`_, and _`"summarization"`_.
- **API Token:** The API token required for authentication with the Hugging Face Hub.
- **Model Keyword Arguments (Optional):** Additional keyword arguments for the model. Should be provided as a Python dictionary.
- **Input Value:** The input text for text generation.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **System Message (Optional):** A system message to pass to the model.
---
## LiteLLM Model
Generates text using the `LiteLLM` collection of large language models.
**Parameters**
- **Model name:** The name of the model to use. For example, `gpt-3.5-turbo`. (Type: str)
- **API key:** The API key to use for accessing the provider's API. (Type: str, Optional)
- **Provider:** The provider of the API key. (Type: str, Choices: "OpenAI", "Azure", "Anthropic", "Replicate", "Cohere", "OpenRouter")
- **Temperature:** Controls the randomness of the text generation. (Type: float, Default: 0.7)
- **Model kwargs:** Additional keyword arguments for the model. (Type: Dict, Optional)
- **Top p:** Filter responses to keep the cumulative probability within the top p tokens. (Type: float, Optional)
- **Top k:** Filter responses to only include the top k tokens. (Type: int, Optional)
- **N:** Number of chat completions to generate for each prompt. (Type: int, Default: 1)
- **Max tokens:** The maximum number of tokens to generate for each chat completion. (Type: int, Default: 256)
- **Max retries:** Maximum number of retries for failed requests. (Type: int, Default: 6)
- **Verbose:** Whether to print verbose output. (Type: bool, Default: False)
- **Input:** The input prompt for text generation. (Type: str)
- **Stream:** Whether to stream the output. (Type: bool, Default: False)
- **System message:** System message to pass to the model. (Type: str, Optional)
---
## Ollama
Generate text using Ollama Local LLMs.
**Parameters**
- **Base URL:** Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.
- **Model Name:** The model name to use. Refer to [Ollama Library](https://ollama.ai/library) for more models.
- **Temperature:** Controls the creativity of model responses. (Default: 0.8)
- **Cache:** Enable or disable caching. (Default: False)
- **Format:** Specify the format of the output (e.g., json). (Advanced)
- **Metadata:** Metadata to add to the run trace. (Advanced)
- **Mirostat:** Enable/disable Mirostat sampling for controlling perplexity. (Default: Disabled)
- **Mirostat Eta:** Learning rate for Mirostat algorithm. (Default: None) (Advanced)
- **Mirostat Tau:** Controls the balance between coherence and diversity of the output. (Default: None) (Advanced)
- **Context Window Size:** Size of the context window for generating tokens. (Default: None) (Advanced)
- **Number of GPUs:** Number of GPUs to use for computation. (Default: None) (Advanced)
- **Number of Threads:** Number of threads to use during computation. (Default: None) (Advanced)
- **Repeat Last N:** How far back the model looks to prevent repetition. (Default: None) (Advanced)
- **Repeat Penalty:** Penalty for repetitions in generated text. (Default: None) (Advanced)
- **TFS Z:** Tail free sampling value. (Default: None) (Advanced)
- **Timeout:** Timeout for the request stream. (Default: None) (Advanced)
- **Top K:** Limits token selection to top K. (Default: None) (Advanced)
- **Top P:** Works together with top-k. (Default: None) (Advanced)
- **Verbose:** Whether to print out response text.
- **Tags:** Tags to add to the run trace. (Advanced)
- **Stop Tokens:** List of tokens to signal the model to stop generating text. (Advanced)
- **System:** System to use for generating text. (Advanced)
- **Template:** Template to use for generating text. (Advanced)
- **Input:** The input text.
- **Stream:** Whether to stream the response.
- **System Message:** System message to pass to the model. (Advanced)
---
## OpenAI
This component facilitates text generation using OpenAI's models.
**Params**
- **Input Value:** The input text for text generation.
- **Max Tokens (Optional):** The maximum number of tokens to generate. Defaults to _`256`_.
- **Model Kwargs (Optional):** Additional keyword arguments for the model. Should be provided as a nested dictionary.
- **Model Name (Optional):** The name of the model to use. Defaults to _`gpt-4-1106-preview`_. Supported options include: _`gpt-4-turbo-preview`_, _`gpt-4-0125-preview`_, _`gpt-4-1106-preview`_, _`gpt-4-vision-preview`_, _`gpt-3.5-turbo-0125`_, _`gpt-3.5-turbo-1106`_.
- **OpenAI API Base (Optional):** The base URL of the OpenAI API. Defaults to _`https://api.openai.com/v1`_.
- **OpenAI API Key (Optional):** The API key for accessing the OpenAI API.
- **Temperature:** Controls the creativity of model responses. Defaults to _`0.7`_.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **System Message (Optional):** System message to pass to the model.
---
## Qianfan
This component facilitates the generation of text using Baidu Qianfan chat models.
**Params**
- **Model Name:** Specifies the name of the Qianfan chat model to be used for text generation. Available options include:
- _`"ERNIE-Bot"`_
- _`"ERNIE-Bot-turbo"`_
- _`"BLOOMZ-7B"`_
- _`"Llama-2-7b-chat"`_
- _`"Llama-2-13b-chat"`_
- _`"Llama-2-70b-chat"`_
- _`"Qianfan-BLOOMZ-7B-compressed"`_
- _`"Qianfan-Chinese-Llama-2-7B"`_
- _`"ChatGLM2-6B-32K"`_
- _`"AquilaChat-7B"`_
- **Qianfan Ak:** Your Baidu Qianfan access key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop).
- **Qianfan Sk:** Your Baidu Qianfan secret key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop).
- **Top p (Optional):** Model parameter. Specifies the top-p value. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`0.8`_.
- **Temperature (Optional):** Model parameter. Specifies the sampling temperature. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`0.95`_.
- **Penalty Score (Optional):** Model parameter. Specifies the penalty score. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`1.0`_.
- **Endpoint (Optional):** Endpoint of the Qianfan LLM, required if custom model is used.
- **Input Value:** Specifies the input text for text generation.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **System Message (Optional):** A system message to pass to the model.
---
## Vertex AI
`ChatVertexAI` is a component for generating text using the Vertex AI Chat large language models API.
**Params**
- **Credentials:** The JSON file containing the credentials for accessing the Vertex AI Chat API.
- **Project:** The name of the project associated with the Vertex AI Chat API.
- **Examples (Optional):** List of examples to provide context for text generation.
- **Location:** The location of the Vertex AI Chat API service. Defaults to _`us-central1`_.
- **Max Output Tokens:** The maximum number of tokens to generate. Defaults to _`128`_.
- **Model Name:** The name of the model to use. Defaults to _`chat-bison`_.
- **Temperature:** Controls the creativity of model responses. Defaults to _`0.0`_.
- **Input Value:** The input text for text generation.
- **Top K:** Limits token selection to top K. Defaults to _`40`_.
- **Top P:** Works together with top-k. Defaults to _`0.95`_.
- **Verbose:** Whether to print out response text. Defaults to _`False`_.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **System Message (Optional):** System message to pass to the model.

View file

@ -1,27 +0,0 @@
import Admonition from "@theme/Admonition";
# Prompts
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation — it may
contain some rough edges. Share your feedback or report issues to help us
improve! 🛠️📝
</p>
</Admonition>
A prompt refers to the input given to a language model. It is constructed from multiple components and can be parametrized using prompt templates. A prompt template is a reproducible way to generate prompts and allow for easy customization through input variables.
---
### PromptTemplate
The `PromptTemplate` component allows users to create prompts and define variables that provide control over instructing the model. The template can take in a set of variables from the end user and generates the prompt once the conversation is initiated.
<Admonition type="info">
Once a variable is defined in the prompt template, it becomes a component
input of its own. Check out [Prompt
Customization](../docs/guidelines/prompt-customization.mdx) to learn more.
</Admonition>
- **template:** Template used to format an individual request.

View file

@ -1,24 +1,22 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Retrievers
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation — it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
A retriever is an interface that returns documents given an unstructured query. It is more general than a vector store and does not need to be able to store documents, only to return or retrieve them.
A retriever is an interface that returns documents in response to an unstructured query. It's broader than a vector store because it doesn't need to store documents; it only needs to retrieve them.
---
### MultiQueryRetriever
## MultiQueryRetriever
The `MultiQueryRetriever` component automates the process of generating multiple queries, retrieves relevant documents for each query, and combines the results to provide a more extensive and diverse set of potentially relevant documents. This approach enhances the effectiveness of the retrieval process and helps overcome the limitations of traditional distance-based retrieval methods.
The `MultiQueryRetriever` automates generating multiple queries, retrieves relevant documents for each query, and aggregates the results. This method improves retrieval effectiveness and addresses the limitations of traditional distance-based methods.
**Params**
**Parameters**
- **LLM:** Language Model to use in the `MultiQueryRetriever`.
- **Prompt:** Prompt to represent a schema for an LLM.
- **Retriever:** The retriever used to fetch documents.
- **parser_key:** This parameter is used to specify the key or attribute name of the parsed output that will be used for retrieval. It determines how the results from the language model are split into a list of queries. Defaults to `lines`, which means that the output from the language model will be split into a list of lines of text. This allows the retriever to retrieve relevant documents based on each line of text separately.
- **LLM:** Specifies the language model used in the `MultiQueryRetriever`.
- **Prompt:** Defines a schema for the LLM.
- **Retriever:** Identifies the retriever that fetches documents.
- **parser_key:** Specifies the key or attribute name of the parsed output for retrieval. By default, it's set to `lines`, meaning the output from the language model is split into separate lines of text. This allows the retriever to fetch documents relevant to each line of text.

View file

@ -0,0 +1,55 @@
import Admonition from "@theme/Admonition";
# Text and Data
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
There are two main input and output types: `Text` and `Data`.
`Text` is a simple string input and output type, while `Data` is a structure very similar to a dictionary in Python. It is a key-value pair data structure.
We've created a few components to help you work with these types. Let's see how a few of them work.
## Data To Text
This is a component that takes in Data and outputs a `Text`. It does this by using a template string and concatenating the values of the `Data`, one per line.
If we have the following Data:
```json
{
"sender_name": "Alice",
"message": "Hello!"
}
{
"sender_name": "John",
"message": "Hi!"
}
```
And the template string is: _`{sender_name}: {message}`_
The output is:
```
Alice: Hello!
John: Hi!
```
## Create Data
This component allows you to create a `Data` from a number of inputs. You can add as many key-value pairs as you want (up to 15). Once you've picked that number, you'll need to write the name of the Key and can pass `Text` values from other components to it.
## Documents To Data
This component takes in a LangChain `Document` and outputs a `Data`. It does this by extracting the `page_content` and the `metadata` from the `Document` and adding them to the `Data` as text and data respectively.
## Why is this useful?
The idea was to create a unified way to work with complex data in Langflow and to make it easier to work with data that is not just a simple string. This way you can create more complex workflows and use the data in more ways.
## What's next?
We are planning to integrate an array of modalities to Langflow, such as images, audio, and video. This will allow you to create even more complex workflows and use cases. Stay tuned for more updates! 🚀

View file

@ -2,62 +2,45 @@ import Admonition from "@theme/Admonition";
# Text Splitters
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation — it may
contain some rough edges. Share your feedback or report issues to help us
improve! 🛠️📝
</p>
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
A text splitter is a tool that divides a document or text into smaller chunks or segments. It is used to break down large texts into more manageable pieces for analysis or processing.
A text splitter is a tool that divides a document or text into smaller chunks or segments. This helps make large texts more manageable for analysis or processing.
---
### CharacterTextSplitter
## CharacterTextSplitter
The `CharacterTextSplitter` is used to split a long text into smaller chunks based on a specified character. It splits the text by trying to keep paragraphs, sentences, and words together as long as possible, as these are semantically related pieces of text.
The `CharacterTextSplitter` splits a long text into smaller chunks based on a specified character. It aims to keep paragraphs, sentences, and words intact as much as possible since these are semantically related elements of text.
**Params**
**Parameters**
- **Documents:** Input documents to split.
- **chunk_overlap:** Determines the number of characters that overlap between consecutive chunks when splitting text. It specifies how much of the previous chunk should be included in the next chunk.
For example, if the `chunk_overlap` is set to 20 and the `chunk_size` is set to 100, the splitter will create chunks of 100 characters each, but the last 20 characters of each chunk will overlap with the first 20 characters of the next chunk. This allows for a smoother transition between chunks and ensures that no information is lost. Defaults to `200`.
- **chunk_size:** Determines the maximum number of characters in each chunk when splitting a text. It specifies the size or length of each chunk.
For example, if the `chunk_size` is set to 100, the splitter will create chunks of 100 characters each. If the text is longer than 100 characters, it will be divided into multiple chunks of equal size, except for the last chunk, which may be smaller if there are remaining characters. Defaults to `1000`.
- **separator:** Specifies the character that will be used to split the text into chunks. Defaults to `.`.
- **Documents:** The input documents to split.
- **chunk_overlap:** The number of characters that overlap between consecutive chunks. This setting ensures a smoother transition between chunks and prevents information loss. For example, with a `chunk_overlap` of 20 and a `chunk_size` of 100, each chunk will have the last 20 characters overlap with the next chunk's first 20 characters. The default is `200`.
- **chunk_size:** The maximum number of characters in each chunk. If the text exceeds the specified `chunk_size`, it will be divided into multiple chunks of equal size, with the possible exception of the last chunk, which may be smaller if fewer characters remain. The default is `1000`.
- **separator:** The character used to split the text into chunks. The default is `.`.
---
### RecursiveCharacterTextSplitter
## RecursiveCharacterTextSplitter
The `RecursiveCharacterTextSplitter` splits the text by trying to keep paragraphs, sentences, and words together as long as possible, similar to the `CharacterTextSplitter`. However, it also recursively splits the text into smaller chunks if the chunk size exceeds a specified threshold.
The `RecursiveCharacterTextSplitter` functions similarly to the `CharacterTextSplitter` by trying to keep paragraphs, sentences, and words together. It also recursively splits the text into smaller chunks if the initial chunk size exceeds a specified threshold.
**Params**
**Parameters**
- **Documents:** Input documents to split.
- **Documents:** The input documents to split.
- **chunk_overlap:** The number of characters that overlap between consecutive chunks.
- **chunk_size:** The maximum number of characters in each chunk.
- **separators:** A list of characters used to split the text into chunks. The splitter first tries to split text using the first character in the `separators` list. If any chunk exceeds the maximum size, it proceeds to the next character in the list and continues splitting. The defaults are ["\n\n", "\n", " ", ""].
- **chunk_overlap:** Determines the number of characters that overlap between consecutive chunks when splitting text. It specifies how much of the previous chunk should be included in the next chunk.
## LanguageRecursiveTextSplitter
- **chunk_size:** Determines the maximum number of characters in each chunk when splitting a text. It specifies the size or length of each chunk.
The `LanguageRecursiveTextSplitter` divides text into smaller chunks based on the programming language of the text.
- **separators:** The `separators` in RecursiveCharacterTextSplitter are the characters used to split the text into chunks. The text splitter tries to create chunks based on splitting on the first character in the list of `separators`. If any chunks are too large, it moves on to the next character in the list and continues splitting. Defaults to ["\n\n", "\n", " ", ""].
**Parameters**
### LanguageRecursiveTextSplitter
The `LanguageRecursiveTextSplitter` is a text splitter that splits the text into smaller chunks based on the (programming) language of the text.
**Params**
- **Documents:** Input documents to split.
- **chunk_overlap:** Determines the number of characters that overlap between consecutive chunks when splitting text. It specifies how much of the previous chunk should be included in the next chunk.
- **chunk_size:** Determines the maximum number of characters in each chunk when splitting a text. It specifies the size or length of each chunk.
- **separator_type:** The parameter allows the user to split the code with multiple language support. It supports various languages such as Ruby, Python, Solidity, Java, and more. Defaults to `Python`.
- **Documents:** The input documents to split.
- **chunk_overlap:** The number of characters that overlap between consecutive chunks.
- **chunk_size:** The maximum number of characters in each chunk.
- **separator_type:** This parameter allows splitting text across multiple programming languages such as Ruby, Python, Solidity, Java, and more. The default is `Python`.

View file

@ -1,9 +1,15 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Toolkits
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation — it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
</Admonition>
<p>
We appreciate your understanding as we polish our documentation - it may
contain some rough edges. Share your feedback or report issues to help us
improve! 🛠️📝
</p>
</Admonition>

View file

@ -1,78 +1,69 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Tools
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation — it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
## SearchApi
### SearchApi
SearchApi offers a real-time search engine results API that returns structured JSON data, including answer boxes, knowledge graphs, organic results, and more.
Real-time search engine results API. Returns structured JSON data that includes answer box, knowledge graph, organic results, and more.
### Parameters
**Parameters**
- **Api Key:** A unique identifier required for authentication with real-time search engines, obtainable through the [SearchApi dashboard](https://www.searchapi.io/).
- **Engine:** Specifies the search engine used, such as Google, Google Scholar, Bing, YouTube, and YouTube transcripts. Refer to the [documentation](https://www.searchapi.io/docs/google) for a complete list of supported engines.
- **Parameters:** Allows the selection of various parameters recognized by SearchApi. Some parameters are mandatory while others are optional.
- **Api Key:** A unique identifier for the SearchApi, necessary for authenticating requests to real-time search engines. This key can be retrieved from the [SearchApi dashboard](https://www.searchapi.io/).
- **Engine:** Specifies the search engine. For instance: google, google_scholar, bing, youtube, and youtube_transcripts. A full list of supported engines is available in the [documentation](https://www.searchapi.io/docs/google).
- **Parameters:** Allows the selection of any parameters recognized by SearchApi, with some being required and others optional.
### Output
**Output**
- **Document:** The JSON response from the request.
- **Document:** The JSON response from the request as a Document.
## BingSearchRun
Bing Search, a web search engine by Microsoft, provides search results for various content types like web pages, images, videos, and news articles. It combines algorithms and human editors to deliver these results.
### BingSearchRun
### Parameters
Bing Search is a web search engine owned and operated by Microsoft. It provides search results for various types of content, including web pages, images, videos, and news articles. It uses a combination of algorithms and human editors to deliver search results to users.
- **Api Wrapper:** A BingSearchAPIWrapper component that processes the search URL and subscription key.
**Params**
## Calculator
- **Api Wrapper:** A BingSearchAPIWrapper component that takes the search URL and a subscription key.
The calculator tool leverages an LLMMathChain to provide mathematical calculation capabilities, enabling the agent to perform computations as needed.
### Parameters
### Calculator
- **LLM:** The Language Model used for calculations.
The calculator tool provides mathematical calculation capabilities to an agent by leveraging an LLMMathChain. It allows the agent to perform math when needed to answer questions.
## GoogleSearchResults
**Params**
This is a wrapper around Google Search tailored for users who need precise control over the JSON data returned from the API.
- **LLM:** Language Model to use in the calculation.
### Parameters
- **Api Wrapper:** A GoogleSearchAPIWrapper equipped with a Google API key and CSE ID.
### GoogleSearchResults
## GoogleSearchRun
A wrapper around Google Search. Useful when the user needs more control over the JSON data returned from the API. It returns the full JSON response configured based on the parameters passed to the API wrapper.
This tool acts as a quick wrapper around Google Search, executing the search query and returning the snippet from the most relevant result.
**Params**
### Parameters
- **Api Wrapper:** A GoogleSearchAPIWrapper with Google API key and CSE ID
- **Api Wrapper:** A GoogleSearchAPIWrapper equipped with a Google API key and CSE ID.
## GoogleSerperRun
### GoogleSearchRun
A cost-effective Google Search API.
A quick wrapper around Google Search. It executes the search query and returns just the first result snippet from the highest-priority result type.
### Parameters
**Params**
- **Api Wrapper:** A GoogleSerperAPIWrapper with the required API key and result keys.
- **Api Wrapper:** A GoogleSearchAPIWrapper with Google API key and CSE ID
## InfoSQLDatabaseTool
This tool retrieves metadata about SQL databases. It takes a comma-separated list of table names as input and outputs the schema and sample rows for those tables.
### GoogleSerperRun
### Parameters
A low-cost Google Search API.
**Params**
- **Api Wrapper:** A GoogleSerperAPIWrapper component with API key and result keys
### InfoSQLDatabaseTool
Tool for getting metadata about a SQL database. The input to this tool is a comma-separated list of tables, and the output is the schema and sample rows for those tables. Example Input: `table1, table2, table3`.
**Params**
- **Db:** SQLDatabase to query.
- **Db:** The SQL database to query.

View file

@ -2,75 +2,93 @@ import Admonition from "@theme/Admonition";
# Utilities
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation — it may
contain some rough edges. Share your feedback or report issues to help us
improve! 🛠️📝
</p>
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Utilities are a set of actions that can be used to perform common tasks in a flow. They are available in the **Utilities** section in the sidebar.
---
### GET Request
## GET request
Make a GET request to the given URL.
Make a GET request to the specified URL.
**Params**
**Parameters**
- **URL:** The URL to make the request to. There can be more than one URL, in which case the request will be made to each URL in order.
- **URL:** The URL to make the request to. If there are multiple URLs, the request will be made to each URL in order.
- **Headers:** A dictionary of headers to send with the request.
**Output**
- **List of Documents:** A list of Documents containing the JSON response from each request.
- **List of documents:** A list of documents containing the JSON response from each request.
---
### POST Request
## POST request
Make a POST request to the given URL.
Make a POST request to the specified URL.
**Params**
**Parameters**
- **URL:** The URL to make the request to.
- **Headers:** A dictionary of headers to send with the request.
- **Document:** The Document containing a JSON object to send with the request.
- **Document:** The document containing a JSON object to send with the request.
**Output**
- **Document:** The JSON response from the request as a Document.
- **Document:** The JSON response from the request as a document.
---
### Update Request
## Update request
Make a PATCH or PUT request to the given URL.
Make a PATCH or PUT request to the specified URL.
**Params**
**Parameters**
- **URL:** The URL to make the request to.
- **Headers:** A dictionary of headers to send with the request.
- **Document:** The Document containing a JSON object to send with the request.
- **Method:** The HTTP method to use for the request. Can be either `PATCH` or `PUT`.
- **Document:** The document containing a JSON object to send with the request.
- **Method:** The HTTP method to use for the request, either `PATCH` or `PUT`.
**Output**
- **Document:** The JSON response from the request as a Document.
- **Document:** The JSON response from the request as a document.
---
### JSON Document Builder
## JSON document builder
Build a Document containing a JSON object using a key and another Document page content.
Build a document containing a JSON object using a key and another document page content.
**Params**
**Parameters**
- **Key:** The key to use for the JSON object.
- **Document:** The Document page to use for the JSON object.
- **Document:** The document page to use for the JSON object.
**Output**
- **List of Documents:** A list containing the Document with the JSON object.
- **List of documents:** A list containing the document with the JSON object.
## Unique ID generator
Generates a unique identifier (UUID) each time it is invoked, providing a distinct and reliable identifier suitable for a variety of applications.
**Parameters**
- **Value:** This field displays the generated unique identifier (UUID). The UUID is dynamically generated for each instance of the component, ensuring uniqueness across different uses.
**Output**
- Returns a unique identifier (UUID) as a string. This UUID is generated using Python's `uuid` module, ensuring that each identifier is unique and can be used as a reliable reference in your application.
<Admonition type="note" title="Note">
The Unique ID Generator is crucial for scenarios requiring distinct
identifiers, such as session management, transaction tracking, or any context
where different instances or entities must be uniquely identified. The
generated UUID is provided as a hexadecimal string, offering a high level of
uniqueness and security for identification purposes.
</Admonition>
For additional information and examples, please consult the [Langflow Components Custom Documentation](https://docs.langflow.org/components/custom).

View file

@ -1,9 +1,458 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Vector Stores
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
    We appreciate your understanding as we polish our documentation — it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
</Admonition>
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
### Astra DB
The `Astra DB` initializes a vector store using Astra DB from Data. It creates Astra DB-based vector indexes to efficiently store and retrieve documents.
**Parameters:**
- **Input:** Documents or Data for input.
- **Embedding or Astra vectorize:** External or server-side model Astra DB uses.
- **Collection Name:** Name of the Astra DB collection.
- **Token:** Authentication token for Astra DB.
- **API Endpoint:** API endpoint for Astra DB.
- **Namespace:** Astra DB namespace.
- **Metric:** Metric used by Astra DB.
- **Batch Size:** Batch size for operations.
- **Bulk Insert Batch Concurrency:** Concurrency level for bulk inserts.
- **Bulk Insert Overwrite Concurrency:** Concurrency level for overwriting during bulk inserts.
- **Bulk Delete Concurrency:** Concurrency level for bulk deletions.
- **Setup Mode:** Setup mode for the vector store.
- **Pre Delete Collection:** Option to delete the collection before setup.
- **Metadata Indexing Include:** Fields to include in metadata indexing.
- **Metadata Indexing Exclude:** Fields to exclude from metadata indexing.
- **Collection Indexing Policy:** Indexing policy for the collection.
<Admonition type="note" title="Note">
Ensure you configure the necessary Astra DB token and API endpoint before
starting.
</Admonition>
---
### Astra DB Search
`Astra DBSearch` searches an existing Astra DB vector store for documents similar to the input. It uses the `Astra DB` component's functionality for efficient retrieval.
**Parameters:**
- **Search Type:** Type of search, such as Similarity or MMR.
- **Input Value:** Value to search for.
- **Embedding or Astra vectorize:** External or server-side model Astra DB uses.
- **Collection Name:** Name of the Astra DB collection.
- **Token:** Authentication token for Astra DB.
- **API Endpoint:** API endpoint for Astra DB.
- **Namespace:** Astra DB namespace.
- **Metric:** Metric used by Astra DB.
- **Batch Size:** Batch size for operations.
- **Bulk Insert Batch Concurrency:** Concurrency level for bulk inserts.
- **Bulk Insert Overwrite Concurrency:** Concurrency level for overwriting during bulk inserts.
- **Bulk Delete Concurrency:** Concurrency level for bulk deletions.
- **Setup Mode:** Setup mode for the vector store.
- **Pre Delete Collection:** Option to delete the collection before setup.
- **Metadata Indexing Include:** Fields to include in metadata indexing.
- **Metadata Indexing Exclude:** Fields to exclude from metadata indexing.
- **Collection Indexing Policy:** Indexing policy for the collection.
---
### Chroma
`Chroma` sets up a vector store using Chroma for efficient vector storage and retrieval within language processing workflows.
**Parameters:**
- **Collection Name:** Name of the collection.
- **Persist Directory:** Directory to persist the Vector Store.
- **Server CORS Allow Origins (Optional):** CORS allow origins for the Chroma server.
- **Server Host (Optional):** Host for the Chroma server.
- **Server Port (Optional):** Port for the Chroma server.
- **Server gRPC Port (Optional):** gRPC port for the Chroma server.
- **Server SSL Enabled (Optional):** SSL configuration for the Chroma server.
- **Input:** Input data for creating the Vector Store.
- **Embedding:** Embeddings used for the Vector Store.
For detailed documentation and integration guides, please refer to the [Chroma Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/chroma).
---
### Chroma Search
`ChromaSearch` searches a Chroma collection for documents similar to the input text. It leverages Chroma to ensure efficient document retrieval.
**Parameters:**
- **Input:** Input text for search.
- **Search Type:** Type of search, such as Similarity or MMR.
- **Collection Name:** Name of the Chroma collection.
- **Index Directory:** Directory where the Chroma index is stored.
- **Embedding:** Embedding model used for vectorization.
- **Server CORS Allow Origins (Optional):** CORS allow origins for the Chroma server.
- **Server Host (Optional):** Host for the Chroma server.
- **Server Port (Optional):** Port for the Chroma server.
- **Server gRPC Port (Optional):** gRPC port for the Chroma server.
- **Server SSL Enabled (Optional):** SSL configuration for the Chroma server.
---
### Couchbase
`Couchbase` builds a Couchbase vector store from Data, streamlining the storage and retrieval of documents.
**Parameters:**
- **Embedding:** Model used by Couchbase.
- **Input:** Documents or Data.
- **Couchbase Cluster Connection String:** Cluster Connection string.
- **Couchbase Cluster Username:** Cluster Username.
- **Couchbase Cluster Password:** Cluster Password.
- **Bucket Name:** Bucket identifier in Couchbase.
- **Scope Name:** Scope identifier in Couchbase.
- **Collection Name:** Collection identifier in Couchbase.
- **Index Name:** Index identifier.
For detailed documentation and integration guides, please refer to the [Couchbase Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/couchbase).
---
### Couchbase Search
`CouchbaseSearch` leverages the Couchbase component to search for documents based on similarity metric.
**Parameters:**
- **Input:** Search query.
- **Embedding:** Model used in the Vector Store.
- **Couchbase Cluster Connection String:** Cluster Connection string.
- **Couchbase Cluster Username:** Cluster Username.
- **Couchbase Cluster Password:** Cluster Password.
- **Bucket Name:** Bucket identifier.
- **Scope Name:** Scope identifier.
- **Collection Name:** Collection identifier in Couchbase.
- **Index Name:** Index identifier.
---
### FAISS
The `FAISS` component manages document ingestion into a FAISS Vector Store, optimizing document indexing and retrieval.
**Parameters:**
- **Embedding:** Model used for vectorizing inputs.
- **Input:** Documents to ingest.
- **Folder Path:** Save path for the FAISS index, relative to Langflow.
- **Index Name:** Index identifier.
For more details, see the [FAISS Component Documentation](https://faiss.ai/index.html).
---
### FAISS Search
`FAISSSearch` searches a FAISS Vector Store for documents similar to a given input, using similarity metrics for efficient retrieval.
**Parameters:**
- **Embedding:** Model used in the FAISS Vector Store.
- **Folder Path:** Path to load the FAISS index from, relative to Langflow.
- **Input:** Search query.
- **Index Name:** Index identifier.
---
### MongoDB Atlas
`MongoDBAtlas` builds a MongoDB Atlas-based vector store from Data, streamlining the storage and retrieval of documents.
**Parameters:**
- **Embedding:** Model used by MongoDB Atlas.
- **Input:** Documents or Data.
- **Collection Name:** Collection identifier in MongoDB Atlas.
- **Database Name:** Database identifier.
- **Index Name:** Index identifier.
- **MongoDB Atlas Cluster URI:** Cluster URI.
- **Search Kwargs:** Additional search parameters.
<Admonition type="note" title="Note">
Ensure pymongo is installed for using MongoDB Atlas Vector Store.
</Admonition>
---
### MongoDB Atlas Search
`MongoDBAtlasSearch` leverages the MongoDBAtlas component to search for documents based on similarity metrics.
**Parameters:**
- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input:** Search query.
- **Embedding:** Model used in the Vector Store.
- **Collection Name:** Collection identifier.
- **Database Name:** Database identifier.
- **Index Name:** Index identifier.
- **MongoDB Atlas Cluster URI:** Cluster URI.
- **Search Kwargs:** Additional search parameters.
---
### PGVector
`PGVector` integrates a Vector Store within a PostgreSQL database, allowing efficient storage and retrieval of vectors.
**Parameters:**
- **Input:** Value for the Vector Store.
- **Embedding:** Model used.
- **PostgreSQL Server Connection String:** Server URL.
- **Table:** Table name in the PostgreSQL database.
For more details, see the [PGVector Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/pgvector).
<Admonition type="note" title="Note">
Ensure the PostgreSQL server is accessible and configured correctly.
</Admonition>
---
### PGVector Search
`PGVectorSearch` extends `PGVector` to search for documents based on similarity metrics.
**Parameters:**
- **Input:** Search query.
- **Embedding:** Model used.
- **PostgreSQL Server Connection String:** Server URL.
- **Table:** Table name.
- **Search Type:** Type of search, such as "Similarity" or "MMR".
---
### Pinecone
`Pinecone` constructs a Pinecone wrapper from Data, setting up Pinecone-based vector indexes for document storage and retrieval.
**Parameters:**
- **Input:** Documents or Data.
- **Embedding:** Model used.
- **Index Name:** Index identifier.
- **Namespace:** Namespace used.
- **Pinecone API Key:** API key.
- **Pinecone Environment:** Environment settings.
- **Search Kwargs:** Additional search parameters.
- **Pool Threads:** Number of threads.
<Admonition type="note" title="Note">
Ensure the Pinecone API key and environment are correctly configured.
</Admonition>
---
### Pinecone Search
`PineconeSearch` searches a Pinecone Vector Store for documents similar to the input, using advanced similarity metrics.
**Parameters:**
- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input Value:** Search query.
- **Embedding:** Model used.
- **Index Name:** Index identifier.
- **Namespace:** Namespace used.
- **Pinecone API Key:** API key.
- **Pinecone Environment:** Environment settings.
- **Search Kwargs:** Additional search parameters.
- **Pool Threads:** Number of threads.
---
### Qdrant
`Qdrant` allows efficient similarity searches and retrieval operations, using a list of texts to construct a Qdrant wrapper.
**Parameters:**
- **Input:** Documents or Data.
- **Embedding:** Model used.
- **API Key:** Qdrant API key.
- **Collection Name:** Collection identifier.
- **Advanced Settings:** Includes content payload key, distance function, gRPC port, host, HTTPS, location, metadata payload key, path, port, prefer gRPC, prefix, search kwargs, timeout, URL.
---
### Qdrant Search
`QdrantSearch` extends `Qdrant` to search for documents similar to the input based on advanced similarity metrics.
**Parameters:**
- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input Value:** Search query.
- **Embedding:** Model used.
- **API Key:** Qdrant API key.
- **Collection Name:** Collection identifier.
- **Advanced Settings:** Includes content payload key, distance function, gRPC port, host, HTTPS, location, metadata payload key, path, port, prefer gRPC, prefix, search kwargs, timeout, URL.
---
### Redis
`Redis` manages a Vector Store in a Redis database, supporting efficient vector storage and retrieval.
**Parameters:**
- **Index Name:** Default index name.
- **Input:** Data for building the Redis Vector Store.
- **Embedding:** Model used.
- **Schema:** Optional schema file (.yaml) for document structure.
- **Redis Server Connection String:** Server URL.
- **Redis Index:** Optional index name.
For detailed documentation, refer to the [Redis Documentation](https://python.langchain.com/docs/integrations/vectorstores/redis).
<Admonition type="note" title="Note">
Ensure the Redis server URL and index name are configured correctly. Provide a
schema if no documents are available.
</Admonition>
---
### Redis Search
`RedisSearch` searches a Redis Vector Store for documents similar to the input.
**Parameters:**
- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input Value:** Search query.
- **Index Name:** Default index name.
- **Embedding:** Model used.
- **Schema:** Optional schema file (.yaml) for document structure.
- **Redis Server Connection String:** Server URL.
- **Redis Index:** Optional index name.
---
### Supabase
`Supabase` initializes a Supabase Vector Store from texts and embeddings, setting up an environment for efficient document retrieval.
**Parameters:**
- **Input:** Documents or data.
- **Embedding:** Model used.
- **Query Name:** Optional query name.
- **Search Kwargs:** Advanced search parameters.
- **Supabase Service Key:** Service key.
- **Supabase URL:** Instance URL.
- **Table Name:** Optional table name.
<Admonition type="note" title="Note">
Ensure the Supabase service key, URL, and table name are properly configured.
</Admonition>
---
### Supabase Search
`SupabaseSearch` searches a Supabase Vector Store for documents similar to the input.
**Parameters:**
- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input Value:** Search query.
- **Embedding:** Model used.
- **Query Name:** Optional query name.
- **Search Kwargs:** Advanced search parameters.
- **Supabase Service Key:** Service key.
- **Supabase URL:** Instance URL.
- **Table Name:** Optional table name.
---
### Vectara
`Vectara` sets up a Vectara Vector Store from files or upserted data, optimizing document retrieval.
**Parameters:**
- **Vectara Customer ID:** Customer ID.
- **Vectara Corpus ID:** Corpus ID.
- **Vectara API Key:** API key.
- **Files Url:** Optional URLs for file initialization.
- **Input:** Optional data for corpus upsert.
For more information, consult the [Vectara Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/vectara).
<Admonition type="note" title="Note">
If inputs or files_url are provided, they will be processed accordingly.
</Admonition>
---
### Vectara Search
`VectaraSearch` searches a Vectara Vector Store for documents based on the provided input.
**Parameters:**
- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input Value:** Search query.
- **Vectara Customer ID:** Customer ID.
- **Vectara Corpus ID:** Corpus ID.
- **Vectara API Key:** API key.
- **Files Url:** Optional URLs for file initialization.
---
### Weaviate
`Weaviate` facilitates a Weaviate Vector Store setup, optimizing text and document indexing and retrieval.
**Parameters:**
- **Weaviate URL:** Default instance URL.
- **Search By Text:** Indicates whether to search by text.
- **API Key:** Optional API key for authentication.
- **Index Name:** Optional index name.
- **Text Key:** Default text extraction key.
- **Input:** Document or record.
- **Embedding:** Model used.
- **Attributes:** Optional additional attributes.
For more details, see the [Weaviate Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/weaviate).
<Admonition type="note" title="Note">
Ensure Weaviate instance is running and accessible. Verify API key, index
name, text key, and attributes are set correctly.
</Admonition>
---
### Weaviate Search
`WeaviateSearch` searches a Weaviate Vector Store for documents similar to the input.
**Parameters:**
- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input Value:** Search query.
- **Weaviate URL:** Default instance URL.
- **Search By Text:** Indicates whether to search by text.
- **API Key:** Optional API key for authentication.
- **Index Name:** Optional index name.
- **Text Key:** Default text extraction key.
- **Embedding:** Model used.
- **Attributes:** Optional additional attributes.
---

View file

@ -1,20 +0,0 @@
import Admonition from '@theme/Admonition';
# Wrappers
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
    We appreciate your understanding as we polish our documentation — it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
</Admonition>
### TextRequestsWrapper
This component is designed to work with the Python Requests module, which is a popular tool for making web requests. It is used to fetch data from a particular website.
**Params**
- **header:** specifies the headers to be included in the HTTP request. Defaults to `{'Authorization': 'Bearer <token>'}`.
Headers are key-value pairs that provide additional information about the request or the client making the request. They can be used to send authentication credentials, specify the content type of the request, set cookies, and more. They allow the client and the server to communicate additional information beyond the basic request.

View file

@ -1,23 +1,30 @@
import Admonition from "@theme/Admonition";
# Community
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
## 🤖 Join **Langflow** Discord server
Join us to ask questions and showcase your projects.
Join us to ask questions and showcase your projects.
Let's bring together the building blocks of AI integration!
Let's bring together the building blocks of AI integration!
Langflow [Discord](https://discord.gg/EqksyE2EX9) server.
Langflow [Discord](https://discord.gg/EqksyE2EX9) server.
---
## 🐦 Stay tunned for **Langflow** on Twitter
## 🐦 Stay tuned for **Langflow** on Twitter
Follow [@langflow_ai](https://twitter.com/langflow_ai) on **Twitter** to get the latest news about **Langflow**.
---
## ⭐️ Star **Langflow** on GitHub
You can "star" **Langflow** in [GitHub](https://github.com/logspace-ai/langflow).
You can "star" **Langflow** in [GitHub](https://github.com/langflow-ai/langflow).
By adding a star, other users will be able to find it more easily and see that it has been already useful for others.
@ -25,14 +32,12 @@ By adding a star, other users will be able to find it more easily and see that i
## 👀 Watch the GitHub repository for releases
You can "watch" **Langflow** in [GitHub](https://github.com/logspace-ai/langflow).
You can "watch" **Langflow** in [GitHub](https://github.com/langflow-ai/langflow).
If you select "Watching" instead of "Releases only" you will receive notifications when someone creates a new issue or question. You can also specify that you only want to be notified about new issues, discussions, PRs, etc.
Then you can try and help them solve those questions.
---
Thanks! 🚀
Thanks! 🚀

View file

@ -0,0 +1,50 @@
import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";
# How to Contribute Components?
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
New components are added as objects of the [CustomComponent](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/interface/custom/custom_component/custom_component.py) class and any dependencies are added to the [pyproject.toml](https://github.com/langflow-ai/langflow/blob/dev/pyproject.toml#L27) file.
## Add an example component
You have a new document loader called **MyCustomDocumentLoader** and it would look awesome in Langflow.
1. Write your loader as an object of the [CustomComponent](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/interface/custom/custom_component/custom_component.py) class. You'll create a new class, `MyCustomDocumentLoader`, that will inherit from `CustomComponent` and override the base class's methods.
2. Define optional attributes like `display_name`, `description`, and `documentation` to provide information about your custom component.
3. Implement the `build_config` method to define the configuration options for your custom component.
4. Implement the `build` method to define the logic for taking input parameters specified in the `build_config` method and returning the desired output.
5. Add the code to the [/components/documentloaders](https://github.com/langflow-ai/langflow/tree/dev/src/backend/base/langflow/components) folder.
6. Add the dependency to [/documentloaders/\_\_init\_\_.py](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/components/documentloaders/__init__.py) as `from .MyCustomDocumentLoader import MyCustomDocumentLoader`.
7. Add any new dependencies to the outer [pyproject.toml](https://github.com/langflow-ai/langflow/blob/dev/pyproject.toml#L27) file.
8. Submit documentation for your component. For this example, you'd submit documentation to the [loaders page](https://github.com/langflow-ai/langflow/blob/dev/docs/docs/components/loaders).
9. Submit your changes as a pull request. The Langflow team will have a look, suggest changes, and add your component to Langflow.
## User Sharing
You might want to share and test your custom component with others, but don't need it merged into the main source code.
If so, you can share your component on the Langflow store.
1. [Register at the Langflow store](https://www.langflow.store/login/).
2. Undergo pre-validation before receiving an API key.
3. To deploy your amazing component directly to the Langflow store, without it being merged into the main source code, navigate to your flow, and then click **Share**.
The share window appears:
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/add-component-to-store.png",
dark: "img/add-component-to-store.png",
}}
style={{ width: "50%", margin: "20px auto" }}
/>
5. Choose whether you want the flow to be public or private.
You can also **Export** your flow as a JSON file from this window.
When you're ready to share the flow, click **Share Flow**.
You should see a **Flow shared successfully** popup.
6. To confirm, navigate to the **Langflow Store** and filter results by **Created By Me**. You should see your new flow on the **Langflow Store**.

View file

@ -1,11 +1,17 @@
import Admonition from "@theme/Admonition";
# GitHub Issues
Our [issues](https://github.com/logspace-ai/langflow/issues) page is kept up to date
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
Our [issues](https://github.com/langflow-ai/langflow/issues) page is kept up to date
with bugs, improvements, and feature requests. There is a taxonomy of labels to help
with sorting and discovery of issues of interest.
If you're looking for help with your code, consider posting a question on the
[GitHub Discussions board](https://github.com/logspace-ai/langflow/discussions). Please
[GitHub Discussions board](https://github.com/langflow-ai/langflow/discussions). Please
understand that we won't be able to provide individual support via email. We
also believe that help is much more valuable if it's **shared publicly**,
so that more people can benefit from it.
@ -21,7 +27,6 @@ so that more people can benefit from it.
logs or tracebacks, you can wrap them in `<details>` and `</details>`. This
[collapses the content](https://developer.mozilla.org/en/docs/Web/HTML/Element/details) so it only becomes visible on click, making the issue easier to read and follow.
## Issue labels
[See this page](https://github.com/logspace-ai/langflow/labels) for an overview of the system we use to tag our issues and pull requests.
[See this page](https://github.com/langflow-ai/langflow/labels) for an overview of the system we use to tag our issues and pull requests.

View file

@ -1,6 +1,12 @@
# How to contribute?
import Admonition from "@theme/Admonition";
👋 Hello there! We welcome contributions from developers of all levels to our open-source project on [GitHub](https://github.com/logspace-ai/langflow). If you'd like to contribute, please check our contributing guidelines and help make Langflow more accessible.
# How to Contribute?
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
👋 Hello there! We welcome contributions from developers of all levels to our open-source project on [GitHub](https://github.com/langflow-ai/langflow). If you'd like to contribute, please check our contributing guidelines and help make Langflow more accessible.
As an open-source project in a rapidly developing field, we are extremely open
to contributions, whether in the form of a new feature, improved infra, or better documentation.
@ -10,13 +16,15 @@ To contribute to this project, please follow a ["fork and pull request"](https:/
Please do not try to push directly to this repo unless you are a maintainer.
---
## Local development
You can develop Langflow using docker compose, or locally.
We provide a .vscode/launch.json file for debugging the backend in VSCode, which is a lot faster than using docker compose.
We provide a `.vscode/launch.json` file for debugging the backend in VSCode, which is a lot faster than using docker compose.
Setting up hooks:
```bash
make init
```
@ -48,7 +56,6 @@ And the frontend:
make frontend
```
---
## Docker compose

View file

@ -0,0 +1,51 @@
import Admonition from "@theme/Admonition";
# Telemetry
Our system uses anonymous telemetry to collect essential usage statistics to enhance functionality and user experience. This data helps us identify commonly used features and areas needing improvement, ensuring our development efforts align with what you need.
<Admonition type="info">
We respect your privacy and are committed to protecting your data. We do not collect any personal information or sensitive data. All telemetry data is anonymized and used solely for improving Langflow.
You can opt out of telemetry by setting the `LANGFLOW_DO_NOT_TRACK` or `DO_NOT_TRACK` environment variable to `true` before running Langflow. This will disable telemetry data collection.
</Admonition>
## Data Collected Includes:
### Run
- **IsWebhook**: Indicates whether the operation was triggered via a webhook.
- **Seconds**: Duration in seconds for how long the operation lasted, providing insights into performance.
- **Success**: Boolean value indicating whether the operation was successful, helping identify potential errors or issues.
- **ErrorMessage**: Provides error message details if the operation was unsuccessful, aiding in troubleshooting and enhancements.
### Shutdown
- **Time Running**: Total runtime before shutdown, useful for understanding application lifecycle and optimizing uptime.
### Version
- **Version**: The specific version of Langflow used, which helps in tracking feature adoption and compatibility.
- **Platform**: Operating system of the host machine, which aids in focusing our support for popular platforms like Windows, macOS, and Linux.
- **Python**: The version of Python used, assisting in maintaining compatibility and support for various Python versions.
- **Arch**: Architecture of the system (e.g., x86, ARM), which helps optimize our software for different hardware.
- **AutoLogin**: Indicates whether the auto-login feature is enabled, reflecting user preference settings.
- **CacheType**: Type of caching mechanism used, which impacts performance and efficiency.
- **BackendOnly**: Boolean indicating whether you are running Langflow in a backend-only mode, useful for understanding deployment configurations.
### Playground
- **Seconds**: Duration in seconds for playground execution, offering insights into performance during testing or experimental stages.
- **ComponentCount**: Number of components used in the playground, which helps understand complexity and usage patterns.
- **Success**: Success status of the playground operation, aiding in identifying the stability of experimental features.
### Component
- **Name**: Identifies the component, providing data on which components are most utilized or prone to issues.
- **Seconds**: Time taken by the component to execute, offering performance metrics.
- **Success**: Whether the component operated successfully, which helps in quality control.
- **ErrorMessage**: Details of any errors encountered, crucial for debugging and improvement.
This telemetry data is crucial for enhancing Langflow and ensuring that our development efforts align with your needs.
Your feedback and suggestions are invaluable in shaping the future of Langflow, and we appreciate your support in making Langflow better for everyone.

View file

@ -0,0 +1,129 @@
import Admonition from "@theme/Admonition";
# Backend-only
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
You can run Langflow in `--backend-only` mode to expose your Langflow app as an API, without running the frontend UI.
Start langflow in backend-only mode with `python3 -m langflow run --backend-only`.
The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`.
Langflow will now serve requests to its API without the frontend running.
## Prerequisites
- [Langflow installed](../getting-started/install-langflow)
- [OpenAI API key](https://platform.openai.com)
- [A Langflow flow created](../starter-projects/basic-prompting)
## Download your flow's curl call
1. Click API.
2. Click **curl** > **Copy code** and save the code to your local machine.
It will look something like this:
```bash
curl -X POST \
"http://127.0.0.1:7864/api/v1/run/ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef?stream=false" \
  -H 'Content-Type: application/json' \
-d '{"input_value": "message",
"output_type": "chat",
"input_type": "chat",
"tweaks": {
"Prompt-kvo86": {},
"OpenAIModel-MilkD": {},
"ChatOutput-ktwdw": {},
"ChatInput-xXC4F": {}
}}'
```
Note the flow ID of `ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef`. You can find this ID in the UI as well to ensure you're querying the right flow.
## Start Langflow in backend-only mode
1. Stop Langflow with Ctrl+C.
2. Start langflow in backend-only mode with `python3 -m langflow run --backend-only`.
The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`.
Langflow will now serve requests to its API.
3. Run the curl code you copied from the UI.
You should get a result like this:
```bash
{"session_id":"ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880","outputs":[{"inputs":{"input_value":"hi, are you there?"},"outputs":[{"results":{"result":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?"},"artifacts":{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI"},"messages":[{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI","component_id":"ChatOutput-ktwdw"}],"component_display_name":"Chat Output","component_id":"ChatOutput-ktwdw","used_frozen_result":false}]}]}%
```
Again, note that the flow ID matches.
Langflow is receiving your POST request, running the flow, and returning the result, all without running the frontend. Cool!
## Download your flow's Python API call
Instead of using curl, you can download your flow as a Python API call instead.
1. Click API.
2. Click **Python API** > **Copy code** and save the code to your local machine.
The code will look something like this:
```python
import requests
from typing import Optional
BASE_API_URL = "http://127.0.0.1:7864/api/v1/run"
FLOW_ID = "ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef"
# You can tweak the flow by adding a tweaks dictionary
# e.g {"OpenAI-XXXXX": {"model_name": "gpt-4"}}
def run_flow(message: str,
flow_id: str,
output_type: str = "chat",
input_type: str = "chat",
tweaks: Optional[dict] = None,
api_key: Optional[str] = None) -> dict:
"""
Run a flow with a given message and optional tweaks.
:param message: The message to send to the flow
:param flow_id: The ID of the flow to run
:param tweaks: Optional tweaks to customize the flow
:return: The JSON response from the flow
"""
api_url = f"{BASE_API_URL}/{flow_id}"
payload = {
"input_value": message,
"output_type": output_type,
"input_type": input_type,
}
headers = None
if tweaks:
payload["tweaks"] = tweaks
if api_key:
headers = {"x-api-key": api_key}
response = requests.post(api_url, json=payload, headers=headers)
return response.json()
# Setup any tweaks you want to apply to the flow
message = "message"
print(run_flow(message=message, flow_id=FLOW_ID))
```
3. Run your Python app:
```bash
python3 app.py
```
The result is similar to the curl call:
```bash
{'session_id': 'ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880', 'outputs': [{'inputs': {'input_value': 'message'}, 'outputs': [{'results': {'result': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!"}, 'artifacts': {'message': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!", 'sender': 'Machine', 'sender_name': 'AI'}, 'messages': [{'message': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!", 'sender': 'Machine', 'sender_name': 'AI', 'component_id': 'ChatOutput-ktwdw'}], 'component_display_name': 'Chat Output', 'component_id': 'ChatOutput-ktwdw', 'used_frozen_result': False}]}]}
```
Your Python app POSTs to your Langflow server, and the server runs the flow and returns the result.
See [API](../administration/api) for more ways to interact with your headless Langflow server.

View file

@ -0,0 +1,71 @@
import Admonition from "@theme/Admonition";
# Docker
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
This guide will help you get LangFlow up and running using Docker and Docker Compose.
## Prerequisites
- Docker
- Docker Compose
## Clone repo and build Docker container
1. Clone the LangFlow repository:
```sh
git clone https://github.com/langflow-ai/langflow.git
```
2. Navigate to the `docker_example` directory:
```sh
cd langflow/docker_example
```
3. Run the Docker Compose file:
```sh
docker compose up
```
LangFlow will now be accessible at [http://localhost:7860/](http://localhost:7860/).
## Docker Compose configuration
The Docker Compose configuration spins up two services: `langflow` and `postgres`.
### LangFlow service
The `langflow` service uses the `langflowai/langflow:latest` Docker image and exposes port 7860. It depends on the `postgres` service.
Environment variables:
- `LANGFLOW_DATABASE_URL`: The connection string for the PostgreSQL database.
- `LANGFLOW_CONFIG_DIR`: The directory where LangFlow stores logs, file storage, monitor data, and secret keys.
Volumes:
- `langflow-data`: This volume is mapped to `/var/lib/langflow` in the container.
### PostgreSQL service
The `postgres` service uses the `postgres:16` Docker image and exposes port 5432.
Environment variables:
- `POSTGRES_USER`: The username for the PostgreSQL database.
- `POSTGRES_PASSWORD`: The password for the PostgreSQL database.
- `POSTGRES_DB`: The name of the PostgreSQL database.
Volumes:
- `langflow-postgres`: This volume is mapped to `/var/lib/postgresql/data` in the container.
## Switch to a specific LangFlow version
If you want to use a specific version of LangFlow, you can modify the `image` field under the `langflow` service in the Docker Compose file. For example, to use version 1.0-alpha, change `langflowai/langflow:latest` to `langflowai/langflow:1.0-alpha`.

View file

@ -1,15 +1,20 @@
import Admonition from "@theme/Admonition";
# Deploy on Google Cloud Platform
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
## Run Langflow from a New Google Cloud Project
This guide will help you set up a Langflow development VM in a Google Cloud Platform project using Google Cloud Shell.
> Note: When Cloud Shell opens, be sure to select **Trust repo**. Some `gcloud` commands might not run in an ephemeral Cloud Shell environment.
## Standard VM
## Standard VM
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/logspace-ai/langflow&working_dir=scripts&shellonly=true&tutorial=walkthroughtutorial.md)
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial.md)
This script sets up a Debian-based VM with the Langflow package, Nginx, and the necessary configurations to run the Langflow Dev environment.
@ -17,18 +22,18 @@ This script sets up a Debian-based VM with the Langflow package, Nginx, and the
## Spot/Preemptible Instance
[![Open in Cloud Shell - Spot Instance](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/genome21/langflow&working_dir=scripts&shellonly=true&tutorial=walkthroughtutorial_spot.md)
[![Open in Cloud Shell - Spot Instance](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/genome21/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
When running as a [spot (preemptible) instance](https://cloud.google.com/compute/docs/instances/preemptible), the code and VM will behave the same way as in a regular instance, executing the startup script to configure the environment, install necessary dependencies, and run the Langflow application. However, **due to the nature of spot instances, the VM may be terminated at any time if Google Cloud needs to reclaim the resources**. This makes spot instances suitable for fault-tolerant, stateless, or interruptible workloads that can handle unexpected terminations and restarts.
---
## Pricing (approximate)
> For a more accurate breakdown of costs, please use the [**GCP Pricing Calculator**](https://cloud.google.com/products/calculator)
| Component | Regular Cost (Hourly) | Regular Cost (Monthly) | Spot/Preemptible Cost (Hourly) | Spot/Preemptible Cost (Monthly) | Notes |
| -------------- | --------------------- | ---------------------- | ------------------------------ | ------------------------------- | ----- |
| 100 GB Disk | - | $10/month | - | $10/month | Disk cost remains the same for both regular and Spot/Preemptible VMs |
| VM (n1-standard-4) | $0.15/hr | ~$108/month | ~$0.04/hr | ~$29/month | The VM cost can be significantly reduced using a Spot/Preemptible instance |
| **Total** | **$0.15/hr** | **~$118/month** | **~$0.04/hr** | **~$39/month** | Total costs for running the VM and disk 24/7 for an entire month |
| Component | Regular Cost (Hourly) | Regular Cost (Monthly) | Spot/Preemptible Cost (Hourly) | Spot/Preemptible Cost (Monthly) | Notes |
| ------------------ | --------------------- | ---------------------- | ------------------------------ | ------------------------------- | -------------------------------------------------------------------------- |
| 100 GB Disk | - | $10/month | - | $10/month | Disk cost remains the same for both regular and Spot/Preemptible VMs |
| VM (n1-standard-4) | $0.15/hr | ~$108/month | ~$0.04/hr | ~$29/month | The VM cost can be significantly reduced using a Spot/Preemptible instance |
| **Total** | **$0.15/hr** | **~$118/month** | **~$0.04/hr** | **~$39/month** | Total costs for running the VM and disk 24/7 for an entire month |

View file

@ -0,0 +1,315 @@
# Kubernetes
This guide will help you get LangFlow up and running in a Kubernetes cluster, including the following steps:
- Install [LangFlow as IDE](#langflow-ide) in a Kubernetes cluster (for development)
- Install [LangFlow as a standalone application](#langflow-runtime) in a Kubernetes cluster (for production runtime workloads)
## LangFlow (IDE)
This solution is designed to provide a complete environment for developers to create, test, and debug their flows. It includes both the API and the UI.
### Prerequisites
- Kubernetes server
- kubectl
- Helm
### Step 0. Prepare a Kubernetes cluster
We use [Minikube](https://minikube.sigs.k8s.io/docs/start/) for this example, but you can use any Kubernetes cluster.
1. Create a Kubernetes cluster on Minikube.
```shell
minikube start
```
2. Set `kubectl` to use Minikube.
```shell
kubectl config use-context minikube
```
### Step 1. Install the LangFlow Helm chart
1. Add the repository to Helm.
```shell
helm repo add langflow https://langflow-ai.github.io/langflow-helm-charts
helm repo update
```
2. Install LangFlow with the default options in the `langflow` namespace.
```shell
helm install langflow-ide langflow/langflow-ide -n langflow --create-namespace
```
3. Check the status of the pods
```shell
kubectl get pods -n langflow
```
```
NAME READY STATUS RESTARTS AGE
langflow-0 1/1 Running 0 33s
langflow-frontend-5d9c558dbb-g7tc9 1/1 Running 0 38s
```
### Step 2. Access LangFlow
Enable local port forwarding to access LangFlow from your local machine.
```shell
kubectl port-forward -n langflow svc/langflow-langflow-runtime 7860:7860
```
Now you can access LangFlow at [http://localhost:7860/](http://localhost:7860/).
### LangFlow version
To specify a different LangFlow version, you can set the `langflow.backend.image.tag` and `langflow.frontend.image.tag` values in the `values.yaml` file.
```yaml
langflow:
backend:
image:
tag: "1.0.0a59"
frontend:
image:
tag: "1.0.0a59"
```
### Storage
By default, the chart will use a SQLite database stored in a local persistent disk.
If you want to use an external PostgreSQL database, you can set the `langflow.database` values in the `values.yaml` file.
```yaml
# Deploy postgresql. You can skip this section if you have an existing postgresql database.
postgresql:
enabled: true
fullnameOverride: "langflow-ide-postgresql-service"
auth:
username: "langflow"
password: "langflow-postgres"
database: "langflow-db"
langflow:
backend:
externalDatabase:
enabled: true
driver:
value: "postgresql"
host:
value: "langflow-ide-postgresql-service"
port:
value: "5432"
database:
value: "langflow-db"
user:
value: "langflow"
password:
valueFrom:
secretKeyRef:
key: "password"
name: "langflow-ide-postgresql-service"
sqlite:
enabled: false
```
### Scaling
You can scale the number of replicas for the LangFlow backend and frontend services by changing the `replicaCount` value in the `values.yaml` file.
```yaml
langflow:
backend:
replicaCount: 3
frontend:
replicaCount: 3
```
You can scale frontend and backend services independently.
To scale vertically (increase the resources for the pods), you can set the `resources` values in the `values.yaml` file.
```yaml
langflow:
backend:
resources:
requests:
memory: "2Gi"
cpu: "1000m"
frontend:
resources:
requests:
memory: "1Gi"
cpu: "1000m"
```
### Deploy on AWS EKS, Google GKE, or Azure AKS and other examples
Visit the [LangFlow Helm Charts repository](https://github.com/langflow-ai/langflow-helm-charts) for more examples and configurations.
Use the [default values file](https://github.com/langflow-ai/langflow-helm-charts/blob/main/charts/langflow-ide/values.yaml) as reference for all the options available.
Visit the [examples directory](https://github.com/langflow-ai/langflow-helm-charts/tree/main/examples/langflow-ide) to learn more about different deployment options.
## LangFlow (Runtime)
The runtime chart is tailored for deploying applications in a production environment. It is focused on stability, performance, isolation and security to ensure that applications run reliably and efficiently.
Using a dedicated deployment for a set of flows is fundamental in production environments in order to have a granular resource control.
## Import a flow
There are two ways to import a flow (or multiple flows) and deploy it with the LangFlow runtime Helm chart:
1. **From a remote location**: You can reference a flow stored in a remote location, such as a URL or a Git repository by customizing the `values.yaml` file in the section `downloadFlows`:
```yaml
downloadFlows:
flows:
- url: https://raw.githubusercontent.com/langflow-ai/langflow/dev/src/backend/base/langflow/initial_setup/starter_projects/Basic%20Prompting%20(Hello%2C%20world!).json
```
When the LangFlow runtime starts, it will download the flow from the specified URL and run it.
The flow UUID to use to call the API endpoints is the same as the one in the JSON file under the `id` field.
You can also specify an `endpoint_name` field to give a friendly name to the flow.
2. **Packaging the flow as a docker image**: You can add a flow to a docker image based on the Langflow runtime and refer to it in the chart.
First you need a base Dockerfile to get the langflow image and add your local flows:
```Dockerfile
FROM langflowai/langflow-backend:latest
RUN mkdir /app/flows
COPY ./*json /app/flows/.
```
Then you can build the image and push it to DockerHub (or any registry you prefer):
```bash
# Create the Dockerfile
echo """FROM langflowai/langflow-backend:latest
RUN mkdir /app/flows
ENV LANGFLOW_LOAD_FLOWS_PATH=/app/flows
COPY ./*json /app/flows/.""" > Dockerfile
# Download the flows
wget https://raw.githubusercontent.com/langflow-ai/langflow/dev/src/backend/base/langflow/initial_setup/starter_projects/Basic%20Prompting%20(Hello%2C%20world!).json
# Build the docker image locally
docker build -t myuser/langflow-just-chat:1.0.0 -f Dockerfile .
# Push the image to DockerHub
docker push myuser/langflow-just-chat:1.0.0
```
### Prerequisites
- Kubernetes server
- kubectl
- Helm
### Step 0. Prepare a Kubernetes cluster
Follow the same steps as for the LangFlow IDE.
### Step 1. Install the LangFlow runtime Helm chart
1. Add the repository to Helm.
```shell
helm repo add langflow https://langflow-ai.github.io/langflow-helm-charts
helm repo update
```
2. Install the LangFlow app with the default options in the `langflow` namespace.
If you bundled the flow in a docker image, you can specify the image name in the `values.yaml` file or with the `--set` flag:
```shell
helm install my-langflow-app langflow/langflow-runtime -n langflow --create-namespace --set image.repository=myuser/langflow-just-chat --set image.tag=1.0.0
```
If you want to download the flow from a remote location, you can specify the URL in the `values.yaml` file or with the `--set` flag:
```shell
helm install my-langflow-app langflow/langflow-runtime -n langflow --create-namespace --set downloadFlows.flows[0].url=https://raw.githubusercontent.com/langflow-ai/langflow/dev/src/backend/base/langflow/initial_setup/starter_projects/Basic%20Prompting%20(Hello%2C%20world!).json
```
3. Check the status of the pods
```shell
kubectl get pods -n langflow
```
### Step 2. Access the LangFlow app API
Enable local port forwarding to access LangFlow from your local machine.
```shell
kubectl port-forward -n langflow svc/langflow-my-langflow-app 7860:7860
```
Now you can access the API at [http://localhost:7860/api/v1/flows](http://localhost:7860/api/v1/flows) and execute the flow:
```bash
id=$(curl -s http://localhost:7860/api/v1/flows | jq -r '.flows[0].id')
curl -X POST \
"http://localhost:7860/api/v1/run/$id?stream=false" \
-H 'Content-Type: application/json'\
-d '{
"input_value": "Hello!",
"output_type": "chat",
"input_type": "chat"
}'
```
### Storage
In this case, the storage is not needed as our deployment is stateless.
### Log level and LangFlow configurations
You can set the log level and other LangFlow configurations in the `values.yaml` file.
```yaml
env:
- name: LANGFLOW_LOG_LEVEL
value: "INFO"
```
### Configure secrets and variables
In order to inject secrets and LangFlow global variables, you can use the `secrets` and `env` sections in the `values.yaml` file.
Let's say your flow uses a global variable which is a secret; when you export the flow as JSON, it's recommended to not include it.
When importing the flow in the LangFlow runtime, you can set the global variable using the `env` section in the `values.yaml` file.
Assuming you have a global variable called `openai_key_var`, you can read it directly from a secret:
```yaml
env:
- name: openai_key_var
valueFrom:
secretKeyRef:
name: openai-key
key: openai-key
```
or directly from the values file (not recommended for secret values!):
```yaml
env:
- name: openai_key_var
value: "sk-...."
```
### Scaling
You can scale the number of replicas for the LangFlow app by changing the `replicaCount` value in the `values.yaml` file.
```yaml
replicaCount: 3
```
To scale vertically (increase the resources for the pods), you can set the `resources` values in the `values.yaml` file.
```yaml
resources:
requests:
memory: "2Gi"
cpu: "1000m"
```
### Other examples
Visit the [LangFlow Helm Charts repository](https://github.com/langflow-ai/langflow-helm-charts) for more examples and configurations.
Use the [default values file](https://github.com/langflow-ai/langflow-helm-charts/blob/main/charts/langflow-runtime/values.yaml) as reference for all the options available.
Visit the [examples directory](https://github.com/langflow-ai/langflow-helm-charts/tree/main/examples/langflow-runtime) to learn more about different deployment options.

View file

@ -1,29 +0,0 @@
import Admonition from "@theme/Admonition";
# Buffer Memory
For certain applications, retaining past interactions is crucial. For that, chains and agents may accept a memory component as one of their input parameters. The `ConversationBufferMemory` component is one of them. It stores messages and extracts them into variables.
## ⛓️ Langflow Example
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/buffer-memory.png",
dark: "img/buffer-memory.png",
}}
/>
#### <a target="\_blank" href="json_files/Buffer_Memory.json" download>Download Flow</a>
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`ConversationBufferMemory`](https://python.langchain.com/docs/modules/memory/types/buffer)
- [`ConversationChain`](https://python.langchain.com/docs/modules/chains/)
- [`ChatOpenAI`](https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai)
</Admonition>

View file

@ -0,0 +1,21 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Chat Memory
<Admonition type="warning" title="warning">
This page may contain outdated information. It will be updated as soon as possible.
</Admonition>
The **Chat Memory** component restores previous messages given a Session ID, which can be any string.
This component is available under the **Helpers** tab of the Langflow sidebar.
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
<ReactPlayer playing controls url="/videos/chat_memory.mp4" />
</div>

Some files were not shown because too many files have changed in this diff Show more