Merge remote-tracking branch 'origin/dev' into two_edges
commit 79e35dc2a2
1180 changed files with 74386 additions and 33420 deletions
@@ -3,7 +3,7 @@
  "name": "Langflow Dev Container",
  // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
  "image": "mcr.microsoft.com/devcontainers/python:1-3.10-bullseye",
  "image": "mcr.microsoft.com/devcontainers/python:3.10",
  // Features to add to the dev container. More info: https://containers.dev/features.
  "features": {
@@ -1,7 +0,0 @@
.venv/
**/aws
# node_modules
**/node_modules/
dist/
**/build/
src/backend/langflow/frontend

29 .env.example
@@ -4,6 +4,19 @@
# Do not commit .env file to git
# Do not change .env.example file

# Config directory
# Directory where files, logs and database will be stored
# Example: LANGFLOW_CONFIG_DIR=~/.langflow
LANGFLOW_CONFIG_DIR=

# Save database in the config directory
# Values: true, false
# If false, the database will be saved in Langflow's root directory
# This means that the database will be deleted when Langflow is uninstalled
# and that the database will not be shared between different virtual environments
# Example: LANGFLOW_SAVE_DB_IN_CONFIG_DIR=true
LANGFLOW_SAVE_DB_IN_CONFIG_DIR=

# Database URL
# Postgres example: LANGFLOW_DATABASE_URL=postgresql://postgres:postgres@localhost:5432/langflow
# SQLite example:
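The hunk ends mid-comment, so the SQLite example itself is cut off here. For reference, a SQLAlchemy-style SQLite URL (an assumption, not taken from this diff) would look like:

```shell
# Hypothetical completion of the truncated comment; standard SQLAlchemy URL form.
LANGFLOW_DATABASE_URL=sqlite:///./langflow.db
```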
@@ -56,6 +69,12 @@ LANGFLOW_REMOVE_API_KEYS=
# LANGFLOW_REDIS_CACHE_EXPIRE (default: 3600)
LANGFLOW_CACHE_TYPE=

# Set AUTO_LOGIN to false if you want to disable auto login
# and use the login form to login. LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD
# must be set if AUTO_LOGIN is set to false
# Values: true, false
LANGFLOW_AUTO_LOGIN=

# Superuser username
# Example: LANGFLOW_SUPERUSER=admin
LANGFLOW_SUPERUSER=
@@ -64,14 +83,18 @@ LANGFLOW_SUPERUSER=
# Example: LANGFLOW_SUPERUSER_PASSWORD=123456
LANGFLOW_SUPERUSER_PASSWORD=

# Should store environment variables in the database
# Values: true, false
LANGFLOW_STORE_ENVIRONMENT_VARIABLES=

# STORE_URL
# Example: LANGFLOW_STORE_URL=https://api.langflow.store
LANGFLOW_STORE_URL=
# LANGFLOW_STORE_URL=

# DOWNLOAD_WEBHOOK_URL
#
LANGFLOW_DOWNLOAD_WEBHOOK_URL=
# LANGFLOW_DOWNLOAD_WEBHOOK_URL=

# LIKE_WEBHOOK_URL
#
LANGFLOW_LIKE_WEBHOOK_URL=
# LANGFLOW_LIKE_WEBHOOK_URL=
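These settings are read at startup; per the CLI options documented later in this diff, the file is passed with `--env-file` (default `.env`), so a local run that picks them up looks like:

```shell
# Start Langflow against the env file above (flag and default per the README's CLI section).
python -m langflow run --env-file .env
```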
90 .eslintrc.json Normal file
@@ -0,0 +1,90 @@
{
  "extends": [
    "eslint:recommended",
    "plugin:react/recommended",
    "plugin:prettier/recommended"
  ],
  "plugins": [
    "react",
    "import-helpers",
    "prettier"
  ],
  "parser": "@typescript-eslint/parser",
  "parserOptions": {
    "project": [
      "./tsconfig.node.json",
      "./tsconfig.json"
    ],
    "extraFileExtensions": [
      ".mdx"
    ],
    "extensions": [
      ".mdx"
    ]
  },
  "env": {
    "browser": true,
    "es2021": true
  },
  "settings": {
    "react": {
      "version": "detect"
    }
  },
  "rules": {
    "no-console": "warn",
    "no-self-assign": "warn",
    "no-self-compare": "warn",
    "complexity": [
      "error",
      {
        "max": 15
      }
    ],
    "indent": [
      "error",
      2,
      {
        "SwitchCase": 1
      }
    ],
    "no-dupe-keys": "error",
    "no-invalid-regexp": "error",
    "no-undef": "error",
    "no-return-assign": "error",
    "no-redeclare": "error",
    "no-empty": "error",
    "no-await-in-loop": "error",
    "react/react-in-jsx-scope": 0,
    "node/exports-style": [
      "error",
      "module.exports"
    ],
    "node/file-extension-in-import": [
      "error",
      "always"
    ],
    "node/prefer-global/buffer": [
      "error",
      "always"
    ],
    "node/prefer-global/console": [
      "error",
      "always"
    ],
    "node/prefer-global/process": [
      "error",
      "always"
    ],
    "node/prefer-global/url-search-params": [
      "error",
      "always"
    ],
    "node/prefer-global/url": [
      "error",
      "always"
    ],
    "node/prefer-promises/dns": "error",
    "node/prefer-promises/fs": "error"
  }
}
94 .github/actions/poetry_caching/action.yml vendored Normal file
@@ -0,0 +1,94 @@
# An action for setting up poetry install with caching.
# Using a custom action since the default action does not
# take poetry install groups into account.
# Action code from:
# https://github.com/actions/setup-python/issues/505#issuecomment-1273013236
# Copy of https://github.com/langchain-ai/langchain/blob/2f8dd1a1619f25daa4737df4d378b1acd6ff83c4/.github/actions/poetry_setup/action.yml
name: poetry-install-with-caching
description: Poetry install with support for caching of dependency groups.

inputs:
  python-version:
    description: Python version, supporting MAJOR.MINOR only
    required: true

  poetry-version:
    description: Poetry version
    required: true

  cache-key:
    description: Cache key to use for manual handling of caching
    required: true

  working-directory:
    description: Directory whose poetry.lock file should be cached
    required: true

runs:
  using: composite
  steps:
    - uses: actions/setup-python@v5
      name: Setup python ${{ inputs.python-version }}
      id: setup-python
      with:
        python-version: ${{ inputs.python-version }}

    - uses: actions/cache@v4
      id: cache-bin-poetry
      name: Cache Poetry binary - Python ${{ inputs.python-version }}
      env:
        SEGMENT_DOWNLOAD_TIMEOUT_MIN: "1"
      with:
        path: |
          /opt/pipx/venvs/poetry
        # This step caches the poetry installation, so make sure it's keyed on the poetry version as well.
        key: bin-poetry-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-${{ inputs.poetry-version }}

    - name: Refresh shell hashtable and fixup softlinks
      if: steps.cache-bin-poetry.outputs.cache-hit == 'true'
      shell: bash
      env:
        POETRY_VERSION: ${{ inputs.poetry-version }}
        PYTHON_VERSION: ${{ inputs.python-version }}
      run: |
        set -eux

        # Refresh the shell hashtable, to ensure correct `which` output.
        hash -r

        # `actions/cache@v3` doesn't always seem able to correctly unpack softlinks.
        # Delete and recreate the softlinks pipx expects to have.
        rm /opt/pipx/venvs/poetry/bin/python
        cd /opt/pipx/venvs/poetry/bin
        ln -s "$(which "python$PYTHON_VERSION")" python
        chmod +x python
        cd /opt/pipx_bin/
        ln -s /opt/pipx/venvs/poetry/bin/poetry poetry
        chmod +x poetry

        # Ensure everything got set up correctly.
        /opt/pipx/venvs/poetry/bin/python --version
        /opt/pipx_bin/poetry --version

    - name: Install poetry
      if: steps.cache-bin-poetry.outputs.cache-hit != 'true'
      shell: bash
      env:
        POETRY_VERSION: ${{ inputs.poetry-version }}
        PYTHON_VERSION: ${{ inputs.python-version }}
      # Install poetry using the python version installed by setup-python step.
      run: pipx install "poetry==$POETRY_VERSION" --python '${{ steps.setup-python.outputs.python-path }}' --verbose

    - name: Restore pip and poetry cached dependencies
      uses: actions/cache@v4
      env:
        SEGMENT_DOWNLOAD_TIMEOUT_MIN: "4"
        WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}
      with:
        path: |
          ~/.cache/pip
          ~/.cache/pypoetry/virtualenvs
          ~/.cache/pypoetry/cache
          ~/.cache/pypoetry/artifacts
          ${{ env.WORKDIR }}/.venv
        key: py-deps-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-poetry-${{ inputs.poetry-version }}-${{ inputs.cache-key }}-${{ hashFiles(format('{0}/**/poetry.lock', env.WORKDIR)) }}
44 .github/workflows/ci.yml vendored
@@ -1,44 +0,0 @@
name: "Async API tests"

on:
  push:
    branches:
      - dev
  pull_request:
    branches:
      - dev
      - main

jobs:
  build-and-test:
    runs-on: ubuntu-latest
    env:
      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Cache Docker layers
        uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-

      - name: Set up Docker
        run: docker --version && docker-compose --version

      - name: "Create env file"
        working-directory: ./deploy
        run: |
          echo "${{ secrets.ENV_FILE }}" > .env

      - name: Build and start services
        working-directory: ./deploy
        run: docker compose up --exit-code-from tests tests result_backend broker celeryworker db --build
        continue-on-error: true

      # - name: Stop services
      #   run: docker compose down
64 .github/workflows/create-release.yml vendored Normal file
@@ -0,0 +1,64 @@
name: Create Release
on:
  workflow_dispatch:
    inputs:
      version:
        description: "Version to release"
        required: true
        type: string
      release_type:
        description: "Type of release (base or main)"
        required: true
        type: choice
        options:
          - base
          - main

env:
  POETRY_VERSION: "1.8.2"
jobs:
  release:
    name: Build Langflow
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.check-version.outputs.version }}
    steps:
      - uses: actions/checkout@v4
      - name: Install poetry
        run: pipx install poetry==$POETRY_VERSION
      - name: Set up Python 3.12
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"
          cache: "poetry"
      - name: Build project for distribution
        run: |
          if [ "${{ inputs.release_type }}" == "base" ]; then
            make build base=true
          else
            make build main=true
          fi
      - name: Upload Artifact
        uses: actions/upload-artifact@v4
        with:
          name: dist${{ inputs.release_type }}
          path: ${{ inputs.release_type == 'base' && 'src/backend/base/dist' || 'dist' }}
  create_release:
    name: Create Release Job
    runs-on: ubuntu-latest
    needs: release
    steps:
      - uses: actions/download-artifact@v4
        with:
          name: dist${{ inputs.release_type }}
          path: dist
      - name: Create Release Notes
        uses: ncipollo/release-action@v1
        with:
          artifacts: "dist/*"
          token: ${{ secrets.GITHUB_TOKEN }}
          draft: false
          generateReleaseNotes: true
          prerelease: true
          tag: v${{ inputs.version }}
          commit: dev
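Because this workflow only has a `workflow_dispatch` trigger, it must be started manually. A minimal sketch of doing that from a terminal, assuming the GitHub CLI (`gh`) is installed and authenticated (the version value below is a placeholder, not a real release):

```shell
# Trigger the manual release workflow; -f values map to the inputs declared above.
gh workflow run create-release.yml -f version=1.0.0a1 -f release_type=base
```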
2 .github/workflows/deploy_gh-pages.yml vendored
@@ -27,7 +27,7 @@ jobs:
      # Popular action to deploy to GitHub Pages:
      # Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus
      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v3
        uses: peaceiris/actions-gh-pages@v4
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          # Build output to publish to the `gh-pages` branch:
54 .github/workflows/docker-build.yml vendored Normal file
@@ -0,0 +1,54 @@
name: Docker Build and Push
on:
  workflow_call:
    inputs:
      version:
        required: true
        type: string
      release_type:
        required: true
        type: string
  workflow_dispatch:
    inputs:
      version:
        required: true
        type: string
      release_type:
        required: true
        type: choice
        options:
          - base
          - main

jobs:
  docker_build:
    name: Build Docker Image
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Set Dockerfile and Tags
        id: set-vars
        run: |
          if [ "${{ inputs.release_type }}" == "base" ]; then
            echo "DOCKERFILE=./docker/build_and_push_base.Dockerfile" >> $GITHUB_ENV
            echo "TAGS=langflowai/langflow:base-${{ inputs.version }}" >> $GITHUB_ENV
          else
            echo "DOCKERFILE=./docker/build_and_push.Dockerfile" >> $GITHUB_ENV
            echo "TAGS=langflowai/langflow:${{ inputs.version }},langflowai/langflow:1.0-alpha" >> $GITHUB_ENV
          fi
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          file: ${{ env.DOCKERFILE }}
          tags: ${{ env.TAGS }}
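The base-vs-main image selection above can be reproduced locally to debug a build without pushing. A rough equivalent, assuming Docker with Buildx and a checkout of the repo root (the 0.0.0-test tag is just a placeholder):

```shell
# Mirror the workflow's Dockerfile/tag selection locally (no push).
RELEASE_TYPE=base        # or: main
VERSION=0.0.0-test       # placeholder tag, not a real release
if [ "$RELEASE_TYPE" = "base" ]; then
  DOCKERFILE=./docker/build_and_push_base.Dockerfile
  TAGS="langflowai/langflow:base-$VERSION"
else
  DOCKERFILE=./docker/build_and_push.Dockerfile
  TAGS="langflowai/langflow:$VERSION"
fi
docker buildx build -f "$DOCKERFILE" -t "$TAGS" .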
29 .github/workflows/lint.yml vendored
@@ -14,7 +14,7 @@ on:
      - "src/backend/**"

env:
  POETRY_VERSION: "1.7.0"
  POETRY_VERSION: "1.8.2"

jobs:
  lint:
@@ -22,22 +22,29 @@ jobs:
    strategy:
      matrix:
        python-version:
          - "3.9"
          - "3.10"
          - "3.12"
          - "3.11"
          - "3.10"
    steps:
      - uses: actions/checkout@v4
      - name: Install poetry
        run: |
          pipx install poetry==$POETRY_VERSION
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_caching"
        with:
          python-version: ${{ matrix.python-version }}
          cache: poetry
      - name: Install dependencies
          poetry-version: ${{ env.POETRY_VERSION }}
          cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}
      - name: Install Python dependencies
        run: |
          poetry env use ${{ matrix.python-version }}
          poetry install
      - name: Analysing the code with our lint
      - name: Get .mypy_cache to speed up mypy
        uses: actions/cache@v4
        env:
          SEGMENT_DOWNLOAD_TIMEOUT_MIN: "2"
        with:
          path: |
            ./.mypy_cache
          key: ${{ runner.os }}-mypy-${{ hashFiles('**/pyproject.toml') }}
      - name: Lint check
        run: |
          make lint
77 .github/workflows/pre-release-base.yml vendored Normal file
@@ -0,0 +1,77 @@
name: Langflow Base Pre-release
run-name: Langflow Base Pre-release by @${{ github.actor }}
on:
  workflow_dispatch:
    inputs:
      release_package:
        description: "Release package"
        required: true
        type: boolean
        default: false

env:
  POETRY_VERSION: "1.8.2"

jobs:
  release:
    name: Release Langflow Base
    if: inputs.release_package == true
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.check-version.outputs.version }}
    steps:
      - uses: actions/checkout@v4
      - name: Install poetry
        run: pipx install poetry==$POETRY_VERSION
      - name: Set up Python 3.10
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
          cache: "poetry"
      - name: Check Version
        id: check-version
        # In this step, we should check the version of the package
        # and see if it is a version that is already released
        # echo version=$(cd src/backend/base && poetry version --short) >> $GITHUB_OUTPUT
        # cd src/backend/base && poetry version --short should
        # be different than the last release version in pypi
        # which we can get from curl -s "https://pypi.org/pypi/langflow/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1
        run: |
          version=$(cd src/backend/base && poetry version --short)
          last_released_version=$(curl -s "https://pypi.org/pypi/langflow-base/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
          if [ "$version" = "$last_released_version" ]; then
            echo "Version $version is already released. Skipping release."
            exit 1
          else
            echo version=$version >> $GITHUB_OUTPUT
          fi
      - name: Build project for distribution
        run: make build base=true
      - name: Publish to PyPI
        env:
          POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          make publish base=true
  docker_build:
    name: Build Docker Image
    runs-on: ubuntu-latest
    needs: release
    steps:
      - uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          file: ./docker/build_and_push_base.Dockerfile
          tags: |
            langflowai/langflow:base-${{ needs.release.outputs.version }}
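The Check Version guard can also be exercised on its own; the exact pipeline the step uses prints the newest version published on PyPI (requires curl and jq):

```shell
# Newest published langflow-base version, sorted semver-style; compare against `poetry version --short`.
curl -s "https://pypi.org/pypi/langflow-base/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1
```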
104 .github/workflows/pre-release-langflow.yml vendored Normal file
@@ -0,0 +1,104 @@
name: Langflow Pre-release
run-name: Langflow Pre-release by @${{ github.actor }}
on:
  workflow_dispatch:
    inputs:
      release_package:
        description: "Release package"
        required: true
        type: boolean
        default: false
  workflow_run:
    workflows: ["pre-release-base"]
    types: [completed]
    branches: [dev]

env:
  POETRY_VERSION: "1.8.2"

jobs:
  release:
    name: Release Langflow
    if: inputs.release_package == true
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.check-version.outputs.version }}
    steps:
      - uses: actions/checkout@v4
      - name: Install poetry
        run: pipx install poetry==$POETRY_VERSION
      - name: Set up Python 3.10
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
          cache: "poetry"
      - name: Check Version
        id: check-version
        run: |
          version=$(poetry version --short)
          last_released_version=$(curl -s "https://pypi.org/pypi/langflow/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
          if [ "$version" = "$last_released_version" ]; then
            echo "Version $version is already released. Skipping release."
            exit 1
          else
            echo version=$version >> $GITHUB_OUTPUT
          fi
      - name: Build project for distribution
        run: make build main=true
      - name: Display pyproject.toml langflow-base Version
        run: cat pyproject.toml | grep langflow-base
      - name: Publish to PyPI
        env:
          POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          make publish main=true
      - name: Upload Artifact
        uses: actions/upload-artifact@v4
        with:
          name: dist
          path: dist

  docker_build:
    name: Build Docker Image
    runs-on: ubuntu-latest
    needs: release
    steps:
      - uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          file: ./docker/build_and_push.Dockerfile
          tags: |
            langflowai/langflow:${{ needs.release.outputs.version }}
            langflowai/langflow:1.0-alpha

  create_release:
    name: Create Release
    runs-on: ubuntu-latest
    needs: [docker_build, release]
    steps:
      - uses: actions/download-artifact@v4
        with:
          name: dist
          path: dist
      - name: Create Release
        uses: ncipollo/release-action@v1
        with:
          artifacts: "dist/*"
          token: ${{ secrets.GITHUB_TOKEN }}
          draft: false
          generateReleaseNotes: true
          prerelease: true
          tag: v${{ needs.release.outputs.version }}
          commit: dev
113 .github/workflows/pre-release.yml vendored
@@ -1,22 +1,31 @@
name: pre-release

name: Langflow Pre-release (Unified)
run-name: Langflow (${{inputs.release_type}}) Pre-release by @${{ github.actor }}
on:
  pull_request:
    types:
      - closed
    branches:
      - dev
    paths:
      - "pyproject.toml"
  workflow_dispatch:
    inputs:
      release_package:
        description: "Release package"
        required: true
        type: boolean
        default: false
      release_type:
        description: "Type of release (base or main)"
        required: true
        type: choice
        options:
          - base
          - main

env:
  POETRY_VERSION: "1.5.1"
  POETRY_VERSION: "1.8.2"

jobs:
  if_release:
    if: ${{ (github.event.pull_request.merged == true) && contains(github.event.pull_request.labels.*.name, 'pre-release') }}
  release:
    name: Release Langflow
    if: inputs.release_package == true
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.check-version.outputs.version }}
    steps:
      - uses: actions/checkout@v4
      - name: Install poetry
@@ -26,12 +35,63 @@ jobs:
        with:
          python-version: "3.10"
          cache: "poetry"
      - name: Build project for distribution
        run: make build
      - name: Check Version
        id: check-version
        run: |
          echo version=$(poetry version --short) >> $GITHUB_OUTPUT
          if [ "${{ inputs.release_type }}" == "base" ]; then
            version=$(cd src/backend/base && poetry version --short)
            last_released_version=$(curl -s "https://pypi.org/pypi/langflow-base/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
          else
            version=$(poetry version --short)
            last_released_version=$(curl -s "https://pypi.org/pypi/langflow/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
          fi
          if [ "$version" = "$last_released_version" ]; then
            echo "Version $version is already released. Skipping release."
            exit 1
          else
            echo version=$version >> $GITHUB_OUTPUT
          fi
      - name: Build project for distribution
        run: |
          if [ "${{ inputs.release_type }}" == "base" ]; then
            make build base=true
          else
            make build main=true
          fi
      - name: Publish to PyPI
        env:
          POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          if [ "${{ inputs.release_type }}" == "base" ]; then
            make publish base=true
          else
            make publish main=true
          fi
      - name: Upload Artifact
        uses: actions/upload-artifact@v4
        with:
          name: dist${{ inputs.release_type }}
          path: ${{ inputs.release_type == 'base' && 'src/backend/base/dist' || 'dist' }}

  call_docker_build:
    name: Call Docker Build Workflow
    needs: release
    uses: langflow-ai/langflow/.github/workflows/docker-build.yml@dev
    with:
      version: ${{ needs.release.outputs.version }}
      release_type: ${{ inputs.release_type }}
    secrets: inherit

  create_release:
    name: Create Release
    runs-on: ubuntu-latest
    needs: [release]
    if: ${{ inputs.release_type == 'main' }}
    steps:
      - uses: actions/download-artifact@v4
        with:
          name: dist${{ inputs.release_type }}
          path: dist
      - name: Create Release
        uses: ncipollo/release-action@v1
        with:
@@ -40,26 +100,5 @@ jobs:
          draft: false
          generateReleaseNotes: true
          prerelease: true
          tag: v${{ steps.check-version.outputs.version }}
          tag: v${{ needs.release.outputs.version }}
          commit: dev
      - name: Publish to PyPI
        env:
          POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          poetry publish
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          file: ./build_and_push.Dockerfile
          tags: logspace/langflow:${{ steps.check-version.outputs.version }}
@@ -15,7 +15,7 @@ on:
      - "src/backend/**"

env:
  POETRY_VERSION: "1.5.0"
  POETRY_VERSION: "1.8.2"

jobs:
  build:
@@ -23,21 +23,23 @@ jobs:
    strategy:
      matrix:
        python-version:
          - "3.10"
          - "3.12"
          - "3.11"
          - "3.10"
    env:
      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
    steps:
      - uses: actions/checkout@v4
      - name: Install poetry
        run: pipx install poetry==$POETRY_VERSION
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_caching"
        with:
          python-version: ${{ matrix.python-version }}
          cache: "poetry"
      - name: Install dependencies
        run: poetry install
          poetry-version: ${{ env.POETRY_VERSION }}
          cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}
      - name: Install Python dependencies
        run: |
          poetry env use ${{ matrix.python-version }}
          poetry install
      - name: Run unit tests
        run: |
          make tests
          make tests args="-n auto"
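The new `make tests args="-n auto"` invocation forwards extra flags to pytest through the Makefile's `$(args)` variable; `-n auto` parallelizes across CPU cores, which assumes pytest-xdist is among the dev dependencies. Locally the same mechanism accepts any pytest flag:

```shell
make tests args="-n auto"        # parallel run (needs pytest-xdist)
make tests args="-k test_cache"  # hypothetical filter; any pytest flag works
```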
26 .github/workflows/release.yml vendored
@@ -10,7 +10,7 @@ on:
      - "pyproject.toml"

env:
  POETRY_VERSION: "1.5.1"
  POETRY_VERSION: "1.8.2"

jobs:
  if_release:
@@ -31,15 +31,6 @@ jobs:
        id: check-version
        run: |
          echo version=$(poetry version --short) >> $GITHUB_OUTPUT
      - name: Create Release
        uses: ncipollo/release-action@v1
        with:
          artifacts: "dist/*"
          token: ${{ secrets.GITHUB_TOKEN }}
          draft: false
          generateReleaseNotes: true
          tag: v${{ steps.check-version.outputs.version }}
          commit: main
      - name: Publish to PyPI
        env:
          POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
@@ -59,7 +50,16 @@ jobs:
        with:
          context: .
          push: true
          file: ./build_and_push.Dockerfile
          file: ./docker/build_and_push.Dockerfile
          tags: |
            logspace/langflow:${{ steps.check-version.outputs.version }}
            logspace/langflow:latest
            langflowai/langflow:${{ steps.check-version.outputs.version }}
            langflowai/langflow:latest
      - name: Create Release
        uses: ncipollo/release-action@v1
        with:
          artifacts: "dist/*"
          token: ${{ secrets.GITHUB_TOKEN }}
          draft: false
          generateReleaseNotes: true
          tag: v${{ steps.check-version.outputs.version }}
          commit: main
130 .github/workflows/typescript_test.yml vendored Normal file
@@ -0,0 +1,130 @@
name: Run Frontend Tests

on:
  pull_request:
    paths:
      - "src/frontend/**"

env:
  POETRY_VERSION: "1.8.2"
  NODE_VERSION: "21"
  PYTHON_VERSION: "3.12"
  # Define the directory where Playwright browsers will be installed.
  # Adjust if your project uses a different path.
  PLAYWRIGHT_BROWSERS_PATH: "ms-playwright"

jobs:
  setup-and-test:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        shardIndex: [1, 2, 3, 4]
        shardTotal: [4]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        id: setup-node
        with:
          node-version: ${{ env.NODE_VERSION }}

      - name: Cache Node.js dependencies
        uses: actions/cache@v4
        id: npm-cache
        with:
          path: ~/.npm
          key: ${{ runner.os }}-node-${{ hashFiles('src/frontend/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-

      - name: Install Node.js dependencies
        run: |
          cd src/frontend
          npm ci
        if: ${{ steps.setup-node.outputs.cache-hit != 'true' }}

      - name: Cache playwright binaries
        uses: actions/cache@v4
        id: playwright-cache
        with:
          path: |
            ~/.cache/ms-playwright
          key: ${{ runner.os }}-playwright-${{ hashFiles('src/frontend/package-lock.json') }}
      - name: Install Frontend dependencies
        run: |
          cd src/frontend
          npm ci

      - name: Install Playwright's browser binaries
        run: |
          cd src/frontend
          npx playwright install --with-deps
        if: steps.playwright-cache.outputs.cache-hit != 'true'
      - name: Install Playwright's dependencies
        run: |
          cd src/frontend
          npx playwright install-deps
        if: steps.playwright-cache.outputs.cache-hit != 'true'

      - name: Set up Python ${{ env.PYTHON_VERSION }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_caching"
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          poetry-version: ${{ env.POETRY_VERSION }}
          cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}
      - name: Install Python dependencies
        run: |
          poetry env use ${{ env.PYTHON_VERSION }}
          poetry install

      - name: create .env
        run: |
          touch .env
          echo "${{ secrets.ENV_VARS }}" > .env

      - name: Run Playwright Tests
        run: |
          cd src/frontend
          npx playwright test --shard ${{ matrix.shardIndex }}/${{ matrix.shardTotal }} --workers 2

      - name: Upload blob report to GitHub Actions Artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: blob-report-${{ matrix.shardIndex }}
          path: src/frontend/blob-report
          retention-days: 1

  merge-reports:
    needs: setup-and-test
    runs-on: ubuntu-latest
    if: always()
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}

      - name: Download blob reports from GitHub Actions Artifacts
        uses: actions/download-artifact@v4
        with:
          path: all-blob-reports
          pattern: blob-report-*
          merge-multiple: true

      - name: Merge into HTML Report
        run: |
          npx playwright merge-reports --reporter html ./all-blob-reports

      - name: Upload HTML report
        uses: actions/upload-artifact@v4
        with:
          name: html-report--attempt-${{ github.run_attempt }}
          path: playwright-report
          retention-days: 14
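For debugging a flaky shard, the same commands the workflow runs can be used directly on a developer machine. This sketch assumes frontend dependencies and Playwright browsers are already installed, and that the Playwright config emits blob reports for the merge step:

```shell
cd src/frontend
npx playwright test --shard 1/4 --workers 2                      # run shard 1 of 4
npx playwright merge-reports --reporter html ./all-blob-reports  # combine shard reports into HTML
```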
6 .gitignore vendored
@@ -258,5 +258,11 @@ langflow.db

/tmp/*
src/backend/langflow/frontend/
src/backend/base/langflow/frontend/
.docker
scratchpad*
chroma*/*
stuff/*
src/frontend/playwright-report/index.html
*.bak
prof/*
47 .pre-commit-config.yaml Normal file
@@ -0,0 +1,47 @@
fail_fast: true
repos:
  - repo: https://github.com/pre-commit/mirrors-eslint
    rev: "v9.1.1"
    hooks:
      - id: eslint
        files: \.[jt]sx?$ # *.js, *.jsx, *.ts and *.tsx
        types: [file]
        args: ["--fix", "--no-warn-ignored"]
        additional_dependencies:
          - eslint@9.1.1
          - eslint-plugin-prettier
          - eslint-config-prettier
          - prettier
          - eslint-plugin-react@latest
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.1.0
    hooks:
      - id: check-case-conflict
      - id: end-of-file-fixer
        # python, js and ts only
        files: \.(py|js|ts)$
      - id: mixed-line-ending
        files: \.(py|js|ts)$
        args:
          - --fix=lf
      - id: trailing-whitespace
      - id: pretty-format-json
        exclude: ^tsconfig.*.json
        args:
          - --autofix
          - --indent=4
          - --no-sort-keys
      - id: check-merge-conflict
  - repo: https://github.com/astral-sh/ruff-pre-commit
    # Ruff version.
    rev: v0.4.2
    hooks:
      # Run the linter.
      - id: ruff
        # Python
        files: \.py$
        types: [file]
      # Run the formatter.
      - id: ruff-format
        files: \.py$
        types: [file]
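With this config in place, the hooks run on every `git commit` once registered (the Makefile's `install_backend` target later in this diff also runs `pre-commit install`). A typical standalone bootstrap, using standard pre-commit CLI commands not shown in this diff:

```shell
pipx install pre-commit     # or: pip install pre-commit
pre-commit install          # register the git hook in this clone
pre-commit run --all-files  # one-off run across the whole repo
```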
@@ -1,31 +0,0 @@
# Read the Docs configuration file for Sphinx projects
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Set the OS, Python version and other tools you might need
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
    # You can also specify other tool versions:
    # nodejs: "19"
    # rust: "1.64"
    # golang: "1.19"

# Build documentation in the "docs/" directory with Sphinx
sphinx:
  configuration: docs/conf.py

# Optionally build your docs in additional formats such as PDF and ePub
# formats:
#   - pdf
#   - epub

# Optional but recommended, declare the Python requirements required
# to build your documentation
# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
# python:
#   install:
#     - requirements: docs/requirements.txt
23 .vscode/launch.json vendored
@@ -13,10 +13,29 @@
        "7860",
        "--reload",
        "--log-level",
        "debug"
        "debug",
        "--loop",
        "asyncio"
      ],
      "jinja": true,
      "justMyCode": true,
      "justMyCode": false,
      "env": {
        "LANGFLOW_LOG_LEVEL": "debug"
      },
      "envFile": "${workspaceFolder}/.env"
    },
    {
      "name": "Debug CLI",
      "type": "python",
      "request": "launch",
      "module": "langflow",
      "args": [
        "run",
        "--path",
        "${workspaceFolder}/src/backend/base/langflow/frontend"
      ],
      "jinja": true,
      "justMyCode": false,
      "env": {
        "LANGFLOW_LOG_LEVEL": "debug"
      },
@@ -17,23 +17,23 @@ diverse, inclusive, and healthy community.
Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
- Demonstrating empathy and kindness toward other people
- Being respectful of differing opinions, viewpoints, and experiences
- Giving and gracefully accepting constructive feedback
- Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
- Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
- The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
- Trolling, insulting or derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
- Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities
@@ -60,7 +60,7 @@ representative at an online or offline event.

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
contact@logspace.ai.
contact@langflow.org.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
@@ -106,7 +106,7 @@ Violating these terms may lead to a permanent ban.
### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
@@ -16,12 +16,12 @@ The branch structure is as follows:

## 🚩GitHub Issues

Our [issues](https://github.com/logspace-ai/langflow/issues) page is kept up to date
Our [issues](https://github.com/langflow-ai/langflow/issues) page is kept up to date
with bugs, improvements, and feature requests. There is a taxonomy of labels to help
with sorting and discovery of issues of interest.

If you're looking for help with your code, consider posting a question on the
[GitHub Discussions board](https://github.com/logspace-ai/langflow/discussions). Please
[GitHub Discussions board](https://github.com/langflow-ai/langflow/discussions). Please
understand that we won't be able to provide individual support via email. We
also believe that help is much more valuable if it's **shared publicly**,
so that more people can benefit from it.
@@ -40,7 +40,7 @@ so that more people can benefit from it.

## Issue labels

[See this page](https://github.com/logspace-ai/langflow/labels) for an overview of
[See this page](https://github.com/langflow-ai/langflow/labels) for an overview of
the system we use to tag our issues and pull requests.

## Local development
2 LICENSE
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2023 Logspace
Copyright (c) 2024 Langflow

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
175 Makefile
@@ -1,6 +1,37 @@
.PHONY: all init format lint build build_frontend install_frontend run_frontend run_backend dev help tests coverage

all: help
log_level ?= debug
host ?= 0.0.0.0
port ?= 7860
env ?= .env
open_browser ?= true
path = src/backend/base/langflow/frontend

codespell:
	@poetry install --with spelling
	poetry run codespell --toml pyproject.toml

fix_codespell:
	@poetry install --with spelling
	poetry run codespell --toml pyproject.toml --write

setup_poetry:
	pipx install poetry

add:
	@echo 'Adding dependencies'
ifdef devel
	cd src/backend/base && poetry add --group dev $(devel)
endif

ifdef main
	poetry add $(main)
endif

ifdef base
	cd src/backend/base && poetry add $(base)
endif

init:
	@echo 'Installing backend dependencies'
@@ -16,24 +47,23 @@ coverage:

# allow passing arguments to pytest
tests:
	@make install_backend
	poetry run pytest tests --instafail $(args)
# Use like:
# make tests args="-n auto"

format:
	poetry run ruff . --fix
	poetry run ruff check . --fix
	poetry run ruff format .
	cd src/frontend && npm run format

lint:
	make install_backend
	poetry run mypy src/backend/langflow
	poetry run ruff . --fix
	poetry run mypy --namespace-packages -p "langflow"

install_frontend:
	cd src/frontend && npm install

install_frontendci:
	cd src/frontend && npm ci

install_frontendc:
	cd src/frontend && rm -rf node_modules package-lock.json && npm install
@@ -43,22 +73,57 @@ run_frontend:

tests_frontend:
ifeq ($(UI), true)
	cd src/frontend && ./run-tests.sh --ui
	cd src/frontend && npx playwright test --ui --project=chromium
else
	cd src/frontend && ./run-tests.sh
	cd src/frontend && npx playwright test --project=chromium
endif

run_cli:
	poetry run langflow run --path src/frontend/build
	@echo 'Running the CLI'
	@make install_frontend > /dev/null
	@echo 'Install backend dependencies'
	@make install_backend > /dev/null
	@echo 'Building the frontend'
	@make build_frontend > /dev/null
ifdef env
	@make start env=$(env) host=$(host) port=$(port) log_level=$(log_level)
else
	@make start host=$(host) port=$(port) log_level=$(log_level)
endif

run_cli_debug:
	poetry run langflow run --path src/frontend/build --log-level debug
	@echo 'Running the CLI in debug mode'
	@make install_frontend > /dev/null
	@echo 'Building the frontend'
	@make build_frontend > /dev/null
	@echo 'Install backend dependencies'
	@make install_backend > /dev/null
ifdef env
	@make start env=$(env) host=$(host) port=$(port) log_level=debug
else
	@make start host=$(host) port=$(port) log_level=debug
endif

start:
	@echo 'Running the CLI'
ifeq ($(open_browser),false)
	@make install_backend && poetry run langflow run --path $(path) --log-level $(log_level) --host $(host) --port $(port) --env-file $(env) --no-open-browser
else
	@make install_backend && poetry run langflow run --path $(path) --log-level $(log_level) --host $(host) --port $(port) --env-file $(env)
endif

setup_devcontainer:
	make init
	make build_frontend
	poetry run langflow --path src/frontend/build

setup_env:
	@sh ./scripts/setup/update_poetry.sh 1.8.2
	@sh ./scripts/setup/setup_env.sh

frontend:
	make install_frontend
	make run_frontend
@@ -68,38 +133,70 @@ frontendc:
	make run_frontend

install_backend:
	poetry install --extras deploy
	@echo 'Installing backend dependencies'
	@poetry install
	@poetry run pre-commit install

backend:
	@echo 'Setting up the environment'
	@make setup_env
	make install_backend
	@-kill -9 `lsof -t -i:7860`
ifeq ($(login),1)
	@echo "Running backend without autologin";
	poetry run langflow run --backend-only --port 7860 --host 0.0.0.0 --no-open-browser --env-file .env
	@-kill -9 $(lsof -t -i:7860)
ifdef login
	@echo "Running backend autologin is $(login)";
	LANGFLOW_AUTO_LOGIN=$(login) poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio
else
	@echo "Running backend with autologin";
	LANGFLOW_AUTO_LOGIN=True poetry run langflow run --backend-only --port 7860 --host 0.0.0.0 --no-open-browser --env-file .env
	@echo "Running backend respecting the .env file";
	poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio
endif

build_and_run:
	echo 'Removing dist folder'
	@echo 'Removing dist folder'
	@make setup_env
	rm -rf dist
	make build && poetry run pip install dist/*.tar.gz && poetry run langflow run
	rm -rf src/backend/base/dist
	make build
	poetry run pip install dist/*.tar.gz
	poetry run langflow run

build_and_install:
	echo 'Removing dist folder'
	@echo 'Removing dist folder'
	rm -rf dist
	make build && poetry run pip install dist/*.tar.gz
	rm -rf src/backend/base/dist
	make build && poetry run pip install dist/*.whl && pip install src/backend/base/dist/*.whl --force-reinstall

build_frontend:
	cd src/frontend && CI='' npm run build
	cp -r src/frontend/build src/backend/langflow/frontend
	cp -r src/frontend/build src/backend/base/langflow/frontend

build:
	make install_frontend
	@echo 'Building the project'
	@make setup_env
ifdef base
	make install_frontendci
	make build_frontend
	poetry build --format sdist
	rm -rf src/backend/langflow/frontend
	make build_langflow_base
endif

ifdef main
	make build_langflow
endif

build_langflow_base:
	cd src/backend/base && poetry build
	rm -rf src/backend/base/langflow/frontend

build_langflow_backup:
	poetry lock && poetry build

build_langflow:
	cd ./scripts && poetry run python update_dependencies.py
	poetry lock
	poetry build
ifdef restore
	mv pyproject.toml.bak pyproject.toml
	mv poetry.lock.bak poetry.lock
endif

dev:
	make install_frontend
@@ -111,10 +208,34 @@ else
	docker compose $(if $(debug),-f docker-compose.debug.yml) up
endif

publish:
	make build
lock_base:
	cd src/backend/base && poetry lock

lock_langflow:
	poetry lock

lock:
	# Run both in parallel
	@echo 'Locking dependencies'
	cd src/backend/base && poetry lock
	poetry lock

publish_base:
	cd src/backend/base && poetry publish

publish_langflow:
	poetry publish

publish:
	@echo 'Publishing the project'
ifdef base
	make publish_base
endif

ifdef main
	make publish_langflow
endif

help:
	@echo '----'
	@echo 'format - run code formatters'
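Taken together, the reworked targets split the release into two packages. A plausible local sequence using only the variables the Makefile itself defines (`base`, `main`, `login`), with values shown here as illustrative choices:

```shell
make build base=true       # build langflow-base in src/backend/base
make publish base=true     # publish it to PyPI
make build main=true && make publish main=true  # then the main package
make backend login=false   # example value; $(login) is exported as LANGFLOW_AUTO_LOGIN
```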
107 README.md
@@ -1,69 +1,51 @@
<!-- markdownlint-disable MD030 -->

# ⛓️ Langflow
# [](https://www.langflow.org)

<h3>Discover a simpler & smarter way to build around Foundation Models</h3>
### [Langflow](https://www.langflow.org) is a new, visual way to build, iterate and deploy AI apps.

[](https://github.com/logspace-ai/langflow/releases)
[](https://github.com/logspace-ai/langflow/contributors)
[](https://github.com/logspace-ai/langflow/last-commit)
[](https://github.com/logspace-ai/langflow/issues)
[](https://github.com/logspace-ai/langflow/repo-size)
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/logspace-ai/langflow)
[](https://opensource.org/licenses/MIT)
[](https://star-history.com/#logspace-ai/langflow)
[](https://github.com/logspace-ai/langflow/fork)
[](https://twitter.com/langflow_ai)
[](https://discord.com/invite/EqksyE2EX9)
[](https://huggingface.co/spaces/Logspace/Langflow)
[](https://codespaces.new/logspace-ai/langflow)
# ⚡️ Documentation and Community

The easiest way to create and customize your flow

<a href="https://github.com/logspace-ai/langflow">
<img width="100%" src="https://github.com/logspace-ai/langflow/blob/dev/docs/static/img/new_langflow_demo.gif"></a>
- [Documentation](https://docs.langflow.org)
- [Discord](https://discord.com/invite/EqksyE2EX9)

# 📦 Installation

### <b>Locally</b>

You can install Langflow from pip:
You can install Langflow with pip:

```shell
# This installs the package without dependencies for local models
pip install langflow
# Make sure you have Python 3.10 installed on your system.
# Install the pre-release version
python -m pip install langflow --pre --force-reinstall

# or stable version
python -m pip install langflow -U
```

To use local models (e.g. llama-cpp-python) run:
Then, run Langflow with:

```shell
pip install langflow[local]
python -m langflow run
```

This will install the following dependencies:
You can also preview Langflow in [HuggingFace Spaces](https://huggingface.co/spaces/Langflow/Langflow-Preview). [Clone the space using this link](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true), to create your own Langflow workspace in minutes.

- [CTransformers](https://github.com/marella/ctransformers)
- [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)
- [sentence-transformers](https://github.com/UKPLab/sentence-transformers)
# 🎨 Creating Flows

You can still use models from projects like LocalAI, Ollama, LM Studio, Jan and others.
Creating flows with Langflow is easy. Simply drag components from the sidebar onto the canvas and connect them to start building your application.

Next, run:
Explore by editing prompt parameters, grouping components into a single high-level component, and building your own Custom Components.

```shell
python -m langflow
Once you’re done, you can export your flow as a JSON file.

Load the flow with:

```python
from langflow.load import run_flow_from_json

results = run_flow_from_json("path/to/flow.json", input_value="Hello, World!")
```

or

```shell
langflow run # or langflow --help
```

### HuggingFace Spaces

You can also check it out on [HuggingFace Spaces](https://huggingface.co/spaces/Logspace/Langflow) and run it in your browser! You can even clone it and have your own copy of Langflow to play with.

# 🖥️ Command Line Interface (CLI)

Langflow provides a command-line interface (CLI) for easy management and configuration.
@@ -83,7 +65,6 @@ Each option is detailed below:
- `--workers`: Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`.
- `--timeout`: Sets the worker timeout in seconds. The default is `60`.
- `--port`: Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`.
- `--config`: Defines the path to the configuration file. The default is `config.yaml`.
- `--env-file`: Specifies the path to the .env file containing environment variables. The default is `.env`.
- `--log-level`: Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`.
- `--components-path`: Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`.
@@ -114,50 +95,36 @@ Follow our step-by-step guide to deploy Langflow on Google Cloud Platform (GCP)

Alternatively, click the **"Open in Cloud Shell"** button below to launch Google Cloud Shell, clone the Langflow repository, and start an **interactive tutorial** that will guide you through the process of setting up the necessary resources and deploying Langflow on your GCP project.

[](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/logspace-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
[](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)

## Deploy on Railway

Use this template to deploy Langflow 1.0 Preview on Railway:

[](https://railway.app/template/UsJ1uB?referralCode=MnPSdg)

Or this one to deploy Langflow 0.6.x:

[](https://railway.app/template/JMXEWp?referralCode=MnPSdg)

## Deploy on Render

<a href="https://render.com/deploy?repo=https://github.com/logspace-ai/langflow/tree/main">
<a href="https://render.com/deploy?repo=https://github.com/langflow-ai/langflow/tree/main">
  <img src="https://render.com/images/deploy-to-render-button.svg" alt="Deploy to Render" />
</a>

# 🎨 Creating Flows

Creating flows with Langflow is easy. Simply drag components from the sidebar onto the canvas and connect them to start building your application.

Explore by editing prompt parameters, grouping components into a single high-level component, and building your own Custom Components.

Once you’re done, you can export your flow as a JSON file.

Load the flow with:

```python
from langflow import load_flow_from_json

flow = load_flow_from_json("path/to/flow.json")
# Now you can use it
flow("Hey, have you heard of Langflow?")
```

# 👋 Contributing

We welcome contributions from developers of all levels to our open-source project on GitHub. If you'd like to contribute, please check our [contributing guidelines](./CONTRIBUTING.md) and help make Langflow more accessible.

Join our [Discord](https://discord.com/invite/EqksyE2EX9) server to ask questions, make suggestions, and showcase your projects! 🦾

---

[](https://star-history.com/#logspace-ai/langflow&Date)
[](https://star-history.com/#langflow-ai/langflow&Date)

# 🌟 Contributors

[](https://github.com/logspace-ai/langflow/graphs/contributors)
[](https://github.com/langflow-ai/langflow/graphs/contributors)

# 📄 License

Langflow is released under the MIT License. See the LICENSE file for details.
Langflow is released under the MIT License. See the [LICENSE](LICENSE) file for details.
@ -1,97 +0,0 @@
|
|||
|
||||
|
||||
# syntax=docker/dockerfile:1
|
||||
# Keep this syntax directive! It's used to enable Docker BuildKit
|
||||
|
||||
# Based on https://github.com/python-poetry/poetry/discussions/1879?sort=top#discussioncomment-216865
|
||||
# but I try to keep it updated (see history)
|
||||
|
||||
################################
|
||||
# PYTHON-BASE
|
||||
# Sets up all our shared environment variables
|
||||
################################
|
||||
FROM python:3.10-slim as python-base
|
||||
|
||||
# python
|
||||
ENV PYTHONUNBUFFERED=1 \
|
||||
# prevents python creating .pyc files
|
||||
PYTHONDONTWRITEBYTECODE=1 \
|
||||
\
|
||||
# pip
|
||||
PIP_DISABLE_PIP_VERSION_CHECK=on \
|
||||
PIP_DEFAULT_TIMEOUT=100 \
|
||||
\
|
||||
# poetry
|
||||
# https://python-poetry.org/docs/configuration/#using-environment-variables
|
||||
POETRY_VERSION=1.7.1 \
|
||||
# make poetry install to this location
|
||||
POETRY_HOME="/opt/poetry" \
|
||||
# make poetry create the virtual environment in the project's root
|
||||
# it gets named `.venv`
|
||||
POETRY_VIRTUALENVS_IN_PROJECT=true \
|
||||
# do not ask any interactive question
|
||||
POETRY_NO_INTERACTION=1 \
|
||||
\
|
||||
# paths
|
||||
# this is where our requirements + virtual environment will live
|
||||
PYSETUP_PATH="/opt/pysetup" \
|
||||
VENV_PATH="/opt/pysetup/.venv"
|
||||
|
||||
|
||||
# prepend poetry and venv to path
|
||||
ENV PATH="$POETRY_HOME/bin:$VENV_PATH/bin:$PATH"
|
||||
|
||||
|
||||
################################
|
||||
# BUILDER-BASE
|
||||
# Used to build deps + create our virtual environment
|
||||
################################
|
||||
FROM python-base as builder-base
|
||||
RUN apt-get update \
|
||||
&& apt-get install --no-install-recommends -y \
|
||||
# deps for installing poetry
|
||||
curl \
|
||||
# deps for building python deps
|
||||
build-essential
|
||||
|
||||
|
||||
# install poetry - respects $POETRY_VERSION & $POETRY_HOME
|
||||
# The --mount will mount the buildx cache directory to where
|
||||
# Poetry and Pip store their cache so that they can re-use it
|
||||
RUN --mount=type=cache,target=/root/.cache \
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
# copy project requirement files here to ensure they will be cached.
|
||||
WORKDIR $PYSETUP_PATH
|
||||
COPY poetry.lock pyproject.toml ./
|
||||
COPY ./src/backend/langflow/main.py ./src/backend/langflow/main.py
|
||||
# Copy README.md to the build context
|
||||
COPY README.md .
|
||||
# install runtime deps - uses $POETRY_VIRTUALENVS_IN_PROJECT internally
|
||||
RUN --mount=type=cache,target=/root/.cache \
|
||||
poetry install --without dev --extras deploy
|
||||
|
||||
|
||||
################################
|
||||
# DEVELOPMENT
|
||||
# Image used during development / testing
|
||||
################################
|
||||
FROM python-base as development
|
||||
WORKDIR $PYSETUP_PATH
|
||||
|
||||
# copy in our built poetry + venv
|
||||
COPY --from=builder-base $POETRY_HOME $POETRY_HOME
|
||||
COPY --from=builder-base $PYSETUP_PATH $PYSETUP_PATH
|
||||
|
||||
# Copy just one file to avoid rebuilding the whole image
|
||||
COPY ./src/backend/langflow/__init__.py ./src/backend/langflow/__init__.py
|
||||
# quicker install as runtime deps are already installed
|
||||
RUN --mount=type=cache,target=/root/.cache \
|
||||
poetry install --with=dev --extras deploy
|
||||
|
||||
# copy in our app code
|
||||
COPY ./src/backend ./src/backend
|
||||
RUN --mount=type=cache,target=/root/.cache \
|
||||
poetry install --with=dev --extras deploy
|
||||
COPY ./tests ./tests=
|
||||
|
||||
|
|
@@ -1,3 +0,0 @@
export LANGFLOW_DATABASE_URL="mysql+pymysql://${username}:${password}@${host}:3306/${dbname}"
# echo $LANGFLOW_DATABASE_URL
uvicorn --factory src.backend.langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --log-level debug
@@ -69,10 +69,7 @@ services:
      - traefik.http.routers.${STACK_NAME?Variable not set}-proxy-http.middlewares=${STACK_NAME?Variable not set}-www-redirect,${STACK_NAME?Variable not set}-https-redirect

  backend: &backend
    image: "ogabrielluiz/langflow:latest"
    build:
      context: ../
      dockerfile: base.Dockerfile
    image: "langflowai/langflow:latest"
    depends_on:
      - db
      - broker

@@ -143,9 +140,6 @@ services:
    <<: *backend
    env_file:
      - .env
    build:
      context: ../
      dockerfile: base.Dockerfile
    command: celery -A langflow.worker.celery_app worker --loglevel=INFO --concurrency=1 -n lf-worker@%h -P eventlet
    healthcheck:
      test: "exit 0"

@@ -158,9 +152,6 @@ services:
      - .env
    networks:
      - default
    build:
      context: ../
      dockerfile: base.Dockerfile
    environment:
      - FLOWER_PORT=5555
@@ -1,33 +0,0 @@
version: "3.4"

services:
  backend:
    volumes:
      - ./:/app
    build:
      context: ./
      dockerfile: ./dev.Dockerfile
    command:
      [
        "sh",
        "-c",
        "pip install debugpy -t /tmp && python /tmp/debugpy --wait-for-client --listen 0.0.0.0:5678 -m uvicorn --factory src.backend.langflow.main:create_app --host 0.0.0.0 --port 7860 --reload",
      ]
    ports:
      - 7860:7860
      - 5678:5678
    restart: on-failure

  frontend:
    build:
      context: ./src/frontend
      dockerfile: ./dev.Dockerfile
      args:
        - BACKEND_URL=http://backend:7860
    ports:
      - "3000:3000"
    volumes:
      - ./src/frontend/public:/home/node/app/public
      - ./src/frontend/src:/home/node/app/src
      - ./src/frontend/package.json:/home/node/app/package.json
    restart: on-failure
docker/.dockerignore (new file, +9)
@@ -0,0 +1,9 @@
.venv/
**/aws
node_modules
**/node_modules/
dist/
**/build/
src/backend/langflow/frontend
**/langflow-pre.db
**/langflow.db
@@ -10,7 +10,7 @@
################################
# PYTHON-BASE
# Sets up all our shared environment variables
################################
FROM python:3.10-slim as python-base
FROM python:3.12-slim as python-base

# python
ENV PYTHONUNBUFFERED=1 \

@@ -23,7 +23,7 @@ ENV PYTHONUNBUFFERED=1 \
    \
    # poetry
    # https://python-poetry.org/docs/configuration/#using-environment-variables
    POETRY_VERSION=1.7.1 \
    POETRY_VERSION=1.8.2 \
    # make poetry install to this location
    POETRY_HOME="/opt/poetry" \
    # make poetry create the virtual environment in the project's root

@@ -47,7 +47,7 @@ ENV PATH="$POETRY_HOME/bin:$VENV_PATH/bin:$PATH"
# Used to build deps + create our virtual environment
################################
FROM python-base as builder-base
RUN
RUN apt-get update \
    && apt-get install --no-install-recommends -y \
    # deps for installing poetry

@@ -55,25 +55,38 @@ RUN apt-get update \
    # deps for building python deps
    build-essential \
    # npm
    npm
    npm \
    # gcc
    gcc \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Now we need to copy the entire project into the image
WORKDIR /app
COPY pyproject.toml poetry.lock ./
COPY src ./src
COPY scripts ./scripts
COPY Makefile ./
COPY README.md ./
RUN curl -sSL https://install.python-poetry.org | python3 - && make build
RUN --mount=type=cache,target=/root/.cache \
    curl -sSL https://install.python-poetry.org | python3 -
RUN useradd -m -u 1000 user && \
    mkdir -p /app/langflow && \
    chown -R user:user /app && \
    chmod -R u+w /app/langflow

# Final stage for the application
FROM python-base as final
# Update PATH with home/user/.local/bin
ENV PATH="/home/user/.local/bin:${PATH}"
RUN python -m pip install requests && cd ./scripts && python update_dependencies.py
RUN $POETRY_HOME/bin/poetry lock
RUN $POETRY_HOME/bin/poetry build

# Copy virtual environment and built .tar.gz from builder base
COPY --from=builder-base /app/dist/*.tar.gz ./

USER user
# Install the package from the .tar.gz
RUN pip install *.tar.gz
RUN python -m pip install /app/dist/*.tar.gz --user

WORKDIR /app
CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860"]
ENTRYPOINT ["python", "-m", "langflow", "run"]
CMD ["--host", "0.0.0.0", "--port", "7860"]
@@ -10,7 +10,7 @@
################################
# PYTHON-BASE
# Sets up all our shared environment variables
################################
FROM python:3.10-slim as python-base
FROM python:3.12-slim as python-base

# python
ENV PYTHONUNBUFFERED=1 \

@@ -23,7 +23,7 @@ ENV PYTHONUNBUFFERED=1 \
    \
    # poetry
    # https://python-poetry.org/docs/configuration/#using-environment-variables
    POETRY_VERSION=1.5.1 \
    POETRY_VERSION=1.8.2 \
    # make poetry install to this location
    POETRY_HOME="/opt/poetry" \
    # make poetry create the virtual environment in the project's root

@@ -52,41 +52,47 @@ RUN apt-get update \
    # deps for installing poetry
    curl \
    # deps for building python deps
    build-essential
    build-essential \
    # npm
    npm \
    # gcc
    gcc \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# install poetry - respects $POETRY_VERSION & $POETRY_HOME
# The --mount will mount the buildx cache directory to where
# Poetry and Pip store their cache so that they can re-use it
RUN --mount=type=cache,target=/root/.cache \
    curl -sSL https://install.python-poetry.org | python3 -

# copy project requirement files here to ensure they will be cached.
WORKDIR $PYSETUP_PATH
COPY ./poetry.lock ./pyproject.toml ./
# Copy README.md to the build context
COPY ./README.md ./
# install runtime deps - uses $POETRY_VIRTUALENVS_IN_PROJECT internally
RUN --mount=type=cache,target=/root/.cache \
    poetry install --without dev --extras deploy
# Now we need to copy the entire project into the image
COPY pyproject.toml poetry.lock ./
COPY src/frontend/package.json /tmp/package.json
RUN cd /tmp && npm install
WORKDIR /app
COPY src/frontend ./src/frontend
RUN rm -rf src/frontend/node_modules
RUN cp -a /tmp/node_modules /app/src/frontend
COPY scripts ./scripts
COPY Makefile ./
COPY README.md ./
RUN cd src/frontend && npm run build
COPY src/backend ./src/backend
RUN cp -r src/frontend/build src/backend/base/langflow/frontend
RUN rm -rf src/backend/base/dist
RUN useradd -m -u 1000 user && \
    mkdir -p /app/langflow && \
    chown -R user:user /app && \
    chmod -R u+w /app/langflow

# Update PATH with home/user/.local/bin
ENV PATH="/home/user/.local/bin:${PATH}"
RUN cd src/backend/base && $POETRY_HOME/bin/poetry build

# Copy virtual environment and built .tar.gz from builder base

USER user
# Install the package from the .tar.gz
RUN python -m pip install /app/src/backend/base/dist/*.tar.gz --user


################################
# DEVELOPMENT
# Image used during development / testing
################################
FROM python-base as development
WORKDIR $PYSETUP_PATH

# copy in our built poetry + venv
COPY --from=builder-base $POETRY_HOME $POETRY_HOME
COPY --from=builder-base $PYSETUP_PATH $PYSETUP_PATH

# Copy just one file to avoid rebuilding the whole image
COPY ./src/backend/langflow/__init__.py ./src/backend/langflow/__init__.py
# quicker install as runtime deps are already installed
RUN --mount=type=cache,target=/root/.cache \
    poetry install --with=dev --extras deploy

# copy in our app code
COPY ./src/backend ./src/backend
COPY ./tests ./tests
ENTRYPOINT ["python", "-m", "langflow", "run"]
CMD ["--host", "0.0.0.0", "--port", "7860"]
@@ -13,7 +13,7 @@ services:
      - "7860:7860"
    volumes:
      - ./:/app
    command: bash -c "uvicorn --factory src.backend.langflow.main:create_app --host 0.0.0.0 --port 7860 --reload"
    command: bash -c "uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --loop asyncio"
    networks:
      - langflow
  frontend:

@@ -23,7 +23,7 @@ services:
      args:
        - BACKEND_URL=http://backend:7860
    depends_on:
      - backend
      - backend
    environment:
      - VITE_PROXY_TARGET=http://backend:7860
    ports:
@@ -15,6 +15,7 @@ COPY ./ ./
# Install dependencies
RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi

RUN poetry add pymysql==1.0.2
RUN poetry add botocore
RUN poetry add pymysql

CMD ["sh", "./container-cmd-cdk.sh"]

docker/container-cmd-cdk.sh (new file, +5)
@@ -0,0 +1,5 @@
export LANGFLOW_DATABASE_URL="mysql+pymysql://${username}:${password}@${host}:3306/${dbname}"
# echo $LANGFLOW_DATABASE_URL
uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --log-level debug --loop asyncio

# python -m langflow run --host 0.0.0.0 --port 7860
@@ -15,4 +15,4 @@ COPY ./ ./
# Install dependencies
RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi

CMD ["uvicorn", "--factory", "src.backend.langflow.main:create_app", "--host", "0.0.0.0", "--port", "7860", "--reload", "--log-level", "debug"]
CMD ["uvicorn", "--factory", "langflow.main:create_app", "--host", "0.0.0.0", "--port", "7860", "--reload", "--log-level", "debug", "--loop", "asyncio"]
@@ -1,15 +1,3 @@
FROM python:3.10-slim
FROM langflowai/langflow:latest

RUN apt-get update && apt-get install gcc g++ git make -y && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

WORKDIR $HOME/app

COPY --chown=user . $HOME/app

RUN pip install langflow>=0.5.0 -U --user
CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860"]
@@ -1,9 +1,65 @@
# LangFlow Docker Running
# Running LangFlow with Docker

```sh
git clone git@github.com:logspace-ai/langflow.git
cd langflow/docker_example
docker compose up
```
This guide will help you get LangFlow up and running using Docker and Docker Compose.

The web UI will be accessible on port [7860](http://localhost:7860/)
## Prerequisites

- Docker
- Docker Compose

## Steps

1. Clone the LangFlow repository:

   ```sh
   git clone https://github.com/langflow-ai/langflow.git
   ```

2. Navigate to the `docker_example` directory:

   ```sh
   cd langflow/docker_example
   ```

3. Run the Docker Compose file:

   ```sh
   docker compose up
   ```

LangFlow will now be accessible at [http://localhost:7860/](http://localhost:7860/).

## Docker Compose Configuration

The Docker Compose configuration spins up two services: `langflow` and `postgres`.

### LangFlow Service

The `langflow` service uses the `langflowai/langflow:latest` Docker image and exposes port 7860. It depends on the `postgres` service.

Environment variables:

- `LANGFLOW_DATABASE_URL`: The connection string for the PostgreSQL database.
- `LANGFLOW_CONFIG_DIR`: The directory where LangFlow stores logs, file storage, monitor data, and secret keys.

Volumes:

- `langflow-data`: This volume is mapped to `/var/lib/langflow` in the container.

### PostgreSQL Service

The `postgres` service uses the `postgres:16` Docker image and exposes port 5432.

Environment variables:

- `POSTGRES_USER`: The username for the PostgreSQL database.
- `POSTGRES_PASSWORD`: The password for the PostgreSQL database.
- `POSTGRES_DB`: The name of the PostgreSQL database.

Volumes:

- `langflow-postgres`: This volume is mapped to `/var/lib/postgresql/data` in the container.

## Switching to a Specific LangFlow Version

If you want to use a specific version of LangFlow, you can modify the `image` field under the `langflow` service in the Docker Compose file. For example, to use version 1.0-alpha, change `langflowai/langflow:latest` to `langflowai/langflow:1.0-alpha`.
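A minimal sketch of that change, using the service name described above:

```yaml
services:
  langflow:
    image: langflowai/langflow:1.0-alpha # pinned instead of :latest
```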
@@ -1,10 +1,30 @@
version: '3'
version: "3.8"

services:
  langflow:
    build:
      context: .
      dockerfile: Dockerfile
    image: langflowai/langflow:latest
    ports:
      - "7860:7860"
    command: langflow run --host 0.0.0.0
    depends_on:
      - postgres
    environment:
      - LANGFLOW_DATABASE_URL=postgresql://langflow:langflow@postgres:5432/langflow
      # This variable defines where the logs, file storage, monitor data and secret keys are stored.
      - LANGFLOW_CONFIG_DIR=/var/lib/langflow
    volumes:
      - langflow-data:/var/lib/langflow

  postgres:
    image: postgres:16
    environment:
      POSTGRES_USER: langflow
      POSTGRES_PASSWORD: langflow
      POSTGRES_DB: langflow
    ports:
      - "5432:5432"
    volumes:
      - langflow-postgres:/var/lib/postgresql/data

volumes:
  langflow-postgres:
  langflow-data:
docker_example/pre.Dockerfile (new file, +3)
@@ -0,0 +1,3 @@
FROM langflowai/langflow:1.0-alpha

CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860"]

docker_example/pre.docker-compose.yml (new file, +30)
@@ -0,0 +1,30 @@
version: "3.8"

services:
  langflow:
    image: langflowai/langflow:1.0-alpha
    ports:
      - "7860:7860"
    depends_on:
      - postgres
    environment:
      - LANGFLOW_DATABASE_URL=postgresql://langflow:langflow@postgres:5432/langflow
      # This variable defines where the logs, file storage, monitor data and secret keys are stored.
      - LANGFLOW_CONFIG_DIR=app/langflow
    volumes:
      - langflow-data:/app/langflow

  postgres:
    image: postgres:16
    environment:
      POSTGRES_USER: langflow
      POSTGRES_PASSWORD: langflow
      POSTGRES_DB: langflow
    ports:
      - "5432:5432"
    volumes:
      - langflow-postgres:/var/lib/postgresql/data

volumes:
  langflow-postgres:
  langflow-data:
@@ -1,18 +1,24 @@
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";

# API Keys

## Introduction
Langflow provides an API key functionality that allows users to access their individual components and flows without traditional login authentication. The API key is a user-specific token that can be included in the request header or query parameter to authenticate API calls. This documentation outlines how to generate, use, and manage API keys in Langflow.

Langflow offers an API Key functionality that allows users to access their individual components and flows without going through traditional login authentication. The API Key is a user-specific token that can be included in the request's header or query parameter to authenticate API calls. The following documentation outlines how to generate, use, and manage these API Keys in Langflow.
<Admonition type="warning">
  The default user and password are set using the LANGFLOW_SUPERUSER and
  LANGFLOW_SUPERUSER_PASSWORD environment variables.

  The default values are langflow and langflow, respectively.
</Admonition>

## Generating an API Key

### Through Langflow UI

{/* add image img/api-key.png */}

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{

@@ -36,7 +42,7 @@ Include the `x-api-key` in the HTTP header when making API requests:

```bash
curl -X POST \
  http://localhost:3000/api/v1/process/<your_flow_id> \
  http://localhost:3000/api/v1/run/<your_flow_id> \
  -H 'Content-Type: application/json'\
  -H 'x-api-key: <your api key>'\
  -d '{"inputs": {"text":""}, "tweaks": {}}'

@@ -87,7 +93,7 @@ print(run_flow(inputs, flow_id=FLOW_ID, tweaks=TWEAKS, apiKey=api_key))

### Using the Query Parameter

Alternatively, you can include the API key as a query parameter in the URL:
Include the API key as a query parameter in the URL:

```bash
curl -X POST \

@@ -140,9 +146,9 @@ print(run_flow(inputs, flow_id=FLOW_ID, tweaks=TWEAKS, apiKey=api_key))

## Security Considerations

- **Visibility**: The API key won't be retrievable again through the UI for security reasons.
- **Scope**: The key only allows access to the flows and components of the specific user to whom it was issued.
- **Visibility**: For security reasons, the API key cannot be retrieved again through the UI.
- **Scope**: The key allows access only to the flows and components of the specific user to whom it was issued.

## Revoking an API Key

To revoke an API key, simply delete it from the UI. This will immediately invalidate the key and prevent it from being used again.
To revoke an API key, delete it from the UI. This action immediately invalidates the key and prevents it from being used again.
@@ -78,7 +78,7 @@ The Chat Widget can be embedded into any HTML page, inside a _`<body>`_ tag, as
To embed the Chat Widget using React, you'll need to insert this _`<script>`_ tag into the React _index.html_ file, inside the _`<body>`_ tag:

```html
<script src="https://cdn.jsdelivr.net/gh/logspace-ai/langflow-embedded-chat@main/dist/build/static/js/bundle.min.js"></script>
<script src="https://cdn.jsdelivr.net/gh/langflow-ai/langflow-embedded-chat@main/dist/build/static/js/bundle.min.js"></script>
```

Then, declare your Web Component and encapsulate it in a React component.

@@ -115,7 +115,7 @@ Finally, you can place the component anywhere in your code to display the Chat W
To use it in Angular, first add this _`<script>`_ tag into the Angular _index.html_ file, inside the _`<body>`_ tag.

```html
<script src="https://cdn.jsdelivr.net/gh/logspace-ai/langflow-embedded-chat@main/dist/build/static/js/bundle.min.js"></script>
<script src="https://cdn.jsdelivr.net/gh/langflow-ai/langflow-embedded-chat@main/dist/build/static/js/bundle.min.js"></script>
```

When you use a custom web component in an Angular template, the Angular compiler might show a warning when it doesn't recognize the custom elements by default. To suppress this warning, add _`CUSTOM_ELEMENTS_SCHEMA`_ to the module's _`@NgModule.schemas`_.

@@ -185,7 +185,7 @@ Use the widget API to customize your Chat Widget:
</Admonition>

| Prop              | Type   | Required | Description                                                      |
| ----------------- | ------ | -------- | ---------------------------------------------------------------- |
| bot_message_style | JSON   | No       | Applies custom formatting to bot messages.                       |
| chat_input_field  | String | Yes      | Defines the type of the input field for chat messages.           |
| chat_inputs       | JSON   | Yes      | Determines the chat input elements and their respective values.  |

@@ -208,4 +208,4 @@ Use the widget API to customize your Chat Widget:
| tweaks             | JSON   | No | Applies additional custom adjustments for the associated flow.     |
| user_message_style | JSON   | No | Determines the formatting for user messages in the chat window.    |
| width              | Number | No | Sets the width of the chat window in pixels.                       |
| window_title       | String | No | Sets the title displayed in the chat window's header or title bar. |
docs/docs/administration/cli.mdx (new file, +77)
@@ -0,0 +1,77 @@
# Command Line Interface (CLI)

## Overview

Langflow's Command Line Interface (CLI) is a powerful tool that allows you to interact with the Langflow server from the command line. The CLI provides a wide range of commands to help you shape Langflow to your needs.

Running the CLI without any arguments will display a list of available commands and options.

```bash
python -m langflow run --help
# or
python -m langflow run
```

Each option of the `run` command is detailed below:

- `--help`: Displays all available options.
- `--host`: Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`.
- `--workers`: Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`.
- `--timeout`: Sets the worker timeout in seconds. The default is `60`.
- `--port`: Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`.
- `--env-file`: Specifies the path to the .env file containing environment variables. The default is `.env`.
- `--log-level`: Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`.
- `--components-path`: Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`.
- `--log-file`: Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`.
- `--cache`: Selects the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`.
- `--dev/--no-dev`: Toggles the development mode. The default is `no-dev`.
- `--path`: Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable.
- `--open-browser/--no-open-browser`: Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`.
- `--remove-api-keys/--no-remove-api-keys`: Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`.
- `--install-completion [bash|zsh|fish|powershell|pwsh]`: Installs completion for the specified shell.
- `--show-completion [bash|zsh|fish|powershell|pwsh]`: Shows completion for the specified shell, allowing you to copy it or customize the installation.
- `--backend-only`: This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable.
- `--store`: This parameter, with a default value of `True`, enables the store features; use `--no-store` to deactivate it. It can be configured using the `LANGFLOW_STORE` environment variable.
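For illustration, here is a hypothetical invocation combining several of the options above (the paths and values are examples only):

```bash
# Run only the API server, with custom components and the store disabled
python -m langflow run --backend-only --no-store \
  --components-path /path/to/components --log-level info
```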
These parameters are important for users who need to customize the behavior of Langflow, especially in development or specialized deployment scenarios.

### API Key Command

The `api-key` command allows you to create an API key for accessing Langflow's API when `LANGFLOW_AUTO_LOGIN` is set to `True`.

```bash
python -m langflow api-key --help

Usage: langflow api-key [OPTIONS]

  Creates an API key for the default superuser if AUTO_LOGIN is enabled.
  Args: log_level (str, optional): Logging level. Defaults to "error".
  Returns: None

╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --log-level    TEXT    Logging level. [env var: LANGFLOW_LOG_LEVEL]          │
│                        [default: error]                                      │
│ --help                 Show this message and exit.                           │
╰──────────────────────────────────────────────────────────────────────────────╯
```

Once you run the `api-key` command, it will create an API key for the default superuser if `LANGFLOW_AUTO_LOGIN` is set to `True`.

```bash
python -m langflow api-key
╭─────────────────────────────────────────────────────────────────────╮
│ API Key Created Successfully:                                       │
│                                                                     │
│ sk-O0elzoWID1izAH8RUKrnnvyyMwIzHi2Wk-uXWoNJ2Ro                      │
│                                                                     │
│ This is the only time the API key will be displayed.                │
│ Make sure to store it in a secure location.                         │
│                                                                     │
│ The API key has been copied to your clipboard. Cmd + V to paste it. │
╰─────────────────────────────────────────────────────────────────────╯
```

### Environment Variables

You can configure many of the CLI options using environment variables. These can be exported in your operating system or added to a `.env` file and loaded using the `--env-file` option.

A sample `.env` file named `.env.example` is included with the project. Copy this file to a new file named `.env` and replace the example values with your actual settings. If you're setting values in both your OS and the `.env` file, the `.env` settings will take precedence.
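As a sketch, using a variable name documented above (the port value is a placeholder):

```bash
# Equivalent ways to set the port: export it, or put it in the .env file
export LANGFLOW_PORT=7861
python -m langflow run
# or
echo "LANGFLOW_PORT=7861" > .env
python -m langflow run --env-file .env
```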
@@ -26,13 +26,14 @@ Components are the building blocks of the flows. They are made of inputs, output
</div>

{" "}

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/single-compenent.png"),
    dark: useBaseUrl("img/single-compenent.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
  style={{ width: "100%", maxWidth: "800px", margin: "20px auto" }}
/>

<div style={{ marginBottom: "20px" }}>
@@ -30,7 +30,7 @@ Here is an example:
<CH.Code lineNumbers={false}>

```python
from langflow import CustomComponent
from langflow.custom import CustomComponent
from langchain.schema import Document

class DocumentProcessor(CustomComponent):

@@ -92,7 +92,7 @@ The Python script for every Custom Component should follow a set of rules. Let's
The script must contain a **single class** that inherits from _`CustomComponent`_.

```python
from langflow import CustomComponent
from langflow.custom import CustomComponent
from langchain.schema import Document

class MyComponent(CustomComponent):

@@ -113,7 +113,7 @@ class MyComponent(CustomComponent):
This class requires a _`build`_ method used to run the component and define its fields.

```python
from langflow import CustomComponent
from langflow.custom import CustomComponent
from langchain.schema import Document

class MyComponent(CustomComponent):

@@ -131,10 +131,10 @@ class MyComponent(CustomComponent):

---

The [Return Type Annotation](https://docs.python.org/3/library/typing.html) of the _`build`_ method defines the component type (e.g., Chain, BaseLLM, or basic Python types). Check out all supported types in the [component reference](../components/custom).
The [Return Type Annotation](https://docs.python.org/3/library/typing.html) of the _`build`_ method defines the component type (e.g., Chain, BaseLanguageModel, or basic Python types). Check out all supported types in the [component reference](../components/custom).

```python
from langflow import CustomComponent
from langflow.custom import CustomComponent
from langchain.schema import Document

class MyComponent(CustomComponent):

@@ -153,7 +153,7 @@ class MyComponent(CustomComponent):
---

```python
from langflow import CustomComponent
from langflow.custom import CustomComponent
from langchain.schema import Document

class MyComponent(CustomComponent):

@@ -179,7 +179,7 @@ Check out the [component reference](../components/custom) for more details on th
---

```python
from langflow import CustomComponent
from langflow.custom import CustomComponent
from langchain.schema import Document

class MyComponent(CustomComponent):

@@ -204,7 +204,7 @@ Let's create a custom component that processes a document (_`langchain.schema.Do
To start, let's choose a name for our component by adding a _`display_name`_ attribute. This name will appear on the canvas. The name of the class is not relevant, but let's call it _`DocumentProcessor`_.

```python
from langflow import CustomComponent
from langflow.custom import CustomComponent
from langchain.schema import Document

# focus

@@ -227,7 +227,7 @@ class DocumentProcessor(CustomComponent):
We can also write a description for it using a _`description`_ attribute.

```python
from langflow import CustomComponent
from langflow.custom import CustomComponent
from langchain.schema import Document

class DocumentProcessor(CustomComponent):

@@ -244,7 +244,7 @@ class DocumentProcessor(CustomComponent):
---

```python
from langflow import CustomComponent
from langflow.custom import CustomComponent
from langchain.schema import Document

class DocumentProcessor(CustomComponent):

@@ -283,11 +283,11 @@ The return type is _`Document`_.
The _`build_config`_ method is here defined to customize the component fields.

- _`options`_ determines that the field will be a dropdown menu. The list values and field type must be _`str`_.
- _`value`_ is the default option of the dropdown menu.
- _`value`_ is the default value of the field.
- _`display_name`_ is the name of the field to be displayed.

```python
from langflow import CustomComponent
from langflow.custom import CustomComponent
from langchain.schema import Document

class DocumentProcessor(CustomComponent):

@@ -366,7 +366,7 @@ For advanced customization, Langflow offers the option to create and load custom

### Folder Structure

Create a folder that follows the same structural conventions as the [config.yaml](https://github.com/logspace-ai/langflow/blob/dev/src/backend/langflow/config.yaml) file. Inside this main directory, use a `custom_components` subdirectory for your custom components.
Create a folder that follows the same structural conventions as the [config.yaml](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/config.yaml) file. Inside this main directory, use a `custom_components` subdirectory for your custom components.

Inside `custom_components`, you can create a Python file for each component. Similarly, any custom agents should be housed in an `agents` subdirectory, as sketched below.
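A hypothetical layout following these conventions (the file names are examples only):

```
/path/to/components/
└── custom_components/
    ├── document_processor.py
    └── agents/
        └── my_agent.py
```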
@@ -391,13 +391,13 @@ The recommended way to load custom components is to set the _`LANGFLOW_COMPONENT

```bash
export LANGFLOW_COMPONENTS_PATH='["/path/to/components"]'
langflow
langflow run
```

Alternatively, you can specify the path to your custom components using the _`--components-path`_ argument when running the Langflow CLI, as shown below:

```bash
langflow --components-path /path/to/components
langflow run --components-path /path/to/components
```

Langflow will attempt to load all of the components found in the specified directory. If a component fails to load due to errors in the component's code, Langflow will print an error message to the console but will continue loading the rest of the components.

@@ -406,4 +406,5 @@ Langflow will attempt to load all of the components found in the specified direc

Once your custom components have been loaded successfully, they will appear in Langflow's sidebar. From there, you can add them to your Langflow canvas for use. However, please note that components with errors will not be available for addition to the canvas. Always ensure your code is error-free before attempting to load components.

Remember, creating custom components allows you to extend the functionality of Langflow to better suit your unique needs. Happy coding!
Remember, creating custom components allows you to extend the functionality of Langflow to better suit your unique needs. Happy coding!
import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";
@@ -1,9 +1,3 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";

# Features

<div style={{ marginBottom: "20px" }}>

@@ -14,13 +8,14 @@ import Admonition from "@theme/Admonition";
</div>

{" "}

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/features.png"),
    dark: useBaseUrl("img/features.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
  style={{ width: "100%", maxWidth: "800px", margin: "20px auto" }}
/>

<div style={{ marginBottom: "20px" }}>
@@ -46,14 +41,12 @@ The Code button shows snippets to use your flow as a Python object or an API.

**Python Code**

Through the Langflow package, you can load a flow from a JSON file and use it as a LangChain object.
Through the Langflow package, you can run your flow from a JSON file. The example below shows how to run a flow from a JSON file.

```py
from langflow import load_flow_from_json
```python
from langflow.load import run_flow_from_json

flow = load_flow_from_json("path/to/flow.json")
# Now you can use it like any chain
flow("Hey, have you heard of Langflow?")
results = run_flow_from_json("path/to/flow.json", input_value="Hello, World!")
```

**API**

@@ -67,3 +60,9 @@ The example below shows a Python script making a POST request to a local API end
>
<ReactPlayer playing controls url="/videos/langflow_api.mp4" />
</div>

import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";

docs/docs/administration/global-env.mdx (new file, +54)
@@ -0,0 +1,54 @@
import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";
import ReactPlayer from "react-player";

# Global Environment Variables

Langflow 1.0 alpha includes the option to add **Global Environment Variables** for your application.

## Add a global variable to a project

In this example, you'll add the `openai_api_key` credential as a global environment variable to the **Basic Prompting** starter project.

For more information on the starter flow, see [Basic prompting](../starter-projects/basic-prompting.mdx).

1. From the Langflow dashboard, click **New Project**.
2. Select **Basic Prompting**.

   The **Basic Prompting** flow is created.

3. To create an environment variable for the **OpenAI** component:
   1. In the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**.
   2. In the **Variable Name** field, enter `openai_api_key`.
   3. In the **Value** field, paste your OpenAI API Key (`sk-...`).
   4. For the variable **Type**, select **Credential**.
   5. In the **Apply to Fields** field, select **OpenAI API Key** to apply this variable to all fields named **OpenAI API Key**.
   6. Click **Save Variable**.

   You now have an `openai_api_key` global environment variable for your Langflow project.

<Admonition type="tip">
  You can also create global variables in **Settings** > **Variables and
  Secrets**.
</Admonition>

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/global-env.png",
    dark: "img/global-env.png",
  }}
  style={{ width: "40%", margin: "20px auto" }}
/>

4. To view and manage your project's global environment variables, visit **Settings** > **Variables and Secrets**.

For more on variables in HuggingFace Spaces, see [Managing Secrets](https://huggingface.co/docs/hub/spaces-overview#managing-secrets).

## Video

<div
  style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
  <ReactPlayer playing controls url="/videos/langflow_global_variables.mp4" />
</div>
@@ -4,7 +4,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";

# Sign up and Sign in
# Sign Up and Sign In

## Introduction

@@ -105,7 +105,7 @@ Users can change their profile settings by clicking on the profile icon in the t
    light: useBaseUrl("img/my-account.png"),
    dark: useBaseUrl("img/my-account.png"),
  }}
  style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
  style={{ width: "50%", maxWidth: "600px", margin: "20px auto" }}
/>

By clicking on **Profile Settings**, the user is taken to the profile settings page, where they can change their password and their profile picture.

@@ -116,10 +116,11 @@ By clicking on **Profile Settings**, the user is taken to the profile settings p
    light: useBaseUrl("img/profile-settings.png"),
    dark: useBaseUrl("img/profile-settings.png"),
  }}
  style={{ maxWidth: "600px", margin: "0 auto" }}
  style={{ maxWidth: "600px", margin: "20px auto" }}
/>

By clicking on **Admin Page**, the superuser is taken to the admin page, where they can manage users and groups.
By clicking on **Admin Page**, the superuser is taken to the admin page, where they
can manage users and groups.

<ZoomableImage
  alt="Docusaurus themed image"
docs/docs/administration/playground.mdx (new file, +37)
@@ -0,0 +1,37 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";

# Playground

In Langflow 1.0 alpha, the **Playground** replaces the **Interaction Panel**.

The **Playground** provides an interface for interacting with flows without opening them in the flow editor.

It even works for flows hosted on the Langflow store!

As long as you have a flow's environment variables set, you can run it by clicking the **Playground** button.

1. From your **Collections** page, click **Playground** in one of your flows.
   The **Playground** window opens.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/playground-chat.png"),
    dark: useBaseUrl("img/playground-chat.png"),
  }}
  style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
/>

2. Chat with your bot as you normally would, all without having to open the editor.

## Video

<div
  style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
  <ReactPlayer playing controls url="/videos/langflow_playground.mp4" />
</div>
@@ -8,84 +8,83 @@ import Admonition from '@theme/Admonition';
  </p>
</Admonition>

Agents are components that use reasoning to make decisions and take actions, designed to autonomously perform tasks or provide services with some degree of “freedom” (or agency). They combine the power of LLM chaining processes with access to external tools such as APIs to interact with applications and accomplish tasks.
Agents are components that use reasoning to make decisions and take actions, designed to autonomously perform tasks or provide services with some degree of agency. LLM chains can only perform hardcoded sequences of actions, while agents use LLMs to reason through which actions to take, and in which order.

---

### AgentInitializer

The `AgentInitializer` component is a quick way to construct a zero-shot agent from a language model (LLM) and tools.
The `AgentInitializer` constructs a zero-shot agent from a language model (LLM) and additional tools.

**Params**
**Parameters**:

- **LLM:** Language Model to use in the `AgentInitializer`.
- **Memory:** Used to add memory functionality to an agent. It allows the agent to store and retrieve information from previous conversations.
- **Tools:** Tools that the agent will have access to.
- **Agent:** The type of agent to be instantiated. Currently supported: `zero-shot-react-description`, `react-docstore`, `self-ask-with-search`, `conversational-react-description`, and `openai-functions`.
- **LLM:** The language model used by the `AgentInitializer`.
- **Memory:** Enables memory functionality, allowing the agent to recall and use information from previous interactions.
- **Tools:** The tools available to the agent.
- **Agent:** Specifies the type of agent to instantiate. Currently supported types include `zero-shot-react-description`, `react-docstore`, `self-ask-with-search`, `conversational-react-description`, and `openai-functions`.
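The supported agent types above match LangChain's agent-type identifiers, so a minimal LangChain-level sketch can show what this component wires together (this is not the Langflow API itself; it assumes the `langchain` and `openai` packages of this era and an `OPENAI_API_KEY` in the environment):

```python
# Sketch only: a zero-shot agent built from an LLM and a list of tools,
# mirroring the LLM / Tools / Agent parameters described above.
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)                # the LLM parameter
tools = load_tools(["llm-math"], llm=llm)  # the Tools parameter
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,  # the Agent parameter
)
agent.run("What is 25% of 300?")
```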
---

### CSVAgent

A `CSVAgent` is an agent that is designed to interact with CSV (Comma-Separated Values) files. CSV files are a common format for storing tabular data, where each row represents a record and each column represents a field. The CSV agent can perform various tasks, such as reading and writing CSV files, processing the data, and generating tables. It can extract information from the CSV file, manipulate the data, and perform operations like filtering, sorting, and aggregating.
The `CSVAgent` interacts with CSV (Comma-Separated Values) files, commonly used to store tabular data. Each row in a CSV file represents a record, and each column represents a field. The CSV agent can read and write CSV files, process data, and perform tasks such as filtering, sorting, and aggregating.

**Params**
**Parameters**:

- **LLM:** Language Model to use in the `CSVAgent`.
- **path:** The file path to the CSV data.
- **LLM:** The language model used by the `CSVAgent`.
- **Path:** The file path to the CSV data.

---

### JSONAgent

The `JSONAgent` deals with JSON (JavaScript Object Notation) data. Similar to the CSVAgent, it works with a language model (LLM) and a toolkit designed for JSON manipulation. This agent can iteratively explore a JSON blob to find the information needed to answer the user's question. It can list keys, get values, and navigate through the structure of the JSON object.
The `JSONAgent` manages JSON (JavaScript Object Notation) data. This agent, like the CSVAgent, uses a language model (LLM) and a toolkit for JSON manipulation. It can explore a JSON blob to extract needed information, list keys, retrieve values, and navigate through the JSON structure.

**Params**
**Parameters**:

- **LLM:** Language Model to use in the `JSONAgent`.
- **Toolkit:** Toolkit that the agent will have access to.
- **LLM:** The language model used by the `JSONAgent`.
- **Toolkit:** The toolkit available to the agent.

---

### SQLAgent

A `SQLAgent` is an agent that is designed to interact with SQL databases. It is capable of performing various tasks, such as querying the database, retrieving data, and executing SQL statements. The agent can provide information about the structure of the database, including the tables and their schemas. It can also perform operations like inserting, updating, and deleting data in the database. The SQL agent is a helpful tool for managing and working with SQL databases efficiently.
The `SQLAgent` interacts with SQL databases, capable of querying, retrieving data, and executing SQL statements. It provides insights into the database structure, including tables and schemas, and can perform operations such as insertions, updates, and deletions.

**Params**
**Parameters**:

- **LLM:** Language Model to use in the `SQLAgent`.
- **database_uri:** A string representing the connection URI for the SQL database.
- **LLM:** The language model used by the `SQLAgent`.
- **Database URI:** The connection URI for the SQL database.

---

### VectorStoreAgent

The `VectorStoreAgent` is designed to work with a vector store – a data structure used for storing and querying vector-based representations of data. The `VectorStoreAgent` can query the vector store to find relevant information based on user inputs.
The `VectorStoreAgent` operates with a vector store, which is a data structure for storing and querying vector-based data representations. This agent can query the vector store to find information relevant to user inputs.

**Params**
**Parameters**:

- **LLM:** Language Model to use in the `VectorStoreAgent`.
- **Vector Store Info:** `VectorStoreInfo` to use in the `VectorStoreAgent`.
- **LLM:** The language model used by the `VectorStoreAgent`.
- **Vector Store Info:** The `VectorStoreInfo` used by the agent.

---

### VectorStoreRouterAgent

The `VectorStoreRouterAgent` is a custom agent that takes a vector store router as input. It is typically used when there’s a need to retrieve information from multiple vector stores. These can be connected through a `VectorStoreRouterToolkit` and sent over to the `VectorStoreRouterAgent`. An agent configured with multiple vector stores can route queries to the appropriate store based on the context.
The `VectorStoreRouterAgent` is a custom agent that uses a vector store router. It is typically used to retrieve information from multiple vector stores connected through a `VectorStoreRouterToolkit`.

**Params**
**Parameters**:

- **LLM:** Language Model to use in the `VectorStoreRouterAgent`.
- **Vector Store Router Toolkit:** `VectorStoreRouterToolkit` to use in the `VectorStoreRouterAgent`.
- **LLM:** The language model used by the `VectorStoreRouterAgent`.
- **Vector Store Router Toolkit:** The toolkit used by the agent.

---

### ZeroShotAgent

The `ZeroShotAgent` is an agent that uses the ReAct framework to determine which tool to use based solely on the tool's description. It can be configured with any number of tools and requires a description for each tool. The agent is designed to be the most general-purpose action agent. It uses an `LLMChain` to determine which actions to take and in what order.
The `ZeroShotAgent` uses the ReAct framework to decide which tool to use based on the tool's description. It is the most general-purpose action agent, capable of determining the necessary actions and their sequence through an `LLMChain`.

**Params**
**Parameters**:

- **Allowed Tools:** Tools that the agent will have access to.
- **LLM Chain:** LLM Chain to be used by the agent.
- **Allowed Tools:** The tools accessible to the agent.
- **LLM Chain:** The LLM Chain used by the agent.
@ -6,143 +6,65 @@ import Admonition from "@theme/Admonition";
|
|||
# Chains
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may
|
||||
contain some rough edges. Share your feedback or report issues to help us
|
||||
improve! 🛠️📝
|
||||
</p>
|
||||
<p>
|
||||
Thank you for your patience while we enhance our documentation. It may
|
||||
have some imperfections. Share your feedback or report issues to help us
|
||||
improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
Chains, in the context of language models, refer to a series of calls made to a language model. It allows for the output of one call to be used as the input for another call. Different types of chains allow for different levels of complexity. Chains are useful for creating pipelines and executing specific scenarios.
|
||||
Chains, in the context of language models, refer to a series of calls made to a language model. This approach allows for using the output of one call as the input for another. Different chain types facilitate varying complexity levels, making them useful for creating pipelines and executing specific scenarios.
|
||||
|
||||
---
|
||||
|
||||
### CombineDocsChain

`CombineDocsChain` includes methods to combine or aggregate loaded documents for question-answering functionality.

<Admonition type="info">
  Works as a proxy of LangChain's [documents](https://python.langchain.com/docs/modules/chains/document/) chains generated by the `load_qa_chain` function.
</Admonition>

**Parameters**:

- **LLM:** Language Model to use in the chain.
- **chain_type:** Type of chain to be used, each applying a different combination strategy:
  - **stuff**: The most straightforward document chain. It takes a list of documents, inserts them all into a prompt, and passes that prompt to an LLM. Suitable for cases where documents are small and few.
  - **map_reduce**: Applies an LLM to each document individually (the `Map` step), treating the output as a new document. It then combines these documents to get a single output (the `Reduce` step). Compression may occur to ensure the documents fit in the final chain.
  - **map_rerank**: Runs an initial prompt on each document to complete a task and score its certainty. Returns the highest-scoring response.
  - **refine**: Iteratively updates its answer by looping over the input documents. Each document, along with the latest intermediate answer, is passed to an LLM to generate a new response. This method suits tasks requiring analysis of more documents than the model's context can handle, though it can be less effective for tasks requiring detailed cross-referencing or comprehensive information.

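For reference, a minimal sketch of how these strategies map to LangChain's `load_qa_chain` (the document content and question are illustrative):

```python
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.schema import Document

docs = [Document(page_content="Langflow is a visual framework for building LLM applications.")]

# chain_type selects the combination strategy: "stuff", "map_reduce", "map_rerank", or "refine".
chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
print(chain.run(input_documents=docs, question="What is Langflow?"))
```
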
---
### ConversationChain

`ConversationChain` facilitates dynamic, interactive conversations with a language model, ideal for chatbots or virtual assistants.

**Parameters**:

- **LLM:** Language Model to use in the chain.
- **Memory:** Default memory store.
- **input_key:** Specifies the key under which user input is stored in the conversation memory, enabling the chain to process and generate responses.
- **output_key:** Specifies the key under which the generated response is stored, allowing retrieval of the response using this key.
- **verbose:** Controls the verbosity of the chain's output. Set to `True` to enable detailed internal state outputs, useful for debugging and understanding the chain's behavior. Defaults to `False`.

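A minimal usage sketch in LangChain code (assuming an OpenAI API key is configured):

```python
from langchain.chains import ConversationChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory

conversation = ConversationChain(
    llm=OpenAI(temperature=0),
    memory=ConversationBufferMemory(),  # stores the running dialogue
    verbose=True,
)

print(conversation.predict(input="Hi, my name is Ada."))
print(conversation.predict(input="What is my name?"))  # memory supplies the earlier turn
```
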
---
### ConversationalRetrievalChain

`ConversationalRetrievalChain` combines document search with question-answering capabilities, extracting information and providing answers.

<Admonition type="info">
  A retriever is a component that finds documents based on a query. It doesn't store the documents themselves, but it returns the ones that match the query.
</Admonition>

**Parameters**:

- **LLM:** Language Model to use in the chain.
- **Memory:** Default memory store.
- **Retriever:** The retriever used to fetch relevant documents.
- **chain_type:** Type of chain to be used, each applying a different combination strategy:
  - **stuff**: Inserts a list of documents into a prompt and passes it to an LLM. Suitable for cases where documents are small and few.
  - **map_reduce**: Processes each document with an LLM separately, then combines the results into a single output. Compression may occur to fit the documents into the final chain.
  - **map_rerank**: Scores a response from each document based on certainty and returns the highest-scoring one.
  - **refine**: Updates answers iteratively by looping through documents, passing each with intermediate answers to an LLM for a new response. This method is beneficial for tasks that involve extensive document analysis.
- **return_source_documents:** Specifies whether to include the source documents used in the output. Useful for providing context or references to the user. Defaults to `True`.
- **verbose:** Controls verbosity of output. Set to `True` for detailed logs, useful for debugging. Defaults to `False`.

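A sketch of the equivalent LangChain call (`vectorstore` is assumed to be an existing vector store such as Chroma or FAISS):

```python
from langchain.chains import ConversationalRetrievalChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

chain = ConversationalRetrievalChain.from_llm(
    llm=OpenAI(temperature=0),
    retriever=vectorstore.as_retriever(),  # assumed pre-built vector store
    memory=memory,
)

result = chain({"question": "What does the document say about pricing?"})
print(result["answer"])
```
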
---
### LLMChain

The `LLMChain` is a straightforward chain that adds functionality around language models. It combines a prompt template with a language model: input variables format the prompt template, the formatted prompt is sent to the language model, and the generated output is returned as the result of the `LLMChain`.

**Parameters**:

- **LLM:** Language Model to use in the chain.
- **Memory:** Default memory store.
- **Prompt:** Prompt template object to use in the chain.
- **output_key:** Specifies which key in the LLM output dictionary should be returned as the final output. By default, the `LLMChain` returns both the input and output key values. Defaults to `text`.
- **verbose:** Whether or not to run in verbose mode. In verbose mode, intermediate logs are printed to the console. Defaults to `False`.

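A minimal sketch (the prompt text is illustrative):

```python
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)

chain = LLMChain(llm=OpenAI(temperature=0.7), prompt=prompt, output_key="text")
print(chain.run("colorful socks"))
```
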
---
### LLMMathChain

The `LLMMathChain` combines a language model (LLM) and a math calculation component. It allows the user to input math problems and get the corresponding solutions.

The `LLMMathChain` works by using the language model with an `LLMChain` to understand the input math problem and generate a math expression. It then passes this expression to the math component, which evaluates it and returns the result.

**Parameters**:

- **LLM:** Language Model to use in the chain.
- **LLMChain:** LLM Chain to use in the chain.
- **Memory:** Default memory store.
- **input_key:** Specifies the input value for the mathematical calculation, allowing you to provide the specific values or variables to use. Defaults to `question`.
- **output_key:** Specifies the key under which the result of the calculation is stored, allowing you to retrieve it later. Defaults to `answer`.
- **verbose:** Whether or not to run in verbose mode. In verbose mode, intermediate logs are printed to the console. Defaults to `False`.

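A short usage sketch:

```python
from langchain.chains import LLMMathChain
from langchain.llms import OpenAI

llm_math = LLMMathChain.from_llm(OpenAI(temperature=0), verbose=True)

# The LLM translates the question into a math expression, which is then evaluated.
print(llm_math.run("What is 13 raised to the 0.3432 power?"))
```
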
---
### RetrievalQA

`RetrievalQA` is a chain used to find relevant documents or information to answer a given query. The retriever returns the relevant documents based on the query, and the QA component then extracts the answer from those documents, so the chain combines the capabilities of both to provide accurate and relevant answers.

<Admonition type="info">
  A retriever is a component that finds documents based on a query. It doesn't store the documents themselves, but it returns the ones that match the query.
</Admonition>

**Parameters**:

- **Combine Documents Chain:** Chain to use to combine the documents.
- **Memory:** Default memory store.
- **Retriever:** The retriever used to fetch relevant documents.
- **input_key:** Specifies the key in the input data that contains the question, which is passed to the question-answering model. Defaults to `query`.
- **output_key:** Specifies the key in the output data under which the generated answer is stored. Defaults to `result`.
- **return_source_documents:** Specifies whether to include the source documents used to answer the question in the output. Useful for providing additional context or references to the user. Defaults to `True`.
- **verbose:** Whether or not to run in verbose mode. In verbose mode, intermediate logs are printed to the console. Defaults to `False`.

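A sketch using LangChain's convenience constructor (again, `vectorstore` is assumed to exist already):

```python
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI

qa = RetrievalQA.from_chain_type(
    llm=OpenAI(temperature=0),
    chain_type="stuff",
    retriever=vectorstore.as_retriever(),  # assumed pre-built vector store
    return_source_documents=True,
)

result = qa({"query": "What is Langflow?"})
print(result["result"])
```
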
---
### SQLDatabaseChain

The `SQLDatabaseChain` finds answers to questions using a SQL database. It uses the language model to understand the question and generate the corresponding SQL code, then passes that SQL to the SQL database component, which executes the query on the database and returns the result.

**Parameters**:

- **Db:** SQL Database to connect to.
- **LLM:** Language Model to use in the chain.
- **Prompt:** Prompt template to translate natural language to SQL.

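A hedged sketch; depending on your LangChain version, `SQLDatabaseChain` may live in `langchain_experimental.sql` rather than in `langchain` itself, and the database URI below is illustrative:

```python
from langchain.llms import OpenAI
from langchain.sql_database import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain

db = SQLDatabase.from_uri("sqlite:///example.db")  # illustrative URI
chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db, verbose=True)
print(chain.run("How many users signed up last week?"))
```
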

@@ -2,115 +2,105 @@ import Admonition from "@theme/Admonition";

# Custom Components

Custom components let you extend Langflow by creating reusable and configurable components from a Python script.

<Admonition type="info" label="Tip">
  Read the [Custom Component Guidelines](../administration/custom-component) for detailed information on custom components.
</Admonition>

## Usage

To create a custom component:

1. Define a class that inherits from `langflow.CustomComponent`.
2. Implement a `build` method in your class.
3. Use type annotations in the `build` method to define component fields.
4. Optionally, use the `build_config` method to customize field appearance and behavior (see the sketch below).
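
A minimal sketch of these steps (the class name, fields, and defaults are illustrative, not part of Langflow's API):

```python
from langflow import CustomComponent
from langchain.schema import Document


class DocumentMaker(CustomComponent):
    display_name = "Document Maker"

    def build_config(self) -> dict:
        # Keys are the field names from the build method; values configure those fields.
        return {
            "text": {"display_name": "Text", "multiline": True, "required": True},
            "source": {"display_name": "Source", "info": "Where the text came from."},
        }

    def build(self, text: str, source: str = "manual") -> Document:
        document = Document(page_content=text, metadata={"source": source})
        self.status = document  # shown in the UI, useful for debugging
        return document
```
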
**Parameters**

- **Code:** The Python code that defines the component.

## CustomComponent Class

This class is the foundation for creating custom components. It allows users to create new, configurable components tailored to their needs.

### Methods

**build:** This method is essential in a `CustomComponent` class. It defines the component's functionality and how it processes input data. The `build` method is invoked when you click the **Build** button on the canvas.

The following types are supported in the `build` method:

| Supported Types                                            |
| ---------------------------------------------------------- |
| _`str`_, _`int`_, _`float`_, _`bool`_, _`list`_, _`dict`_  |
| _`langflow.field_typing.NestedDict`_                       |
| _`langflow.field_typing.Prompt`_                           |
| _`langchain.chains.base.Chain`_                            |
| _`langchain.PromptTemplate`_                               |
| _`langchain.schema.language_model.BaseLanguageModel`_      |
| _`langchain.Tool`_                                         |
| _`langchain.document_loaders.base.BaseLoader`_             |
| _`langchain.schema.Document`_                              |
| _`langchain.text_splitter.TextSplitter`_                   |
| _`langchain.vectorstores.base.VectorStore`_                |
| _`langchain.embeddings.base.Embeddings`_                   |
| _`langchain.schema.BaseRetriever`_                         |

The difference between _`dict`_ and _`langflow.field_typing.NestedDict`_ is that one adds a simple key-value pair field, while the other opens a more robust dictionary editor.

<Admonition type="info">
  To use the _`Prompt`_ type, add _`**kwargs`_ to the _`build`_ method, since the _`Prompt`_ type passes arbitrary keyword arguments to it. If you want to add the values of the variables to the template you defined, format the `PromptTemplate` inside the `CustomComponent` class.
</Admonition>

<Admonition type="info">
  Unlike LangChain types, base Python types do not add a [handle](../guidelines/components) to the field by default. To add handles, use the `input_types` key in the `build_config` method.
</Admonition>

**build_config:** Defines the configuration fields of the component. This method is called when the code is processed (when you click **Check and Save** in the code editor). It must return a dictionary whose top-level keys are field names and whose values, of type _`langflow.field_typing.TemplateField`_ or _`dict`_, define each field's behavior.

Supported keys for configuring fields:

| Key                 | Description                                                                            |
| ------------------- | -------------------------------------------------------------------------------------- |
| `is_list`           | Boolean indicating if the field can hold multiple values.                               |
| `options`           | List of strings displayed as a dropdown menu. Requires a `str` field type.              |
| `multiline`         | Boolean indicating if a string field opens a text editor, useful for longer texts.      |
| `input_types`       | Adds connection handles to string fields.                                               |
| `display_name`      | Field name displayed in the UI.                                                         |
| `advanced`          | Hides the field in the default canvas view (component settings only).                   |
| `password`          | Masks input, useful for sensitive data such as API keys.                                |
| `required`          | Makes the field mandatory.                                                              |
| `info`              | Adds a tooltip to the field.                                                            |
| `file_types`        | Accepted file types (e.g. `json`, `yaml`); required when the field type is `file`.      |
| `range_spec`        | Defines the valid range and step size for `float` fields. Defaults to `[-1, 1, 0.1]`.   |
| `title_case`        | Boolean that controls field name capitalization when `display_name` is not set.         |
| `refresh_button`    | Adds a refresh button that updates field values.                                        |
| `real_time_refresh` | Updates the configuration as field values change.                                       |
| `field_type`        | Automatically set based on the build method's type hint.                                |

<Admonition type="info" label="Tip">
  Keys `options` and `value` can receive a method or function that returns a list of strings or a string, respectively. This is useful for dynamically generating a field's options or default value; a refresh button appears next to the field so the user can update them. Use the `update_build_config` method to dynamically update configurations based on field values.
</Admonition>

## Additional methods and attributes

The `CustomComponent` class also provides helpful methods for specific tasks (e.g., to load and use other flows from the Langflow platform):

### Methods

- `list_flows`: Returns a list of Flow objects, each with an `id` and a `name`.
- `get_flow`: Retrieves a specific flow; parameters are `flow_name` or `flow_id`.
- `load_flow`: Loads a flow from a given `id`.
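
For instance, a component could populate a dropdown from the available flows. A sketch, grounded only in the methods above (the class and field names are illustrative):

```python
from langflow import CustomComponent


class FlowPicker(CustomComponent):
    display_name = "Flow Picker"

    def build_config(self) -> dict:
        return {
            "flow_name": {
                "display_name": "Flow Name",
                # A callable regenerates the options when the refresh button is clicked.
                "options": lambda: [flow.name for flow in self.list_flows()],
                "refresh_button": True,
            }
        }

    def build(self, flow_name: str) -> str:
        flow = self.get_flow(flow_name=flow_name)
        return f"Selected flow: {flow.name}"
```
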

### Attributes

- `status`: Displays the value it receives from the `build` method, useful for debugging.
- `field_order`: Controls the order in which fields are displayed in the canvas.
- `icon`: Sets the emoji (for example, `:rocket:`) displayed in the canvas.

<Admonition type="info" label="Tip">
  Check out the [FlowRunner](../examples/flow-runner) example to understand how to call a flow from a custom component.
</Admonition>

docs/docs/components/data.mdx (Normal file, 60 lines)

@@ -0,0 +1,60 @@

import Admonition from '@theme/Admonition';

# Data

## API Request

This component sends HTTP requests to the specified URLs.

Use this component to interact with external APIs or services and retrieve data. Ensure that the URLs are valid and that you configure the method, headers, body, and timeout correctly.

**Parameters:**

- **URLs:** The URLs to target.
- **Method:** The HTTP method, such as GET or POST.
- **Headers:** The headers to include with the request.
- **Body:** The data to send with the request (for methods like POST, PATCH, PUT).
- **Timeout:** The maximum time to wait for a response.

---

## Directory

This component recursively retrieves files from a specified directory.

Use this component to retrieve various file types, such as text or JSON files, from a directory. Make sure to provide the correct path and configure the other parameters as needed.

**Parameters:**

- **Path:** The directory path.
- **Types:** The types of files to retrieve. Leave this blank to retrieve all file types.
- **Depth:** The level of directory depth to search.
- **Max Concurrency:** The maximum number of simultaneous file loading operations.
- **Load Hidden:** Set to true to include hidden files.
- **Recursive:** Set to true to enable recursive search.
- **Silent Errors:** Set to true to suppress exceptions on errors.
- **Use Multithreading:** Set to true to use multithreading in file loading.

---

## File

This component loads a file.

Use this component to load files, such as text or JSON files. Ensure you specify the correct path and configure other parameters as necessary.

**Parameters:**

- **Path:** The file path.
- **Silent Errors:** Set to true to prevent exceptions on errors.

---

## URL

This component retrieves content from specified URLs.

Ensure the URLs are valid and adjust other parameters as needed.

**Parameters:**

- **URLs:** The URLs to retrieve content from.

@@ -1,123 +1,116 @@

import Admonition from "@theme/Admonition";

# Embeddings

<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
  <p>
    We appreciate your understanding as we polish our documentation – it may
    contain some rough edges. Share your feedback or report issues to help us
    improve! 🛠️📝
  </p>
</Admonition>

Embeddings are vector representations of text that capture the semantic meaning of the text. They are created using text embedding models and allow us to think about the text in a vector space, enabling tasks like semantic search, where we look for the pieces of text that are most similar in the vector space.

## Amazon Bedrock Embeddings

Used to load embedding models from [Amazon Bedrock](https://aws.amazon.com/bedrock/).

| **Parameter**              | **Type** | **Description**                                                                                                                                      | **Default** |
| -------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
| `credentials_profile_name` | `str`    | Name of the AWS credentials profile in `~/.aws/credentials` or `~/.aws/config`, which has access keys or role information.                            |             |
| `model_id`                 | `str`    | ID of the model to call, e.g., `amazon.titan-embed-text-v1`. This is equivalent to the `modelId` property in the `list-foundation-models` API.        |             |
| `endpoint_url`             | `str`    | URL to set a specific service endpoint other than the default AWS endpoint.                                                                           |             |
| `region_name`              | `str`    | AWS region to use, e.g., `us-west-2`. Falls back to the `AWS_DEFAULT_REGION` environment variable or the region in `~/.aws/config` if not provided.   |             |

## Cohere Embeddings

Used to load embedding models from [Cohere](https://cohere.com/).

| **Parameter**    | **Type** | **Description**                                                           | **Default**          |
| ---------------- | -------- | -------------------------------------------------------------------------- | -------------------- |
| `cohere_api_key` | `str`    | API key required to authenticate with the Cohere service.                  |                      |
| `model`          | `str`    | Language model used for embedding text documents and performing queries.   | `embed-english-v2.0` |
| `truncate`       | `bool`   | Whether to truncate the input text to fit within the model's constraints.  | `False`              |

## Azure OpenAI Embeddings

Generate embeddings using Azure OpenAI models.

| **Parameter**     | **Type** | **Description**                                                                                     | **Default** |
| ----------------- | -------- | ----------------------------------------------------------------------------------------------------- | ----------- |
| `Azure Endpoint`  | `str`    | Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/`   |             |
| `Deployment Name` | `str`    | The name of the deployment.                                                                           |             |
| `API Version`     | `str`    | The API version to use; options include various dates.                                                |             |
| `API Key`         | `str`    | The API key to access the Azure OpenAI service.                                                       |             |

## Hugging Face API Embeddings

Generate embeddings using Hugging Face Inference API models.

| **Parameter**   | **Type** | **Description**                                        | **Default**              |
| --------------- | -------- | ------------------------------------------------------- | ------------------------ |
| `API Key`       | `str`    | API key for accessing the Hugging Face Inference API.   |                          |
| `API URL`       | `str`    | URL of the Hugging Face Inference API.                  | `http://localhost:8080`  |
| `Model Name`    | `str`    | Name of the model to use for embeddings.                | `BAAI/bge-large-en-v1.5` |
| `Cache Folder`  | `str`    | Folder path to cache Hugging Face models.               |                          |
| `Encode Kwargs` | `dict`   | Additional arguments for the encoding process.          |                          |
| `Model Kwargs`  | `dict`   | Additional arguments for the model.                     |                          |
| `Multi Process` | `bool`   | Whether to use multiple processes.                      | `False`                  |

## Hugging Face Embeddings

Used to load embedding models from [HuggingFace](https://huggingface.co).

| **Parameter**   | **Type** | **Description**                                 | **Default**                               |
| --------------- | -------- | ------------------------------------------------ | ----------------------------------------- |
| `Cache Folder`  | `str`    | Folder path to cache HuggingFace models.         |                                           |
| `Encode Kwargs` | `dict`   | Additional arguments for the encoding process.   |                                           |
| `Model Kwargs`  | `dict`   | Additional arguments for the model.              |                                           |
| `Model Name`    | `str`    | Name of the HuggingFace model to use.            | `sentence-transformers/all-mpnet-base-v2` |
| `Multi Process` | `bool`   | Whether to use multiple processes.               | `False`                                   |

## OpenAI Embeddings

Used to load embedding models from [OpenAI](https://openai.com/).

| **Parameter**              | **Type**         | **Description**                                     | **Default**              |
| -------------------------- | ---------------- | ----------------------------------------------------- | ------------------------ |
| `OpenAI API Key`           | `str`            | The API key to use for accessing the OpenAI API.      |                          |
| `Default Headers`          | `Dict[str, str]` | Default headers for the HTTP requests.                |                          |
| `Default Query`            | `NestedDict`     | Default query parameters for the HTTP requests.       |                          |
| `Allowed Special`          | `List[str]`      | Special tokens allowed for processing.                | `[]`                     |
| `Disallowed Special`       | `List[str]`      | Special tokens disallowed for processing.             | `["all"]`                |
| `Chunk Size`               | `int`            | Chunk size for processing.                            | `1000`                   |
| `Client`                   | `Any`            | HTTP client for making requests.                      |                          |
| `Deployment`               | `str`            | Deployment name for the model.                        | `text-embedding-3-small` |
| `Embedding Context Length` | `int`            | Length of embedding context.                          | `8191`                   |
| `Max Retries`              | `int`            | Maximum number of retries for failed requests.        | `6`                      |
| `Model`                    | `str`            | Name of the model to use.                             | `text-embedding-3-small` |
| `Model Kwargs`             | `NestedDict`     | Additional keyword arguments for the model.           |                          |
| `OpenAI API Base`          | `str`            | Base URL of the OpenAI API.                           |                          |
| `OpenAI API Type`          | `str`            | Type of the OpenAI API.                               |                          |
| `OpenAI API Version`       | `str`            | Version of the OpenAI API.                            |                          |
| `OpenAI Organization`      | `str`            | Organization associated with the API key.             |                          |
| `OpenAI Proxy`             | `str`            | Proxy server for the requests.                        |                          |
| `Request Timeout`          | `float`          | Timeout for the HTTP requests.                        |                          |
| `Show Progress Bar`        | `bool`           | Whether to show a progress bar for processing.        | `False`                  |
| `Skip Empty`               | `bool`           | Whether to skip empty inputs.                         | `False`                  |
| `TikToken Enable`          | `bool`           | Whether to enable TikToken.                           | `True`                   |
| `TikToken Model Name`      | `str`            | Name of the TikToken model.                           |                          |

## Ollama Embeddings

Generate embeddings using Ollama models.

| **Parameter**       | **Type** | **Description**                                                                            | **Default**              |
| ------------------- | -------- | -------------------------------------------------------------------------------------------- | ------------------------ |
| `Ollama Model`      | `str`    | Name of the Ollama model to use.                                                             | `llama2`                 |
| `Ollama Base URL`   | `str`    | Base URL of the Ollama API.                                                                  | `http://localhost:11434` |
| `Model Temperature` | `float`  | Temperature parameter for the model. Adjusts the randomness in the generated embeddings.     |                          |

## VertexAI Embeddings

Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embeddings API](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings).

<Admonition type="info">
  Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
</Admonition>

| **Parameter**         | **Type**      | **Description**                                                                                                        | **Default**   |
| --------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------ | ------------- |
| `credentials`         | `Credentials` | The default custom credentials to use.                                                                                    |               |
| `location`            | `str`         | The default location to use when making API calls.                                                                        | `us-central1` |
| `max_output_tokens`   | `int`         | Token limit determines the maximum amount of text output from one prompt.                                                 | `128`         |
| `model_name`          | `str`         | The name of the Vertex AI large language model.                                                                           | `text-bison`  |
| `project`             | `str`         | The default GCP project to use when making Vertex API calls.                                                              |               |
| `request_parallelism` | `int`         | The amount of parallelism allowed for requests issued to VertexAI models.                                                 | `5`           |
| `temperature`         | `float`       | Tunes the degree of randomness in text generations. Should be a non-negative value.                                       | `0`           |
| `top_k`               | `int`         | How the model selects tokens for output; the next token is selected from the top `k` tokens.                              | `40`          |
| `top_p`               | `float`       | Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value.       | `0.95`        |
| `tuned_model_name`    | `str`         | The name of a tuned model. If provided, `model_name` is ignored.                                                          |               |
| `verbose`             | `bool`        | Controls the level of detail in the output. When set to `True`, internal states of the chain are printed to help debug.   | `False`       |


docs/docs/components/experimental.mdx (Normal file, 252 lines)

@@ -0,0 +1,252 @@

import Admonition from '@theme/Admonition';

# Experimental

Components in the experimental phase are currently in beta. They have been initially developed and tested but haven't yet achieved a stable or fully supported status. We encourage users to explore these components, provide feedback, and report any issues encountered.

### Clear Message History Component

This component clears the message history for a specified session ID.

**Beta:** This component is in beta.

**Parameters**

- **Session ID:**
  - **Display Name:** Session ID
  - **Info:** Clears the message history for this ID.

**Usage**

Provide the session ID to clear its message history.

---

### Extract Key From Record

This component extracts specified keys from a record.

**Parameters**

- **Record:**
  - **Display Name:** Record
  - **Info:** The record from which to extract keys.
- **Keys:**
  - **Display Name:** Keys
  - **Info:** The keys to be extracted.
- **Silent Errors:**
  - **Display Name:** Silent Errors
  - **Info:** Set to true to suppress errors.
  - **Advanced:** True

**Usage**

Provide the record and specify the keys you want to extract. Optionally, enable silent errors for missing keys.

---

### Flow as Tool

This component turns a function running a flow into a Tool.

**Parameters**

- **Flow Name:**
  - **Display Name:** Flow Name
  - **Info:** Select the flow to run.
  - **Options:** List of available flows.
  - **Real-time Refresh:** True
  - **Refresh Button:** True
- **Name:**
  - **Display Name:** Name
  - **Description:** The tool's name.
- **Description:**
  - **Display Name:** Description
  - **Description:** Describes the tool.
- **Return Direct:**
  - **Display Name:** Return Direct
  - **Description:** Returns the result directly.
  - **Advanced:** True

**Usage**

Select a flow, name and describe the tool, and decide if you want to return the result directly.

---

### Listen

This component listens for a specified notification.

**Parameters**

- **Name:**
  - **Display Name:** Name
  - **Info:** The notification to listen for.

**Usage**

Specify the notification to listen for.

---

### List Flows

This component lists all available flows.

**Usage**

Call this component without parameters to list all flows.

---

### Merge Records

This component merges a list of records.

**Parameters**

- **Records:**
  - **Display Name:** Records

**Usage**

Provide the records you want to merge.

---

### Notify

This component generates a notification.

**Parameters**

- **Name:**
  - **Display Name:** Name
  - **Info:** The notification's name.
- **Record:**
  - **Display Name:** Record
  - **Info:** Optionally, a record to store in the notification.
- **Append:**
  - **Display Name:** Append
  - **Info:** Set to true to append the record to the notification.

**Usage**

Specify the notification name, provide a record if necessary, and indicate whether to append it.

---

### Run Flow

This component runs a specified flow.

**Parameters**

- **Input Value:**
  - **Display Name:** Input Value
  - **Multiline:** True
- **Flow Name:**
  - **Display Name:** Flow Name
  - **Info:** Select the flow to run.
  - **Options:** List of available flows.
  - **Refresh Button:** True
- **Tweaks:**
  - **Display Name:** Tweaks
  - **Info:** Modifications to apply to the flow.

**Usage**

Provide the input value, select the flow, and apply any tweaks.

---

### Runnable Executor

This component executes a specified runnable.

**Parameters**

- **Input Key:**
  - **Display Name:** Input Key
  - **Info:** The input key.
- **Inputs:**
  - **Display Name:** Inputs
  - **Info:** Inputs for the runnable.
- **Runnable:**
  - **Display Name:** Runnable
  - **Info:** The runnable to execute.
- **Output Key:**
  - **Display Name:** Output Key
  - **Info:** The output key.

**Usage**

Specify the input key, provide inputs, select the runnable, and optionally define the output key.

---

### SQL Executor

This component executes an SQL query.

**Parameters**

- **Database URL:**
  - **Display Name:** Database URL
  - **Info:** The database's URL.
- **Include Columns:**
  - **Display Name:** Include Columns
  - **Info:** Whether to include columns in the result.
- **Passthrough:**
  - **Display Name:** Passthrough
  - **Info:** Returns the query instead of raising an exception if an error occurs.
- **Add Error:**
  - **Display Name:** Add Error
  - **Info:** Includes the error in the result.

**Usage**

Provide the SQL query, specify the database URL, and configure settings for columns, error handling, and passthrough.

---

### SubFlow

This component dynamically generates a tool from a flow.

**Parameters**

- **Input Value:**
  - **Display Name:** Input Value
  - **Multiline:** True
- **Flow Name:**
  - **Display Name:** Flow Name
  - **Info:** Select the flow to run.
  - **Options:** List of available flows.
  - **Real Time Refresh:** True
  - **Refresh Button:** True
- **Tweaks:**
  - **Display Name:** Tweaks
  - **Info:** Modifications to apply to the flow.

**Usage**

Select a flow, apply any necessary tweaks, and generate a tool.


docs/docs/components/helpers.mdx (Normal file, 127 lines)

@@ -0,0 +1,127 @@

import Admonition from '@theme/Admonition';
|
||||
|
||||
# Helpers
|
||||
|
||||
### Chat memory
|
||||
|
||||
This component retrieves stored chat messages based on a specific session ID.
|
||||
|
||||
#### Parameters
|
||||
|
||||
- **Sender type:** Choose the sender type from options like "Machine", "User", or "Both".
|
||||
- **Sender name:** (Optional) The name of the sender.
|
||||
- **Number of messages:** Number of messages to retrieve.
|
||||
- **Session ID:** The session ID of the chat history.
|
||||
- **Order:** Choose the message order, either "Ascending" or "Descending".
|
||||
- **Record template:** (Optional) Template to convert a record to text. If left empty, the system dynamically sets it to the record's text key.
|
||||
|
||||
---
|
||||
|
||||
### Combine text
|
||||
|
||||
This component concatenates two text sources into a single text chunk using a specified delimiter.
|
||||
|
||||
#### Parameters
|
||||
|
||||
- **First text:** The first text input to concatenate.
|
||||
- **Second text:** The second text input to concatenate.
|
||||
- **Delimiter:** A string used to separate the two text inputs. Defaults to a space.
|
||||
|
||||
---
|
||||
|
||||
### Create record
|
||||
|
||||
This component dynamically creates a record with a specified number of fields.
|
||||
|
||||
#### Parameters
|
||||
|
||||
- **Number of fields:** Number of fields to be added to the record.
|
||||
- **Text key:** Key used as text.
|
||||
|
||||
---
|
||||
|
||||
### Custom component
|
||||
|
||||
Use this component as a template to create your custom component.
|
||||
|
||||
#### Parameters
|
||||
|
||||
- **Parameter:** Describe the purpose of this parameter.
|
||||
|
||||
<Admonition type="info" title="Info">
|
||||
<p>
|
||||
Customize the <code>build_config</code> and <code>build</code> methods according to your requirements.
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
Learn more about creating custom components at [Custom Component](http://docs.langflow.org/components/custom).
|
||||
|
||||
---
|
||||
|
||||
### Documents to records
|
||||
|
||||
Convert LangChain documents into records.
|
||||
|
||||
#### Parameters
|
||||
|
||||
- **Documents:** Documents to be converted into records.
|
||||
|
||||
---
|
||||
|
||||
### ID generator
|
||||
|
||||
Generates a unique ID.
|
||||
|
||||
#### Parameters
|
||||
|
||||
- **Value:** Unique ID generated.
|
||||
|
||||
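An ID of this kind is typically a UUID. For reference, a minimal Python equivalent (an assumption about the component's internals, not a guarantee):

```python
import uuid

unique_id = str(uuid.uuid4())  # e.g. "3f2b8a1e-9c4d-4f6a-8b2e-7d1c5e9a0f3b"
```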
---
|
||||
|
||||
### Message history
|
||||
|
||||
Retrieves stored chat messages based on a specific session ID.
|
||||
|
||||
#### Parameters
|
||||
|
||||
- **Sender type:** Options for the sender type.
|
||||
- **Sender name:** Sender name.
|
||||
- **Number of messages:** Number of messages to retrieve.
|
||||
- **Session ID:** Session ID of the chat history.
|
||||
- **Order:** Order of the messages.
|
||||
|
||||
---
|
||||
|
||||
### Records to text
|
||||
|
||||
Convert records into plain text following a specified template.
|
||||
|
||||
#### Parameters
|
||||
|
||||
- **Records:** The records to convert to text.
|
||||
- **Template:** The template used for formatting the records. It can contain keys like `{text}`, `{data}`, or any other key in the record.
|
||||
|
||||
---
|
||||
|
||||
### Split text
|
||||
|
||||
Split text into chunks of a specified length.
|
||||
|
||||
#### Parameters
|
||||
|
||||
- **Texts:** Texts to split.
|
||||
- **Separators:** Characters to split on. Defaults to a space.
|
||||
- **Max chunk size:** The maximum length (in characters) of each chunk.
|
||||
- **Chunk overlap:** The amount of character overlap between chunks.
|
||||
- **Recursive:** Whether to split recursively.
|
||||
|
||||
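To make the size and overlap parameters concrete, here is a minimal character-based chunker; the component's actual splitting strategy (separators, recursion) may differ.

```python
def split_text(text: str, max_chunk_size: int = 20, chunk_overlap: int = 5) -> list[str]:
    chunks = []
    start = 0
    step = max_chunk_size - chunk_overlap  # each chunk starts this far after the previous one
    while start < len(text):
        chunks.append(text[start : start + max_chunk_size])
        start += step
    return chunks

print(split_text("The quick brown fox jumps over the lazy dog"))
# -> ['The quick brown fox ', ' fox jumps over the ', ' the lazy dog']
```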
---
|
||||
|
||||
### Update record
|
||||
|
||||
Update a record with text-based key/value pairs, similar to updating a Python dictionary.
|
||||
|
||||
#### Parameters
|
||||
|
||||
- **Record:** The record to update.
|
||||
- **New data:** The new data to update the record with.
|
||||
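Since the semantics mirror `dict.update`, a minimal Python equivalent looks like this (record contents invented for illustration):

```python
record = {"text": "hello", "sender": "User"}
new_data = {"sender": "Machine", "session_id": "abc-123"}

record.update(new_data)  # existing keys are overwritten, new keys are added
# record -> {"text": "hello", "sender": "Machine", "session_id": "abc-123"}
```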
99
docs/docs/components/inputs.mdx
Normal file
|
|
@ -0,0 +1,99 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
import ZoomableImage from "/src/theme/ZoomableImage.js";
|
||||
|
||||
# Inputs
|
||||
|
||||
## Chat Input
|
||||
|
||||
This component obtains user input from the chat.
|
||||
|
||||
**Parameters**
|
||||
|
||||
- **Sender Type:** Specifies the sender type. Defaults to `User`. Options are `Machine` and `User`.
|
||||
- **Sender Name:** Specifies the name of the sender. Defaults to `User`.
|
||||
- **Message:** Specifies the message text. It is a multiline text input.
|
||||
- **Session ID:** Specifies the session ID of the chat history. If provided, the message will be saved in the Message History.
|
||||
|
||||
<Admonition type="note" title="Note">
|
||||
<p>
|
||||
If `As Record` is `true` and the `Message` is a `Record`, the data
|
||||
of the `Record` will be updated with the `Sender`, `Sender Name`, and
|
||||
`Session ID`.
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/chat-input-expanded.png",
|
||||
dark: "img/chat-input-expanded.png",
|
||||
}}
|
||||
style={{ width: "40%", margin: "20px auto" }}
|
||||
/>
|
||||
|
||||
One significant capability of the Chat Input component is its ability to transform the Playground into a chat window. This feature is particularly valuable for scenarios requiring user input to initiate or influence the flow.
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/interaction-panel-with-chat-input.png",
|
||||
dark: "img/interaction-panel-with-chat-input.png",
|
||||
}}
|
||||
style={{ width: "50%", margin: "20px auto" }}
|
||||
/>
|
||||
|
||||
---
|
||||
|
||||
## Prompt
|
||||
|
||||
This component creates a prompt template with dynamic variables. This is useful for structuring prompts and passing dynamic data to a language model.
|
||||
|
||||
**Parameters**
|
||||
|
||||
- **Template:** The template for the prompt. This field allows you to create other fields dynamically by using curly brackets `{}`. For example, if you have a template like `Hello {name}, how are you?`, a new field called `name` will be created. Prompt variables can be created with any name inside curly brackets, e.g. `{variable_name}`.
|
||||
|
||||
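For intuition, this is roughly how curly-bracket variables can be discovered and filled in plain Python; the field name is invented for the example, and this is not Langflow's actual parser.

```python
from string import Formatter

template = "Hello {name}, how are you?"

# Collect the field names that appear between curly brackets.
variables = [field for _, field, _, _ in Formatter().parse(template) if field]
# variables -> ["name"]

prompt = template.format(name="Ada")
# prompt -> "Hello Ada, how are you?"
```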
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/prompt-with-template.png",
|
||||
dark: "img/prompt-with-template.png",
|
||||
}}
|
||||
style={{ width: "50%", margin: "20px auto" }}
|
||||
/>
|
||||
|
||||
---
|
||||
|
||||
## Text Input
|
||||
|
||||
The **Text Input** component adds an **Input** field on the Playground. This enables you to define parameters while running and testing your flow.
|
||||
|
||||
**Parameters**
|
||||
|
||||
- **Value:** Specifies the text input value. This is where the user inputs text data that will be passed to the next component in the sequence. If no value is provided, it defaults to an empty string.
|
||||
- **Record Template:** Specifies how a `Record` should be converted into `Text`.
|
||||
|
||||
The **Record Template** field is used to specify how a `Record` should be converted into `Text`. This is particularly useful when you want to extract specific information from a `Record` and pass it as text to the next component in the sequence.
|
||||
|
||||
For example, if you have a `Record` with the following structure:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "John Doe",
|
||||
"age": 30,
|
||||
"email": "johndoe@email.com"
|
||||
}
|
||||
```
|
||||
|
||||
A template with `Name: {name}, Age: {age}` will convert the `Record` into a text string of `Name: John Doe, Age: 30`.
|
||||
|
||||
If you pass more than one `Record`, the text will be concatenated with a new line separator.
|
||||
|
||||
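A minimal sketch of that conversion, reusing the example above (the second record is invented to show the newline concatenation):

```python
records = [
    {"name": "John Doe", "age": 30, "email": "johndoe@email.com"},
    {"name": "Jane Roe", "age": 25, "email": "janeroe@email.com"},
]
template = "Name: {name}, Age: {age}"

text = "\n".join(template.format(**record) for record in records)
# -> "Name: John Doe, Age: 30\nName: Jane Roe, Age: 25"
```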
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/text-input-expanded.png",
|
||||
dark: "img/text-input-expanded.png",
|
||||
}}
|
||||
style={{ width: "50%", margin: "20px auto" }}
|
||||
/>
|
||||
|
||||
|
|
@ -1,73 +0,0 @@
|
|||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# I/O
|
||||
|
||||
### ChatInput
|
||||
|
||||
This component is designed to get user input from the chat.
|
||||
|
||||
**Params**
|
||||
|
||||
- **Sender Type:** specifies the sender type. Defaults to _`"User"`_. Options are _`"Machine"`_ and _`"User"`_.
|
||||
|
||||
- **Sender Name:** specifies the name of the sender. Defaults to _`"User"`_.
|
||||
|
||||
- **Message:** specifies the message text. It is a multiline text input.
|
||||
|
||||
- **Session ID:** specifies the session ID of the chat history. If provided, the message will be saved in the Message History.
|
||||
|
||||
<Admonition type="note" title="Note">
|
||||
<p>
|
||||
If _`As Record`_ is _`true`_ and the _`Message`_ is a _`Record`_, the data of the _`Record`_ will be updated with the _`Sender`_, _`Sender Name`_, and _`Session ID`_.
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
### ChatOutput
|
||||
|
||||
This component is designed to send a message to the chat.
|
||||
|
||||
**Params**
|
||||
|
||||
- **Sender Type:** specifies the sender type. Defaults to _`"Machine"`_. Options are _`"Machine"`_ and _`"User"`_.
|
||||
|
||||
- **Sender Name:** specifies the name of the sender. Defaults to _`"AI"`_.
|
||||
|
||||
- **Session ID:** specifies the session ID of the chat history. If provided, the message will be saved in the Message History.
|
||||
|
||||
- **Message:** specifies the message text.
|
||||
|
||||
<Admonition type="note" title="Note">
|
||||
<p>
|
||||
If _`As Record`_ is _`true`_ and the _`Message`_ is a _`Record`_, the data of the _`Record`_ will be updated with the _`Sender`_, _`Sender Name`_, and _`Session ID`_.
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
|
||||
### TextInput
|
||||
|
||||
This component is designed for simple text input, allowing users to pass textual data to subsequent components in the workflow. It's particularly useful for scenarios where a brief user input is required to initiate or influence the flow.
|
||||
|
||||
|
||||
**Params**
|
||||
|
||||
- **Value:** Specifies the text input value. This is where the user can input the text data that will be passed to the next component in the sequence. If no value is provided, it defaults to an empty string.
|
||||
|
||||
<Admonition type="note" title="Note">
|
||||
<p>
|
||||
The `TextInput` component serves as a straightforward means for setting Text input values in the chat window. It ensures that textual data can be seamlessly passed to subsequent components in the flow.
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
### TextOutput
|
||||
|
||||
This component is designed to display text data to the user. It's particularly useful for scenarios where you don't want to send the text data to the chat, but still want to display it.
|
||||
|
||||
**Params**
|
||||
|
||||
- **Value:** Specifies the text data to be displayed. This is where the text data to be displayed is provided. If no value is provided, it defaults to an empty string.
|
||||
|
||||
<Admonition type="note" title="Note">
|
||||
<p>
|
||||
The `TextOutput` component serves as a straightforward means for displaying text data. It ensures that textual data can be seamlessly observed in the chat window throughout your flow.
|
||||
</p>
|
||||
</Admonition>
|
||||
|
|
@ -1,221 +0,0 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# LLMs
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
LLM stands for Large Language Model. It is a core component of Langflow and provides a standard interface for interacting with different LLMs from various providers such as OpenAI, Cohere, and HuggingFace. LLMs are used widely throughout Langflow, including in chains and agents. They can be used to generate text based on a given prompt (or input).
|
||||
|
||||
---
|
||||
|
||||
### Anthropic
|
||||
|
||||
Wrapper around Anthropic's large language models. Find out more at [Anthropic](https://www.anthropic.com).
|
||||
|
||||
- **anthropic_api_key:** Used to authenticate and authorize access to the Anthropic API.
|
||||
|
||||
- **anthropic_api_url:** Specifies the URL of the Anthropic API to connect to.
|
||||
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value.
|
||||
|
||||
---
|
||||
|
||||
### ChatAnthropic
|
||||
|
||||
Wrapper around Anthropic's large language model used for chat-based interactions. Find out more at [Anthropic](https://www.anthropic.com).
|
||||
|
||||
- **anthropic_api_key:** Used to authenticate and authorize access to the Anthropic API.
|
||||
|
||||
- **anthropic_api_url:** Specifies the URL of the Anthropic API to connect to.
|
||||
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value.
|
||||
|
||||
---
|
||||
|
||||
### CTransformers
|
||||
|
||||
The `CTransformers` component provides access to the Transformer models implemented in C/C++ using the [GGML](https://github.com/ggerganov/ggml) library.
|
||||
|
||||
<Admonition type="info">
|
||||
|
||||
Make sure to have the `ctransformers` python package installed. Learn more about installation, supported models, and usage [here](https://github.com/marella/ctransformers).
|
||||
</Admonition>
|
||||
|
||||
**config:** Configuration for the Transformer models. Check out [config](https://github.com/marella/ctransformers#config). Defaults to:
|
||||
|
||||
```
{
  "top_k": 40,
  "top_p": 0.95,
  "temperature": 0.8,
  "repetition_penalty": 1.1,
  "last_n_tokens": 64,
  "seed": -1,
  "max_new_tokens": 256,
  "stop": null,
  "stream": false,
  "reset": true,
  "batch_size": 8,
  "threads": -1,
  "context_length": -1,
  "gpu_layers": 0
}
```
|
||||
|
||||
**model:** The path to a model file or directory or the name of a Hugging Face Hub model repo.
|
||||
|
||||
**model_file:** The name of the model file in the repo or directory.
|
||||
|
||||
**model_type:** Transformer model to be used. Learn more [here](https://github.com/marella/ctransformers).
|
||||
|
||||
---
|
||||
|
||||
### ChatOpenAI
|
||||
|
||||
Wrapper around [OpenAI's](https://openai.com) chat large language models. This component supports some of the LLMs (Large Language Models) made available by OpenAI and is used for tasks such as chatbots, Generative Question-Answering (GQA), and summarization.
|
||||
|
||||
- **max_tokens:** The maximum number of tokens to generate in the completion. `-1` returns as many tokens as possible, given the prompt and the model's maximal context size – defaults to `256`.
|
||||
- **model_kwargs:** Holds any model parameters valid for creating non-specified calls.
|
||||
- **model_name:** Defines the OpenAI chat model to be used.
|
||||
- **openai_api_base:** Used to specify the base URL for the OpenAI API. It is typically set to the API endpoint provided by the OpenAI service.
|
||||
- **openai_api_key:** Key used to authenticate and access the OpenAI API.
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value – defaults to `0.7`.
|
||||
|
||||
---
|
||||
|
||||
### Cohere
|
||||
|
||||
Wrapper around [Cohere's](https://cohere.com) large language models.
|
||||
|
||||
- **cohere_api_key:** Holds the API key required to authenticate with the Cohere service.
|
||||
- **max_tokens:** Maximum number of tokens to predict per generation – defaults to `256`.
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value – defaults to `0.75`.
|
||||
|
||||
---
|
||||
|
||||
### HuggingFaceHub
|
||||
|
||||
Wrapper around [HuggingFace](https://www.huggingface.co/models) models.
|
||||
|
||||
<Admonition type="info">
|
||||
The HuggingFace Hub is an online platform that hosts over 120k models, 20k datasets, and 50k demo apps, all of which are open-source and publicly available. Discover more at [HuggingFace](http://www.huggingface.co).
|
||||
</Admonition>
|
||||
|
||||
- **huggingfacehub_api_token:** Token needed to authenticate the API.
|
||||
- **model_kwargs:** Keyword arguments to pass to the model.
|
||||
- **repo_id:** Model name to use – defaults to `gpt2`.
|
||||
- **task:** Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.
|
||||
|
||||
---
|
||||
|
||||
### LlamaCpp
|
||||
|
||||
The `LlamaCpp` component provides access to the `llama.cpp` models.
|
||||
|
||||
<Admonition type="info">
|
||||
Make sure to have the `llama.cpp` python package installed. Learn more about installation, supported models, and usage [here](https://github.com/ggerganov/llama.cpp).
|
||||
</Admonition>
|
||||
|
||||
- **echo:** Whether to echo the prompt – defaults to `False`.
|
||||
- **f16_kv:** Use half-precision for key/value cache – defaults to `True`.
|
||||
- **last_n_tokens_size:** The number of tokens to look back at when applying the repeat_penalty. Defaults to `64`.
|
||||
- **logits_all:** Return logits for all tokens, not just the last token. Defaults to `False`.
|
||||
- **logprobs:** The number of logprobs to return. If None, no logprobs are returned.
|
||||
- **lora_base:** The path to the Llama LoRA base model.
|
||||
- **lora_path:** The path to the Llama LoRA. If None, no LoRA is loaded.
|
||||
- **max_tokens:** The maximum number of tokens to generate. Defaults to `256`.
|
||||
- **model_path:** The path to the Llama model file.
|
||||
- **n_batch:** Number of tokens to process in parallel. Should be a number between 1 and n_ctx. Defaults to `8`.
|
||||
- **n_ctx:** Token context window. Defaults to `512`.
|
||||
- **n_gpu_layers:** Number of layers to be loaded into GPU memory. Defaults to `None`.
|
||||
- **n_parts:** Number of parts to split the model into. If -1, the number of parts is automatically determined. Defaults to `-1`.
|
||||
- **n_threads:** Number of threads to use. If None, the number of threads is automatically determined.
|
||||
- **repeat_penalty:** The penalty to apply to repeated tokens. Defaults to `1.1`.
|
||||
- **seed:** Seed. If -1, a random seed is used. Defaults to `-1`.
|
||||
- **stop:** A list of strings to stop generation when encountered.
|
||||
- **streaming:** Whether to stream the results, token by token. Defaults to `True`.
|
||||
- **suffix:** A suffix to append to the generated text. If None, no suffix is appended.
|
||||
- **tags:** Tags to add to the run trace.
|
||||
- **temperature:** The temperature to use for sampling. Defaults to `0.8`.
|
||||
- **top_k:** The top-k value to use for sampling. Defaults to `40`.
|
||||
- **top_p:** The top-p value to use for sampling. Defaults to `0.95`.
|
||||
- **use_mlock:** Force the system to keep the model in RAM. Defaults to `False`.
|
||||
- **use_mmap:** Whether to keep the model loaded in RAM. Defaults to `True`.
|
||||
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output. Defaults to `False`.
|
||||
- **vocab_only:** Only load the vocabulary, no weights. Defaults to `False`.
|
||||
|
||||
---
|
||||
|
||||
### OpenAI
|
||||
|
||||
Wrapper around [OpenAI's](https://openai.com) large language models.
|
||||
|
||||
- **max_tokens:** The maximum number of tokens to generate in the completion. `-1` returns as many tokens as possible, given the prompt and the model's maximal context size – defaults to `256`.
|
||||
- **model_kwargs:** Holds any model parameters valid for creating non-specified calls.
|
||||
- **model_name:** Defines the OpenAI model to be used.
|
||||
- **openai_api_base:** Used to specify the base URL for the OpenAI API. It is typically set to the API endpoint provided by the OpenAI service.
|
||||
- **openai_api_key:** Key used to authenticate and access the OpenAI API.
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value – defaults to `0.7`.
|
||||
|
||||
---
|
||||
|
||||
### VertexAI
|
||||
|
||||
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) large language models.
|
||||
|
||||
<Admonition type="info">
|
||||
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
|
||||
</Admonition>
|
||||
|
||||
- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
|
||||
- **location:** The default location to use when making API calls – defaults to `us-central1`.
|
||||
- **max_output_tokens:** Token limit determines the maximum amount of text output from one prompt – defaults to `128`.
|
||||
- **model_name:** The name of the Vertex AI large language model – defaults to `text-bison`.
|
||||
- **project:** The default GCP project to use when making Vertex API calls.
|
||||
- **request_parallelism:** The amount of parallelism allowed for requests issued to VertexAI models – defaults to `5`.
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value – defaults to `0`.
|
||||
- **top_k:** How the model selects tokens for output; the next token is selected from the `top_k` most probable tokens – defaults to `40`.
|
||||
- **top_p:** Tokens are selected from most probable to least until the sum of their probabilities reaches the `top_p` value – defaults to `0.95`.
|
||||
- **tuned_model_name:** The name of a tuned model. If provided, model_name is ignored.
|
||||
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output – defaults to `False`.
|
||||
|
||||
---
|
||||
|
||||
### ChatVertexAI
|
||||
|
||||
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) large language models.
|
||||
|
||||
<Admonition type="info">
|
||||
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
|
||||
</Admonition>
|
||||
|
||||
- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
|
||||
- **location:** The default location to use when making API calls – defaults to `us-central1`.
|
||||
- **max_output_tokens:** Token limit determines the maximum amount of text output from one prompt – defaults to `128`.
|
||||
- **model_name:** The name of the Vertex AI large language model – defaults to `text-bison`.
|
||||
- **project:** The default GCP project to use when making Vertex API calls.
|
||||
- **request_parallelism:** The amount of parallelism allowed for requests issued to VertexAI models – defaults to `5`.
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value – defaults to `0`.
|
||||
- **top_k:** How the model selects tokens for output; the next token is selected from the `top_k` most probable tokens – defaults to `40`.
|
||||
- **top_p:** Tokens are selected from most probable to least until the sum of their probabilities reaches the `top_p` value – defaults to `0.95`.
|
||||
- **tuned_model_name:** The name of a tuned model. If provided, model_name is ignored.
|
||||
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output – defaults to `False`.
|
||||
|
|
@ -4,125 +4,124 @@ import Admonition from '@theme/Admonition';
|
|||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
Thanks for your patience as we improve our documentation—it might have some rough edges. Share your feedback or report issues to help us enhance it! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
Memory is a concept in chat-based applications that allows the system to remember previous interactions. It helps in maintaining the context of the conversation and enables the system to understand new messages in relation to past messages.
|
||||
Memory is a concept in chat-based applications that allows the system to remember previous interactions. This capability helps maintain the context of the conversation and enables the system to understand new messages in light of past messages.
|
||||
|
||||
---
|
||||
|
||||
### MessageHistory
|
||||
|
||||
This component is designed to retrieve stored messages based on various filters such as sender type, sender name, session ID, and a specific file path where messages are stored. It allows for a flexible retrieval of chat history, providing insights into past interactions.
|
||||
This component retrieves stored messages using various filters such as sender type, sender name, session ID, and the specific file path where messages are stored. It offers flexible retrieval of chat history, providing insights into past interactions.
|
||||
|
||||
**Params**
|
||||
**Parameters**
|
||||
|
||||
- **Sender Type:** (Optional) Specifies the type of the sender. Options are _`"Machine"`_, _`"User"`_, or _`"Machine and User"`_. Filters the messages by the type of the sender.
|
||||
|
||||
- **Sender Name:** (Optional) Specifies the name of the sender. Filters the messages by the name of the sender.
|
||||
|
||||
- **Session ID:** (Optional) Specifies the session ID of the chat history. Filters the messages belonging to a specific session.
|
||||
|
||||
- **Number of Messages:** Specifies the number of messages to retrieve. Defaults to _`5`_. Determines how many recent messages from the chat history to fetch.
|
||||
- **sender_type** (optional): Specifies the sender's type. Options include `"Machine"`, `"User"`, or `"Machine and User"`. Filters messages by the sender type.
|
||||
- **sender_name** (optional): Specifies the sender's name. Filters messages by the sender's name.
|
||||
- **session_id** (optional): Specifies the session ID of the chat history. Filters messages by session.
|
||||
- **number_of_messages**: Specifies the number of messages to retrieve. Defaults to `5`. Determines the number of recent messages from the chat history to fetch.
|
||||
|
||||
<Admonition type="note" title="Note">
|
||||
<p>
|
||||
The component retrieves messages based on the provided criteria, including the specific file path for stored messages. If no specific criteria are provided, it will return the most recent messages up to the specified limit. This component can be used to review past interactions and analyze the flow of conversations.
|
||||
The component retrieves messages based on the provided criteria, including the specific file path for stored messages. If no specific criteria are provided, it returns the most recent messages up to the specified limit. This component can be used to review past interactions and analyze conversation flows.
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
### ConversationBufferMemory
|
||||
|
||||
The `ConversationBufferMemory` component is a type of memory system that plainly stores the last few inputs and outputs of a conversation.
|
||||
The `ConversationBufferMemory` component stores the last few inputs and outputs of a conversation.
|
||||
|
||||
**Params**
|
||||
**Parameters**
|
||||
|
||||
- **input_key:** Used to specify the key under which the user input will be stored in the conversation memory. It allows you to provide the user's input to the chain for processing and generating a response.
|
||||
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model – defaults to `chat_history`.
|
||||
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
|
||||
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
|
||||
- **input_key**: Specifies the key under which the user input will be stored in the conversation memory.
|
||||
- **memory_key**: Specifies the prompt variable name where the memory will store and retrieve chat messages. Defaults to `chat_history`.
|
||||
- **output_key**: Specifies the key under which the generated response will be stored.
|
||||
- **return_messages**: Determines whether the history should be returned as a string or as a list of messages. The default is `False`.
|
||||
|
||||
---
|
||||
|
||||
### ConversationBufferWindowMemory
|
||||
|
||||
`ConversationBufferWindowMemory` is a variation of the `ConversationBufferMemory` that maintains a list of the recent interactions in a conversation. It only keeps the last K interactions in memory, which can be useful for maintaining a sliding window of the most recent interactions without letting the buffer get too large.
|
||||
`ConversationBufferWindowMemory` is a variant of the `ConversationBufferMemory` that keeps only the last K interactions in memory. It's useful for maintaining a sliding window of recent interactions without letting the buffer get too large.
|
||||
|
||||
**Params**
|
||||
**Parameters**
|
||||
|
||||
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
|
||||
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model. Defaults to `chat_history`.
|
||||
- **k:** Used to specify the number of interactions or messages that should be stored in the conversation buffer. It determines the size of the sliding window that keeps track of the most recent interactions.
|
||||
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
|
||||
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
|
||||
- **input_key**: Specifies the keys in the memory object where input messages are stored.
|
||||
- **memory_key**: Specifies the prompt variable name for storing and retrieving chat messages. Defaults to `chat_history`.
|
||||
- **k**: Specifies the number of interactions or messages to be stored in the conversation buffer.
|
||||
- **output_key**: Specifies the key under which the generated response will be stored.
|
||||
- **return_messages**: Determines whether the history should be returned as a string or as a list of messages. The default is `False`.
|
||||
|
||||
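A usage sketch, assuming the classic LangChain API for this class:

```python
from langchain.memory import ConversationBufferWindowMemory

memory = ConversationBufferWindowMemory(k=2, memory_key="chat_history", return_messages=True)
memory.save_context({"input": "Hi!"}, {"output": "Hello, how can I help?"})
memory.save_context({"input": "What is Langflow?"}, {"output": "A visual flow builder."})
memory.save_context({"input": "Thanks!"}, {"output": "Anytime."})

# Only the last k=2 interactions survive in the sliding window.
print(memory.load_memory_variables({})["chat_history"])
```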
---
|
||||
|
||||
### ConversationEntityMemory
|
||||
|
||||
The `ConversationEntityMemory` component incorporates intricate memory structures, specifically a key-value store, for entities referenced in a conversation. This facilitates the storage and retrieval of information related to entities that have been mentioned throughout the conversation.
|
||||
The `ConversationEntityMemory` component uses a key-value store to manage entities mentioned in conversations. This structure enhances the storage and retrieval of information about specific entities.
|
||||
|
||||
**Params**
|
||||
**Parameters**
|
||||
|
||||
- **Entity Store:** Structure that stores information about specific entities mentioned in a conversation.
|
||||
- **LLM:** Language Model to use in the `ConversationEntityMemory`.
|
||||
- **chat_history_key:** Specify a unique identifier for the chat history data associated with a particular entity. This allows for organizing and accessing the chat history data for each entity within the conversation entity memory. Defaults to `history`
|
||||
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
|
||||
- **k:** Refers to the number of entities that can be stored in the memory. It determines the maximum number of entities that can be stored and retrieved from the memory object. Defaults to `10`
|
||||
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
|
||||
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
|
||||
- **entity_store**: A structure that stores information about entities mentioned in a conversation.
|
||||
- **LLM**: Specifies the language model used in the `ConversationEntityMemory`.
|
||||
- **chat_history_key**: A unique identifier for the chat history data associated with a particular entity. This key helps organize and access chat history data for each entity within the memory. Defaults to `history`.
|
||||
- **input_key**: Identifies where input messages are stored in the memory object, allowing for their retrieval and manipulation.
|
||||
- **k**: Specifies the maximum number of entities that can be stored and retrieved from the memory. Defaults to `10`.
|
||||
- **output_key**: Identifies the key under which the generated response is stored, enabling retrieval using this key.
|
||||
- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`.
|
||||
|
||||
---
|
||||
|
||||
### ConversationKGMemory
|
||||
|
||||
`ConversationKGMemory` is a type of memory that uses a knowledge graph to recreate memory. It allows the extraction of entities and knowledge triplets from a new message, using previous messages as context.
|
||||
The `ConversationKGMemory` utilizes a knowledge graph to enhance memory capabilities. It extracts entities and knowledge triplets from new messages, using previous messages as context.
|
||||
|
||||
**Params**
|
||||
**Parameters**
|
||||
|
||||
- **LLM:** Language Model to use in the `ConversationKGMemory`.
|
||||
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
|
||||
- **k:** Represents the number of previous conversation turns that will be stored in the memory. By setting "k" to 2, it means that the memory will retain the previous 2 conversation turns, allowing the model to access and utilize the information from those turns during the conversation. Defaults to `10`
|
||||
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model. Defaults to `chat_history`.
|
||||
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
|
||||
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
|
||||
- **LLM**: Specifies the language model used in the `ConversationKGMemory`.
|
||||
- **input_key**: Identifies where input messages are stored in the memory object, facilitating their retrieval and manipulation.
|
||||
- **k**: Indicates the number of previous conversation turns stored in memory, allowing the model to utilize information from these turns. Defaults to `10`.
|
||||
- **memory_key**: Specifies the prompt variable name where the memory stores and retrieves chat messages. Defaults to `chat_history`.
|
||||
- **output_key**: Identifies the key under which the generated response is stored, enabling retrieval using this key.
|
||||
- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`.
|
||||
|
||||
---
|
||||
|
||||
### ConversationSummaryMemory
|
||||
|
||||
The `ConversationSummaryMemory` is a memory component that creates a summary of the conversation over time. It condenses information from the conversation and stores the current summary in memory. It is particularly useful for longer conversations where keeping the entire message history in the prompt would take up too many tokens.
|
||||
The `ConversationSummaryMemory` summarizes conversations over time, condensing information and storing it efficiently. It's particularly useful for long conversations.
|
||||
|
||||
**Params**
|
||||
**Parameters**
|
||||
|
||||
- **LLM:** Language Model to use in the `ConversationSummaryMemory`.
|
||||
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
|
||||
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model. Defaults to `chat_history`.
|
||||
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
|
||||
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
|
||||
- **LLM**: Specifies the language model used in the `ConversationSummaryMemory`.
|
||||
- **input_key**: Identifies where input messages are stored in the memory object, facilitating their retrieval and manipulation.
|
||||
- **memory_key**: Specifies the prompt variable name where the memory stores and retrieves chat messages. Defaults to `chat_history`.
|
||||
- **output_key**: Identifies the key under which the generated response is stored, enabling retrieval using this key.
|
||||
- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`.
|
||||
|
||||
---
|
||||
|
||||
### PostgresChatMessageHistory
|
||||
|
||||
The `PostgresChatMessageHistory` is a memory component that allows for the storage and retrieval of chat message history using a PostgreSQL database. The connection to the PostgreSQL database is established using a connection string, which includes the necessary authentication and database information.
|
||||
The `PostgresChatMessageHistory` component uses a PostgreSQL database to store and retrieve chat message history.
|
||||
|
||||
**Params**
|
||||
**Parameters**
|
||||
|
||||
- **connection_string:** Refers to a string that contains the necessary information to establish a connection to a PostgreSQL database. The `connection_string` typically includes details such as the username, password, host, port, and database name required to connect to the PostgreSQL database. Defaults to `postgresql://postgres:mypassword@localhost/chat_history`
|
||||
- **session_id:** It is a unique identifier that is used to associate chat message history with a specific session or conversation.
|
||||
- **table_name:** Refers to the name of the table in the PostgreSQL database where the chat message history will be stored. Defaults to `message_store`
|
||||
- **connection_string**: Specifies the details needed to connect to the PostgreSQL database, including username, password, host, port, and database name. Defaults to `postgresql://postgres:mypassword@localhost/chat_history`.
|
||||
- **session_id**: A unique identifier used to link chat message history with a specific session or conversation.
|
||||
- **table_name**: The name of the PostgreSQL database table where chat message history is stored. Defaults to `message_store`.
|
||||
|
||||
---
|
||||
|
||||
### VectorRetrieverMemory
|
||||
|
||||
The `VectorRetrieverMemory` is a memory component that allows for the retrieval of vectors based on a given query. It is used to perform vector-based searches and retrievals.
|
||||
The `VectorRetrieverMemory` retrieves vectors based on queries, facilitating vector-based searches and retrievals.
|
||||
|
||||
**Params**
|
||||
**Parameters**
|
||||
|
||||
- **Retriever:** The retriever used to fetch documents.
|
||||
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
|
||||
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model – defaults to `chat_history`.
|
||||
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string – defaults to `False`.
|
||||
- **Retriever**: The tool used to fetch documents.
|
||||
- **input_key**: Identifies where input messages are stored in the memory object, facilitating their retrieval and manipulation.
|
||||
- **memory_key**: Specifies the prompt variable name where the memory stores and retrieves chat messages. Defaults to `chat_history`.
|
||||
- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`.
|
||||
143
docs/docs/components/model_specs.mdx
Normal file
|
|
@ -0,0 +1,143 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# Large Language Models (LLMs)
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
Thank you for your patience as we refine our documentation. You might encounter some inconsistencies. Please help us improve by sharing your feedback or reporting any issues! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
A Large Language Model (LLM) is a foundational component of Langflow. It provides a uniform interface for interacting with LLMs from various providers, including OpenAI, Cohere, and HuggingFace. Langflow extensively uses LLMs across its chains and agents, employing them to generate text based on specific prompts or inputs.
|
||||
|
||||
---
|
||||
|
||||
### Anthropic
|
||||
|
||||
This is a wrapper for Anthropic's large language models. Learn more at [Anthropic](https://www.anthropic.com).
|
||||
|
||||
- **anthropic_api_key:** This key authenticates and authorizes access to the Anthropic API.
|
||||
- **anthropic_api_url:** This URL connects to the Anthropic API.
|
||||
- **temperature:** This parameter adjusts the randomness level in text generation. Set this to a non-negative number.
|
||||
|
||||
---
|
||||
|
||||
### ChatAnthropic
|
||||
|
||||
This is a wrapper for Anthropic's large language model designed for chat-based interactions. Learn more at [Anthropic](https://www.anthropic.com).
|
||||
|
||||
- **anthropic_api_key:** This key authenticates and authorizes access to the Anthropic API.
|
||||
- **anthropic_api_url:** This URL connects to the Anthropic API.
|
||||
- **temperature:** This parameter adjusts the randomness level in text generation. Set this to a non-negative number.
|
||||
|
||||
---
|
||||
|
||||
### CTransformers
|
||||
|
||||
`CTransformers` provides access to Transformer models implemented in C/C++ using the [GGML](https://github.com/ggerganov/ggml) library.
|
||||
|
||||
<Admonition type="info">
|
||||
Ensure the `ctransformers` Python package is installed. Discover more about installation, supported models, and usage [here](https://github.com/marella/ctransformers).
|
||||
</Admonition>
|
||||
|
||||
- **config:** This configuration is for the Transformer models. Check the default settings and possible configurations at [config](https://github.com/marella/ctransformers#config).
|
||||
|
||||
```json
|
||||
{
|
||||
"top_k": 40,
|
||||
"top_p": 0.95,
|
||||
"temperature": 0.8,
|
||||
"repetition_penalty": 1.1,
|
||||
"last_n_tokens": 64,
|
||||
"seed": -1,
|
||||
"max_new_tokens": 256,
|
||||
"stop": null,
|
||||
"stream": false,
|
||||
"reset": true,
|
||||
"batch_size": 8,
|
||||
"threads": -1,
|
||||
"context_length": -1,
|
||||
"gpu_layers": 0
|
||||
}
|
||||
```
|
||||
|
||||
- **model**: The file path, directory, or Hugging Face Hub model repository name.
|
||||
- **model_file**: The specific model file name within the repository or directory.
|
||||
- **model_type**: The type of transformer model used. For further information, visit [ctransformers](https://github.com/marella/ctransformers).
|
||||
|
||||
### ChatOpenAI Component
|
||||
|
||||
This component interfaces with [OpenAI's](https://openai.com) large language models, supporting a variety of tasks such as chatbots, generative question-answering, and summarization.
|
||||
|
||||
- **max_tokens**: The maximum number of tokens to generate for each completion. Set to `-1` to generate as many tokens as possible, based on the model's context size. The default is `256`.
|
||||
- **model_kwargs**: A dictionary containing any additional model parameters for undefined calls.
|
||||
- **model_name**: Specifies the OpenAI chat model in use.
|
||||
- **openai_api_base**: The base URL for accessing the OpenAI API.
|
||||
- **openai_api_key**: The API key required for authentication with the OpenAI API.
|
||||
- **temperature**: Adjusts the randomness level of the text generation. This should be a non-negative number, defaulting to `0.7`.
|
||||
|
||||
### Cohere Component
|
||||
|
||||
A wrapper for accessing [Cohere's](https://cohere.com) large language models.
|
||||
|
||||
- **cohere_api_key**: The API key needed for Cohere service authentication.
|
||||
- **max_tokens**: The limit on the number of tokens to generate per request, defaulting to `256`.
|
||||
- **temperature**: Adjusts the randomness level in text generations. This should be a non-negative number, defaulting to `0.75`.
|
||||
|
||||
### HuggingFaceHub Component
|
||||
|
||||
A component facilitating access to models hosted on the [HuggingFace Hub](https://www.huggingface.co/models).
|
||||
|
||||
- **huggingfacehub_api_token**: The token required for API authentication.
|
||||
- **model_kwargs**: Parameters passed to the model.
|
||||
- **repo_id**: Specifies the model repository, defaulting to `gpt2`.
|
||||
- **task**: The specific task to execute with the model, returning either `generated_text` or `summary_text`.
|
||||
|
||||
### LlamaCpp Component
|
||||
|
||||
This component provides access to `llama.cpp` models, ensuring high performance and flexibility.
|
||||
|
||||
- **echo**: Whether to echo the input prompt, defaulting to `False`.
|
||||
- **f16_kv**: Indicates if half-precision should be used for the key/value cache, defaulting to `True`.
|
||||
- **last_n_tokens_size**: The lookback size for applying repeat penalties, defaulting to `64`.
|
||||
- **logits_all**: Whether to return logits for all tokens or just the last one, defaulting to `False`.
|
||||
- **logprobs**: The number of log probabilities to return. If set to None, no probabilities are returned.
|
||||
- **lora_base**: The path to the base Llama LoRA model.
|
||||
- **lora_path**: The specific path to the Llama LoRA model. If set to None, no LoRA model is loaded.
|
||||
- **max_tokens**: The maximum number of tokens to generate in one session, defaulting to `256`.
|
||||
- **model_path**: The file path to the Llama model.
|
||||
- **n_batch**: The number of tokens processed in parallel, defaulting to `8`.
|
||||
- **n_ctx**: The context window size for tokens, defaulting to `512`.
|
||||
- **repeat_penalty**: The penalty applied to repeated tokens, defaulting to `1.1`.
|
||||
- **seed**: The seed for random number generation. If set to `-1`, a random seed is used.
|
||||
- **stop**: A list of stop strings that terminate generation when encountered.
|
||||
- **streaming**: Indicates whether to stream results token by token, defaulting to `True`.
|
||||
- **suffix**: A suffix appended to generated text. If None, no suffix is appended.
|
||||
- **tags**: Tags added to the execution trace for monitoring.
|
||||
- **temperature**: The sampling temperature, defaulting to `0.8`.
|
||||
- **top_k**: The top-k sampling setting, defaulting to `40`.
|
||||
- **top_p**: The cumulative probability threshold for top-p sampling, defaulting to `0.95`.
|
||||
- **use_mlock**: Forces the system to retain the model in RAM, defaulting to `False`.
|
||||
- **use_mmap**: Indicates whether to maintain the model loaded in RAM, defaulting to `True`.
|
||||
- **verbose**: Controls the verbosity of output details. When enabled, it provides insights into internal states to aid debugging and understanding, defaulting to `False`.
|
||||
- **vocab_only**: Loads only the vocabulary without model weights, defaulting to `False`.
|
||||
|
||||
### VertexAI Component
|
||||
|
||||
This component integrates with [Google Vertex AI](https://cloud.google.com/vertex-ai) large language models to enhance AI capabilities.
|
||||
|
||||
- **credentials**: Custom credentials used for API interactions.
|
||||
- **location**: The default location for API calls, defaulting to `us-central1`.
|
||||
- **max_output_tokens**: Limits the output tokens per prompt, defaulting to `128`.
|
||||
- **model_name**: The name of the Vertex AI model in use, defaulting to `text-bison`.
|
||||
- **project**: The default Google Cloud Platform project for API calls.
|
||||
- **request_parallelism**: The level of request parallelism for VertexAI model interactions, defaulting to `5`.
|
||||
- **temperature**: Adjusts the randomness level in text generations, defaulting to `0`.
|
||||
- **top_k**: The setting for selecting the top-k tokens for outputs.
|
||||
- **top_p**: The threshold for summing probabilities of the most likely tokens, defaulting to `0.95`.
|
||||
- **tuned_model_name**: Specifies a tuned model name, which overrides the default model name if provided.
|
||||
- **verbose**: Controls the output verbosity to assist in debugging and understanding the operational details, defaulting to `False`.
|
||||
|
||||
---
|
||||
350
docs/docs/components/models.mdx
Normal file
|
|
@ -0,0 +1,350 @@
|
|||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Models
|
||||
|
||||
### Amazon Bedrock
|
||||
|
||||
This component facilitates text generation using Large Language Models (LLMs) available through Amazon Bedrock.
|
||||
|
||||
**Params**
|
||||
|
||||
- **Input Value:** Specifies the input text for text generation.
|
||||
|
||||
- **System Message (Optional):** A system message to pass to the model.
|
||||
|
||||
- **Model ID (Optional):** Specifies the model ID to be used for text generation. Defaults to _`"anthropic.claude-instant-v1"`_. Available options include:
|
||||
|
||||
- _`"ai21.j2-grande-instruct"`_
|
||||
- _`"ai21.j2-jumbo-instruct"`_
|
||||
- _`"ai21.j2-mid"`_
|
||||
- _`"ai21.j2-mid-v1"`_
|
||||
- _`"ai21.j2-ultra"`_
|
||||
- _`"ai21.j2-ultra-v1"`_
|
||||
- _`"anthropic.claude-instant-v1"`_
|
||||
- _`"anthropic.claude-v1"`_
|
||||
- _`"anthropic.claude-v2"`_
|
||||
- _`"cohere.command-text-v14"`_
|
||||
|
||||
- **Credentials Profile Name (Optional):** Specifies the name of the credentials profile.
|
||||
|
||||
- **Region Name (Optional):** Specifies the region name.
|
||||
|
||||
- **Model Kwargs (Optional):** Additional keyword arguments for the model.
|
||||
|
||||
- **Endpoint URL (Optional):** Specifies the endpoint URL.
|
||||
|
||||
- **Streaming (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
|
||||
|
||||
- **Cache (Optional):** Specifies whether to cache the response.
|
||||
|
||||
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
|
||||
|
||||
<Admonition type="note" title="Note">
|
||||
<p>
|
||||
Ensure that necessary credentials are provided to connect to the Amazon
|
||||
Bedrock API. If connection fails, a ValueError will be raised.
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
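One plausible wiring behind this component, assuming the classic LangChain Bedrock wrapper; the profile, region, and prompt values are placeholders.

```python
from langchain.llms import Bedrock

llm = Bedrock(
    model_id="anthropic.claude-instant-v1",  # the documented default model ID
    credentials_profile_name="default",
    region_name="us-east-1",
    streaming=False,
)
print(llm("Write a one-line greeting."))
```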
---
|
||||
|
||||
### Anthropic
|
||||
|
||||
This component generates text using Anthropic's chat and completion large language models.
|
||||
|
||||
**Params**
|
||||
|
||||
- **Model Name:** Specifies the name of the Anthropic model to be used for text generation. Available options include:
|
||||
|
||||
- _`"claude-2.1"`_
|
||||
- _`"claude-2.0"`_
|
||||
- _`"claude-instant-1.2"`_
|
||||
- _`"claude-instant-1"`_
|
||||
|
||||
- **Anthropic API Key:** Your Anthropic API key.
|
||||
|
||||
- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to _`256`_.
|
||||
|
||||
- **Temperature (Optional):** Specifies the sampling temperature. Defaults to _`0.7`_.
|
||||
|
||||
- **API Endpoint (Optional):** Specifies the endpoint of the Anthropic API. Defaults to _`"https://api.anthropic.com"`_ if not specified.
|
||||
|
||||
- **Input Value:** Specifies the input text for text generation.
|
||||
|
||||
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
|
||||
|
||||
- **System Message (Optional):** A system message to pass to the model.
|
||||
|
||||
For detailed documentation and integration guides, please refer to the [Anthropic Component Documentation](https://python.langchain.com/docs/integrations/chat/anthropic).
|
||||
|
||||
---
|
||||
|
||||
### Azure OpenAI
|
||||
|
||||
This component generates text using Large Language Models (LLMs) from Azure OpenAI.
|
||||
|
||||
**Params**
|
||||
|
||||
- **Model Name:** Specifies the name of the Azure OpenAI model to be used for text generation. Available options include:
|
||||
|
||||
- _`"gpt-35-turbo"`_
|
||||
- _`"gpt-35-turbo-16k"`_
|
||||
- _`"gpt-35-turbo-instruct"`_
|
||||
- _`"gpt-4"`_
|
||||
- _`"gpt-4-32k"`_
|
||||
- _`"gpt-4-vision"`_
|
||||
|
||||
- **Azure Endpoint:** Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/`.
|
||||
|
||||
- **Deployment Name:** Specifies the name of the deployment.
|
||||
|
||||
- **API Version:** Specifies the version of the Azure OpenAI API to be used. Available options include:
|
||||
|
||||
- _`"2023-03-15-preview"`_
|
||||
- _`"2023-05-15"`_
|
||||
- _`"2023-06-01-preview"`_
|
||||
- _`"2023-07-01-preview"`_
|
||||
- _`"2023-08-01-preview"`_
|
||||
- _`"2023-09-01-preview"`_
|
||||
- _`"2023-12-01-preview"`_
|
||||
|
||||
- **API Key:** Your Azure OpenAI API key.
|
||||
|
||||
- **Temperature (Optional):** Specifies the sampling temperature. Defaults to _`0.7`_.
|
||||
|
||||
- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to _`1000`_.
|
||||
|
||||
- **Input Value:** Specifies the input text for text generation.
|
||||
|
||||
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
|
||||
|
||||
- **System Message (Optional):** A system message to pass to the model.
|
||||
|
||||
For detailed documentation and integration guides, please refer to the [Azure OpenAI Component Documentation](https://python.langchain.com/docs/integrations/llms/azure_openai).
|
||||
|
||||
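
As a rough sketch of the same parameters in plain LangChain (assuming `langchain-community`; the deployment name and key are hypothetical):

```python
from langchain_community.chat_models import AzureChatOpenAI

# Endpoint, deployment, and API version mirror the component fields above.
llm = AzureChatOpenAI(
    azure_endpoint="https://example-resource.azure.openai.com/",
    deployment_name="my-gpt-35-deployment",  # hypothetical deployment name
    openai_api_version="2023-07-01-preview",
    openai_api_key="...",  # placeholder key
    temperature=0.7,
    max_tokens=1000,
)
print(llm.invoke("Say hello.").content)
```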
---

### Cohere

This component enables text generation using Cohere large language models.

**Params**

- **Cohere API Key:** Your Cohere API key.

- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to _`256`_.

- **Temperature (Optional):** Specifies the sampling temperature. Defaults to _`0.75`_.

- **Input Value:** Specifies the input text for text generation.

- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.

- **System Message (Optional):** A system message to pass to the model.
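
A minimal sketch with LangChain's Cohere wrapper, assuming the `langchain-community` package and a placeholder key:

```python
from langchain_community.llms import Cohere

# Temperature and max tokens correspond to the defaults listed above.
llm = Cohere(
    cohere_api_key="...",  # placeholder key
    temperature=0.75,
    max_tokens=256,
)
print(llm.invoke("Summarize why prompt templates are useful."))
```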
---

### Google Generative AI

This component enables text generation using Google Generative AI.

**Params**

- **Google API Key:** Your Google API key for the Google Generative AI service.

- **Model:** The name of the model to use. Supported examples are _`"gemini-pro"`_ and _`"gemini-pro-vision"`_.

- **Max Output Tokens (Optional):** The maximum number of tokens to generate.

- **Temperature:** Run inference with this temperature. Must be in the closed interval [0.0, 1.0].

- **Top K (Optional):** Decode using top-k sampling: consider the set of the `top_k` most probable tokens. Must be positive.

- **Top P (Optional):** The maximum cumulative probability of tokens to consider when sampling.

- **N (Optional):** Number of chat completions to generate for each prompt. Note that the API may not return the full `n` completions if duplicates are generated.

- **Input Value:** The input to the model.

- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.

- **System Message (Optional):** A system message to pass to the model.
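
A minimal sketch of the same parameters, assuming the `langchain-google-genai` package and a placeholder key:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

# Sampling parameters mirror the component fields above.
llm = ChatGoogleGenerativeAI(
    model="gemini-pro",
    google_api_key="...",  # placeholder key
    temperature=0.4,       # must lie in [0.0, 1.0]
    top_k=40,
    top_p=0.95,
    max_output_tokens=1024,
)
print(llm.invoke("Name three uses of embeddings.").content)
```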
---

### Hugging Face API

This component facilitates text generation using LLMs hosted on the Hugging Face Inference API.

**Params**

- **Endpoint URL:** The URL of the Hugging Face Inference API endpoint. Should be provided along with the necessary authentication credentials.

- **Task:** Specifies the task for text generation. Options include _`"text2text-generation"`_, _`"text-generation"`_, and _`"summarization"`_.

- **API Token:** The API token required for authentication with the Hugging Face Hub.

- **Model Keyword Arguments (Optional):** Additional keyword arguments for the model. Should be provided as a Python dictionary.

- **Input Value:** The input text for text generation.

- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.

- **System Message (Optional):** A system message to pass to the model.
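
A minimal sketch using LangChain's endpoint wrapper; the endpoint URL and token below are placeholders, and the class assumes the `langchain-community` package:

```python
from langchain_community.llms import HuggingFaceEndpoint

# The endpoint URL is a placeholder for a dedicated Inference Endpoint.
llm = HuggingFaceEndpoint(
    endpoint_url="https://my-endpoint.endpoints.huggingface.cloud",  # hypothetical
    task="text-generation",
    huggingfacehub_api_token="hf_...",  # placeholder token
)
print(llm.invoke("Finish this sentence: Text splitters are"))
```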
---

### LiteLLM Model

Generates text using the `LiteLLM` collection of large language models.

**Parameters**

- **Model name:** The name of the model to use. For example, `gpt-3.5-turbo`. (Type: str)
- **API key:** The API key to use for accessing the provider's API. (Type: str, Optional)
- **Provider:** The provider of the API key. (Type: str, Choices: "OpenAI", "Azure", "Anthropic", "Replicate", "Cohere", "OpenRouter")
- **Temperature:** Controls the randomness of the text generation. (Type: float, Default: 0.7)
- **Model kwargs:** Additional keyword arguments for the model. (Type: Dict, Optional)
- **Top p:** Filter responses to keep the cumulative probability within the top p tokens. (Type: float, Optional)
- **Top k:** Filter responses to only include the top k tokens. (Type: int, Optional)
- **N:** Number of chat completions to generate for each prompt. (Type: int, Default: 1)
- **Max tokens:** The maximum number of tokens to generate for each chat completion. (Type: int, Default: 256)
- **Max retries:** Maximum number of retries for failed requests. (Type: int, Default: 6)
- **Verbose:** Whether to print verbose output. (Type: bool, Default: False)
- **Input:** The input prompt for text generation. (Type: str)
- **Stream:** Whether to stream the output. (Type: bool, Default: False)
- **System message:** System message to pass to the model. (Type: str, Optional)
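
A minimal sketch of what this component wraps, assuming the `litellm` Python package is installed:

```python
import litellm

# One call signature across providers; the provider is inferred from the model name.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}],
    temperature=0.7,
    max_tokens=256,
)
print(response.choices[0].message.content)
```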
---

### Ollama

Generate text using Ollama Local LLMs.

**Parameters**

- **Base URL:** Endpoint of the Ollama API. Defaults to `http://localhost:11434` if not specified.
- **Model Name:** The model name to use. Refer to the [Ollama Library](https://ollama.ai/library) for more models.
- **Temperature:** Controls the creativity of model responses. (Default: 0.8)
- **Cache:** Enable or disable caching. (Default: False)
- **Format:** Specify the format of the output (e.g., json). (Advanced)
- **Metadata:** Metadata to add to the run trace. (Advanced)
- **Mirostat:** Enable/disable Mirostat sampling for controlling perplexity. (Default: Disabled)
- **Mirostat Eta:** Learning rate for the Mirostat algorithm. (Default: None) (Advanced)
- **Mirostat Tau:** Controls the balance between coherence and diversity of the output. (Default: None) (Advanced)
- **Context Window Size:** Size of the context window for generating tokens. (Default: None) (Advanced)
- **Number of GPUs:** Number of GPUs to use for computation. (Default: None) (Advanced)
- **Number of Threads:** Number of threads to use during computation. (Default: None) (Advanced)
- **Repeat Last N:** How far back the model looks to prevent repetition. (Default: None) (Advanced)
- **Repeat Penalty:** Penalty for repetitions in generated text. (Default: None) (Advanced)
- **TFS Z:** Tail free sampling value. (Default: None) (Advanced)
- **Timeout:** Timeout for the request stream. (Default: None) (Advanced)
- **Top K:** Limits token selection to the top K. (Default: None) (Advanced)
- **Top P:** Works together with top-k. (Default: None) (Advanced)
- **Verbose:** Whether to print out response text.
- **Tags:** Tags to add to the run trace. (Advanced)
- **Stop Tokens:** List of tokens to signal the model to stop generating text. (Advanced)
- **System:** System prompt to use for generating text. (Advanced)
- **Template:** Template to use for generating text. (Advanced)
- **Input:** The input text.
- **Stream:** Whether to stream the response.
- **System Message:** System message to pass to the model. (Advanced)
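
A minimal sketch against a local Ollama server, assuming the `langchain-community` package and a model already pulled with Ollama:

```python
from langchain_community.chat_models import ChatOllama

# Base URL matches the default listed above; the model name is illustrative.
llm = ChatOllama(
    base_url="http://localhost:11434",
    model="llama2",
    temperature=0.8,
)
print(llm.invoke("Why is the sky blue?").content)
```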
---

### OpenAI

This component facilitates text generation using OpenAI's models.

**Params**

- **Input Value:** The input text for text generation.

- **Max Tokens (Optional):** The maximum number of tokens to generate. Defaults to _`256`_.

- **Model Kwargs (Optional):** Additional keyword arguments for the model. Should be provided as a nested dictionary.

- **Model Name (Optional):** The name of the model to use. Defaults to _`gpt-4-1106-preview`_. Supported options include: _`gpt-4-turbo-preview`_, _`gpt-4-0125-preview`_, _`gpt-4-1106-preview`_, _`gpt-4-vision-preview`_, _`gpt-3.5-turbo-0125`_, _`gpt-3.5-turbo-1106`_.

- **OpenAI API Base (Optional):** The base URL of the OpenAI API. Defaults to _`https://api.openai.com/v1`_.

- **OpenAI API Key (Optional):** The API key for accessing the OpenAI API.

- **Temperature:** Controls the creativity of model responses. Defaults to _`0.7`_.

- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.

- **System Message (Optional):** System message to pass to the model.
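
A minimal sketch of the same parameters with LangChain's OpenAI chat wrapper (assuming `langchain-community`; the key is a placeholder):

```python
from langchain_community.chat_models import ChatOpenAI

# Field names mirror the component parameters above.
llm = ChatOpenAI(
    model="gpt-4-1106-preview",
    openai_api_key="sk-...",  # placeholder key
    openai_api_base="https://api.openai.com/v1",
    temperature=0.7,
    max_tokens=256,
)
print(llm.invoke("Give one tip for writing prompts.").content)
```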
---

### Qianfan

This component facilitates the generation of text using Baidu Qianfan chat models.

**Params**

- **Model Name:** Specifies the name of the Qianfan chat model to be used for text generation. Available options include:

  - _`"ERNIE-Bot"`_
  - _`"ERNIE-Bot-turbo"`_
  - _`"BLOOMZ-7B"`_
  - _`"Llama-2-7b-chat"`_
  - _`"Llama-2-13b-chat"`_
  - _`"Llama-2-70b-chat"`_
  - _`"Qianfan-BLOOMZ-7B-compressed"`_
  - _`"Qianfan-Chinese-Llama-2-7B"`_
  - _`"ChatGLM2-6B-32K"`_
  - _`"AquilaChat-7B"`_

- **Qianfan Ak:** Your Baidu Qianfan access key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop).

- **Qianfan Sk:** Your Baidu Qianfan secret key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop).

- **Top p (Optional):** Model parameter. Specifies the top-p value. Only supported in the ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`0.8`_.

- **Temperature (Optional):** Model parameter. Specifies the sampling temperature. Only supported in the ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`0.95`_.

- **Penalty Score (Optional):** Model parameter. Specifies the penalty score. Only supported in the ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`1.0`_.

- **Endpoint (Optional):** Endpoint of the Qianfan LLM, required if a custom model is used.

- **Input Value:** Specifies the input text for text generation.

- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.

- **System Message (Optional):** A system message to pass to the model.
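
A minimal sketch with LangChain's Qianfan chat wrapper, assuming the `langchain-community` and `qianfan` packages and placeholder keys:

```python
from langchain_community.chat_models import QianfanChatEndpoint

# top_p, temperature, and penalty_score apply to the ERNIE-Bot family, as noted above.
llm = QianfanChatEndpoint(
    model="ERNIE-Bot",
    qianfan_ak="...",  # placeholder access key
    qianfan_sk="...",  # placeholder secret key
    top_p=0.8,
    temperature=0.95,
    penalty_score=1.0,
)
print(llm.invoke("Hello!").content)
```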
---

### Vertex AI

`ChatVertexAI` is a component for generating text using the Vertex AI Chat large language models API.

**Params**

- **Credentials:** The JSON file containing the credentials for accessing the Vertex AI Chat API.

- **Project:** The name of the project associated with the Vertex AI Chat API.

- **Examples (Optional):** List of examples to provide context for text generation.

- **Location:** The location of the Vertex AI Chat API service. Defaults to _`us-central1`_.

- **Max Output Tokens:** The maximum number of tokens to generate. Defaults to _`128`_.

- **Model Name:** The name of the model to use. Defaults to _`chat-bison`_.

- **Temperature:** Controls the creativity of model responses. Defaults to _`0.0`_.

- **Input Value:** The input text for text generation.

- **Top K:** Limits token selection to the top K. Defaults to _`40`_.

- **Top P:** Works together with top-k. Defaults to _`0.95`_.

- **Verbose:** Whether to print out response text. Defaults to _`False`_.

- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.

- **System Message (Optional):** System message to pass to the model.
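
A minimal sketch with LangChain's Vertex AI chat wrapper; the project ID is hypothetical and Google Cloud credentials are assumed to be available in the environment (e.g. via `GOOGLE_APPLICATION_CREDENTIALS`):

```python
from langchain_community.chat_models import ChatVertexAI

# Defaults mirror the component parameters above.
llm = ChatVertexAI(
    project="my-gcp-project",  # hypothetical project ID
    location="us-central1",
    model_name="chat-bison",
    temperature=0.0,
    max_output_tokens=128,
    top_k=40,
    top_p=0.95,
)
print(llm.invoke("What is a vector store?").content)
```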
34 docs/docs/components/outputs.mdx Normal file

@@ -0,0 +1,34 @@
import Admonition from '@theme/Admonition';

# Outputs

## Chat Output

This component sends a message to the chat.

**Parameters**

- **Sender Type:** Specifies the sender type. Default is `"Machine"`. Options are `"Machine"` and `"User"`.

- **Sender Name:** Specifies the sender's name. Default is `"AI"`.

- **Session ID:** Specifies the session ID of the chat history. If provided, messages are saved in the Message History.

- **Message:** Specifies the text of the message.

<Admonition type="note" title="Note">
<p>
If `As Record` is `true` and the `Message` is a `Record`, the data in the `Record` is updated with the `Sender`, `Sender Name`, and `Session ID`.
</p>
</Admonition>

## Text Output

This component displays text data to the user. It is useful when you want to show text without sending it to the chat.

**Parameters**

- **Value:** Specifies the text data to be displayed. Defaults to an empty string.

The `TextOutput` component provides a simple way to display text data, keeping it visible within the flow during your interaction without sending it to the chat.
@@ -2,26 +2,24 @@ import Admonition from "@theme/Admonition";

# Prompts

<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<Admonition type="caution" icon="🚧" title="Zone Under Construction">
<p>
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
Thank you for your patience as we refine our documentation. It may still have some areas under development. Please share your feedback or report any issues to help us improve!
</p>
</Admonition>

A prompt refers to the input given to a language model. It is constructed from multiple components and can be parametrized using prompt templates. A prompt template is a reproducible way to generate prompts and allow for easy customization through input variables.
A prompt is the input provided to a language model, consisting of multiple components and can be parameterized using prompt templates. A prompt template offers a reproducible method for generating prompts, enabling easy customization through input variables.

---

### PromptTemplate

The `PromptTemplate` component allows users to create prompts and define variables that provide control over instructing the model. The template can take in a set of variables from the end user and generates the prompt once the conversation is initiated.
The `PromptTemplate` component enables users to create prompts and define variables that control how the model is instructed. Users can input a set of variables which the template uses to generate the prompt when a conversation starts.

<Admonition type="info">
Once a variable is defined in the prompt template, it becomes a component input of its own. Check out [Prompt Customization](../docs/guidelines/prompt-customization.mdx) to learn more.
After defining a variable in the prompt template, it acts as its own component input. See [Prompt Customization](../administration/prompt-customization) for more details.
</Admonition>

- **template:** Template used to format an individual request.
- **template:** The template used to format an individual request.
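
To make the variable mechanics above concrete, here is a minimal LangChain sketch of what a prompt template does (the template text and variable names are illustrative):

```python
from langchain.prompts import PromptTemplate

# Each {placeholder} becomes an input variable of the template.
template = PromptTemplate.from_template(
    "You are a {role}. Answer the question.\n\nQuestion: {question}"
)
prompt = template.format(role="historian", question="Who built the pyramids?")
print(prompt)
```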
@@ -4,21 +4,21 @@ import Admonition from '@theme/Admonition';

<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
We appreciate your patience as we enhance our documentation. It may have some imperfections. Please share your feedback or report issues to help us improve. 🛠️📝
</p>
</Admonition>

A retriever is an interface that returns documents given an unstructured query. It is more general than a vector store and does not need to be able to store documents, only to return or retrieve them.
A retriever is an interface that returns documents in response to an unstructured query. It's broader than a vector store because it doesn't need to store documents; it only needs to retrieve them.

---

### MultiQueryRetriever

The `MultiQueryRetriever` component automates the process of generating multiple queries, retrieves relevant documents for each query, and combines the results to provide a more extensive and diverse set of potentially relevant documents. This approach enhances the effectiveness of the retrieval process and helps overcome the limitations of traditional distance-based retrieval methods.
The `MultiQueryRetriever` automates generating multiple queries, retrieves relevant documents for each query, and aggregates the results. This method improves retrieval effectiveness and addresses the limitations of traditional distance-based methods.

**Params**
**Parameters**

- **LLM:** Language Model to use in the `MultiQueryRetriever`.
- **Prompt:** Prompt to represent a schema for an LLM.
- **Retriever:** The retriever used to fetch documents.
- **parser_key:** This parameter is used to specify the key or attribute name of the parsed output that will be used for retrieval. It determines how the results from the language model are split into a list of queries. Defaults to `lines`, which means that the output from the language model will be split into a list of lines of text. This allows the retriever to retrieve relevant documents based on each line of text separately.
- **LLM:** Specifies the language model used in the `MultiQueryRetriever`.
- **Prompt:** Defines a schema for the LLM.
- **Retriever:** Identifies the retriever that fetches documents.
- **parser_key:** Specifies the key or attribute name of the parsed output for retrieval. By default, it's set to `lines`, meaning the output from the language model is split into separate lines of text. This allows the retriever to fetch documents relevant to each line of text.
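
A minimal sketch of the retriever described above, assuming a vector store `vectordb` and an LLM `llm` defined elsewhere:

```python
from langchain.retrievers.multi_query import MultiQueryRetriever

# The LLM generates query variants; the wrapped retriever fetches documents for each.
retriever = MultiQueryRetriever.from_llm(
    retriever=vectordb.as_retriever(),
    llm=llm,
)
docs = retriever.get_relevant_documents("How do text splitters work?")
```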
@@ -4,60 +4,47 @@ import Admonition from "@theme/Admonition";

<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
Thank you for your patience as we enhance our documentation. It might currently have some rough edges. Please share your feedback or report any issues to assist us in improving! 🛠️📝
</p>
</Admonition>

A text splitter is a tool that divides a document or text into smaller chunks or segments. It is used to break down large texts into more manageable pieces for analysis or processing.
A text splitter is a tool that divides a document or text into smaller chunks or segments. This helps make large texts more manageable for analysis or processing.

---

### CharacterTextSplitter

The `CharacterTextSplitter` is used to split a long text into smaller chunks based on a specified character. It splits the text by trying to keep paragraphs, sentences, and words together as long as possible, as these are semantically related pieces of text.
The `CharacterTextSplitter` splits a long text into smaller chunks based on a specified character. It aims to keep paragraphs, sentences, and words intact as much as possible since these are semantically related elements of text.

**Params**
**Parameters**

- **Documents:** Input documents to split.

- **chunk_overlap:** Determines the number of characters that overlap between consecutive chunks when splitting text. It specifies how much of the previous chunk should be included in the next chunk.

  For example, if the `chunk_overlap` is set to 20 and the `chunk_size` is set to 100, the splitter will create chunks of 100 characters each, but the last 20 characters of each chunk will overlap with the first 20 characters of the next chunk. This allows for a smoother transition between chunks and ensures that no information is lost – defaults to `200`.

- **chunk_size:** Determines the maximum number of characters in each chunk when splitting a text. It specifies the size or length of each chunk.

  For example, if the `chunk_size` is set to 100, the splitter will create chunks of 100 characters each. If the text is longer than 100 characters, it will be divided into multiple chunks of equal size, except for the last chunk, which may be smaller if there are remaining characters – defaults to `1000`.

- **separator:** Specifies the character that will be used to split the text into chunks – defaults to `.`
- **Documents:** The input documents to split.
- **chunk_overlap:** The number of characters that overlap between consecutive chunks. This setting ensures a smoother transition between chunks and prevents information loss. For example, with a `chunk_overlap` of 20 and a `chunk_size` of 100, each chunk will have the last 20 characters overlap with the next chunk's first 20 characters. The default is `200`.
- **chunk_size:** The maximum number of characters in each chunk. If the text exceeds the specified `chunk_size`, it will be divided into multiple chunks of equal size, with the possible exception of the last chunk, which may be smaller if fewer characters remain. The default is `1000`.
- **separator:** The character used to split the text into chunks. The default is `.`.
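
A minimal sketch of the splitter described above, using the defaults from the parameter list (`long_text` stands in for any large string):

```python
from langchain.text_splitter import CharacterTextSplitter

# Separator, chunk size, and overlap match the defaults listed above.
splitter = CharacterTextSplitter(separator=".", chunk_size=1000, chunk_overlap=200)
chunks = splitter.split_text(long_text)
```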
---

### RecursiveCharacterTextSplitter

The `RecursiveCharacterTextSplitter` splits the text by trying to keep paragraphs, sentences, and words together as long as possible, similar to the `CharacterTextSplitter`. However, it also recursively splits the text into smaller chunks if the chunk size exceeds a specified threshold.
The `RecursiveCharacterTextSplitter` functions similarly to the `CharacterTextSplitter` by trying to keep paragraphs, sentences, and words together. It also recursively splits the text into smaller chunks if the initial chunk size exceeds a specified threshold.

**Params**
**Parameters**

- **Documents:** Input documents to split.

- **chunk_overlap:** Determines the number of characters that overlap between consecutive chunks when splitting text. It specifies how much of the previous chunk should be included in the next chunk.

- **chunk_size:** Determines the maximum number of characters in each chunk when splitting a text. It specifies the size or length of each chunk.

- **separators:** The `separators` in RecursiveCharacterTextSplitter are the characters used to split the text into chunks. The text splitter tries to create chunks based on splitting on the first character in the list of `separators`. If any chunks are too large, it moves on to the next character in the list and continues splitting. Defaults to ["\n\n", "\n", " ", ""].
- **Documents:** The input documents to split.
- **chunk_overlap:** The number of characters that overlap between consecutive chunks.
- **chunk_size:** The maximum number of characters in each chunk.
- **separators:** A list of characters used to split the text into chunks. The splitter first tries to split text using the first character in the `separators` list. If any chunk exceeds the maximum size, it proceeds to the next character in the list and continues splitting. The defaults are ["\n\n", "\n", " ", ""].

### LanguageRecursiveTextSplitter

The `LanguageRecursiveTextSplitter` is a text splitter that splits the text into smaller chunks based on the (programming) language of the text.
The `LanguageRecursiveTextSplitter` divides text into smaller chunks based on the programming language of the text.

**Params**
**Parameters**

- **Documents:** Input documents to split.

- **chunk_overlap:** Determines the number of characters that overlap between consecutive chunks when splitting text. It specifies how much of the previous chunk should be included in the next chunk.

- **chunk_size:** Determines the maximum number of characters in each chunk when splitting a text. It specifies the size or length of each chunk.

- **separator_type:** The parameter allows the user to split the code with multiple language support. It supports various languages such as Ruby, Python, Solidity, Java, and more. Defaults to `Python`.
- **Documents:** The input documents to split.
- **chunk_overlap:** The number of characters that overlap between consecutive chunks.
- **chunk_size:** The maximum number of characters in each chunk.
- **separator_type:** This parameter allows splitting text across multiple programming languages such as Ruby, Python, Solidity, Java, and more. The default is `Python`.
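
A minimal sketch of language-aware splitting in LangChain, which exposes it via a factory method (`source_code` stands in for any code string):

```python
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter

# Uses Python-specific separators (class/def boundaries, blank lines, and so on).
splitter = RecursiveCharacterTextSplitter.from_language(
    language=Language.PYTHON,
    chunk_size=1000,
    chunk_overlap=200,
)
docs = splitter.create_documents([source_code])
```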
@@ -4,75 +4,68 @@ import Admonition from '@theme/Admonition';

<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
Thanks for your patience as we refine our documentation. It might have some rough edges currently. Please share your feedback or report issues to help us enhance it! 🛠️📝
</p>
</Admonition>

### SearchApi

Real-time search engine results API. Returns structured JSON data that includes answer box, knowledge graph, organic results, and more.
SearchApi offers a real-time search engine results API that returns structured JSON data, including answer boxes, knowledge graphs, organic results, and more.

**Parameters**
#### Parameters

- **Api Key:** A unique identifier for the SearchApi, necessary for authenticating requests to real-time search engines. This key can be retrieved from the [SearchApi dashboard](https://www.searchapi.io/).
- **Engine:** Specifies the search engine. For instance: google, google_scholar, bing, youtube, and youtube_transcripts. A full list of supported engines is available in the [documentation](https://www.searchapi.io/docs/google).
- **Parameters:** Allows the selection of any parameters recognized by SearchApi, with some being required and others optional.
- **Api Key:** A unique identifier required for authentication with real-time search engines, obtainable through the [SearchApi dashboard](https://www.searchapi.io/).
- **Engine:** Specifies the search engine used, such as Google, Google Scholar, Bing, YouTube, and YouTube transcripts. Refer to the [documentation](https://www.searchapi.io/docs/google) for a complete list of supported engines.
- **Parameters:** Allows the selection of various parameters recognized by SearchApi. Some parameters are mandatory while others are optional.

**Output**

- **Document:** The JSON response from the request as a Document.
#### Output

- **Document:** The JSON response from the request.

### BingSearchRun

Bing Search is a web search engine owned and operated by Microsoft. It provides search results for various types of content, including web pages, images, videos, and news articles. It uses a combination of algorithms and human editors to deliver search results to users.
Bing Search, a web search engine by Microsoft, provides search results for various content types like web pages, images, videos, and news articles. It combines algorithms and human editors to deliver these results.

**Params**

- **Api Wrapper:** A BingSearchAPIWrapper component that takes the search URL and a subscription key.
#### Parameters

- **Api Wrapper:** A BingSearchAPIWrapper component that processes the search URL and subscription key.

### Calculator

The calculator tool provides mathematical calculation capabilities to an agent by leveraging an LLMMathChain. It allows the agent to perform math when needed to answer questions.
The calculator tool leverages an LLMMathChain to provide mathematical calculation capabilities, enabling the agent to perform computations as needed.

**Params**

- **LLM:** Language Model to use in the calculation.
#### Parameters

- **LLM:** The Language Model used for calculations.
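
A minimal sketch of the calculator mechanism described above, assuming an LLM `llm` defined elsewhere:

```python
from langchain.chains import LLMMathChain

# The chain prompts the LLM to translate the question into math it can evaluate.
math_chain = LLMMathChain.from_llm(llm=llm)
result = math_chain.invoke("What is 13 raised to the 0.5 power?")
```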
### GoogleSearchResults

A wrapper around Google Search. Useful for when the user needs to answer questions about with more control over the JSON data returned from the API. It returns the full JSON response configured based on the parameters passed to the API wrapper.
This is a wrapper around Google Search tailored for users who need precise control over the JSON data returned from the API.

**Params**

- **Api Wrapper:** A GoogleSearchAPIWrapper with Google API key and CSE ID
#### Parameters

- **Api Wrapper:** A GoogleSearchAPIWrapper equipped with a Google API key and CSE ID.

### GoogleSearchRun

A quick wrapper around Google Search. It executes the search query and returns just the first result snippet from the highest-priority result type.
This tool acts as a quick wrapper around Google Search, executing the search query and returning the snippet from the most relevant result.

**Params**

- **Api Wrapper:** A GoogleSearchAPIWrapper with Google API key and CSE ID
#### Parameters

- **Api Wrapper:** A GoogleSearchAPIWrapper equipped with a Google API key and CSE ID.

### GoogleSerperRun

A low-cost Google Search API.
A cost-effective Google Search API.

**Params**

- **Api Wrapper:** A GoogleSerperAPIWrapper component with API key and result keys
#### Parameters

- **Api Wrapper:** A GoogleSerperAPIWrapper with the required API key and result keys.

### InfoSQLDatabaseTool

Tool for getting metadata about a SQL database. The input to this tool is a comma-separated list of tables, and the output is the schema and sample rows for those tables. Example Input: `"table1", "table2", "table3"`.
This tool retrieves metadata about SQL databases. It takes a comma-separated list of table names as input and outputs the schema and sample rows for those tables.

**Params**
#### Parameters

- **Db:** SQLDatabase to query.
- **Db:** The SQL database to query.
@@ -2,95 +2,91 @@ import Admonition from "@theme/Admonition";

# Utilities

<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
<Admonition type="caution" icon="🚧" title="Zone Under Construction">
We appreciate your understanding as we polish our documentation—it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</Admonition>

Utilities are a set of actions that can be used to perform common tasks in a flow. They are available in the **Utilities** section in the sidebar.

---

### GET Request
### GET request

Make a GET request to the given URL.
Make a GET request to the specified URL.

**Params**
**Parameters**

- **URL:** The URL to make the request to. There can be more than one URL, in which case the request will be made to each URL in order.
- **URL:** The URL to make the request to. If there are multiple URLs, the request will be made to each URL in order.
- **Headers:** A dictionary of headers to send with the request.

**Output**

- **List of Documents:** A list of Documents containing the JSON response from each request.
- **List of documents:** A list of documents containing the JSON response from each request.
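
A minimal sketch of the behavior described above in plain Python, using the `requests` library (the URLs are placeholders):

```python
import requests

# One request per URL, in order; each JSON response is kept as a separate result.
urls = ["https://example.com/api/a", "https://example.com/api/b"]  # placeholders
headers = {"Accept": "application/json"}
documents = [requests.get(url, headers=headers).json() for url in urls]
```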
---

### POST Request
### POST request

Make a POST request to the given URL.
Make a POST request to the specified URL.

**Params**
**Parameters**

- **URL:** The URL to make the request to.
- **Headers:** A dictionary of headers to send with the request.
- **Document:** The Document containing a JSON object to send with the request.
- **Document:** The document containing a JSON object to send with the request.

**Output**

- **Document:** The JSON response from the request as a Document.
- **Document:** The JSON response from the request as a document.

---

### Update Request
### Update request

Make a PATCH or PUT request to the given URL.
Make a PATCH or PUT request to the specified URL.

**Params**
**Parameters**

- **URL:** The URL to make the request to.
- **Headers:** A dictionary of headers to send with the request.
- **Document:** The Document containing a JSON object to send with the request.
- **Method:** The HTTP method to use for the request. Can be either `PATCH` or `PUT`.
- **Document:** The document containing a JSON object to send with the request.
- **Method:** The HTTP method to use for the request, either `PATCH` or `PUT`.

**Output**

- **Document:** The JSON response from the request as a Document.
- **Document:** The JSON response from the request as a document.

---

### JSON Document Builder
### JSON document builder

Build a Document containing a JSON object using a key and another Document page content.
Build a document containing a JSON object using a key and another document page content.

**Params**
**Parameters**

- **Key:** The key to use for the JSON object.
- **Document:** The Document page to use for the JSON object.
- **Document:** The document page to use for the JSON object.

**Output**

- **List of Documents:** A list containing the Document with the JSON object.
- **List of documents:** A list containing the document with the JSON object.
## Unique ID Generator
## Unique ID generator

Generates a unique identifier (UUID) each time it is invoked, providing a distinct and reliable identifier suitable for a variety of applications.

**Params**
**Parameters**

- **Value:** This field displays the generated unique identifier (UUID). The UUID is generated dynamically for each instance of the component, ensuring uniqueness across different uses.
- **Value:** This field displays the generated unique identifier (UUID). The UUID is dynamically generated for each instance of the component, ensuring uniqueness across different uses.

**Output**

- Returns a unique identifier (UUID) as a string. This UUID is generated using Python's `uuid` module, ensuring that each identifier is unique and can be used as a reliable reference in your application.

<Admonition type="note" title="Note">
The Unique ID Generator is crucial for scenarios requiring distinct identifiers, such as session management, transaction tracking, or any context where different instances or entities must be uniquely identified. The generated UUID is provided as a hexadecimal string, offering a high level of uniqueness and security for identification purposes.
</Admonition>

For additional information and examples, please consult the [Langflow Components Custom Documentation](http://docs.langflow.org/components/custom).
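
A minimal sketch of the generation step with Python's standard `uuid` module:

```python
import uuid

# uuid4 draws from a random source, so each call yields a distinct identifier.
unique_id = uuid.uuid4().hex  # hexadecimal string, e.g. '9f1c0e...'
```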
@@ -1,9 +1,454 @@

import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";

# Vector Stores
# Vector Stores Documentation

<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
</Admonition>

### Astra DB

The `Astra DB` component initializes a vector store using Astra DB from records. It creates Astra DB-based vector indexes to efficiently store and retrieve documents.

**Parameters:**

- **Input:** Documents or records for input.
- **Embedding:** Embedding model Astra DB uses.
- **Collection Name:** Name of the Astra DB collection.
- **Token:** Authentication token for Astra DB.
- **API Endpoint:** API endpoint for Astra DB.
- **Namespace:** Astra DB namespace.
- **Metric:** Metric used by Astra DB.
- **Batch Size:** Batch size for operations.
- **Bulk Insert Batch Concurrency:** Concurrency level for bulk inserts.
- **Bulk Insert Overwrite Concurrency:** Concurrency level for overwriting during bulk inserts.
- **Bulk Delete Concurrency:** Concurrency level for bulk deletions.
- **Setup Mode:** Setup mode for the vector store.
- **Pre Delete Collection:** Option to delete the collection before setup.
- **Metadata Indexing Include:** Fields to include in metadata indexing.
- **Metadata Indexing Exclude:** Fields to exclude from metadata indexing.
- **Collection Indexing Policy:** Indexing policy for the collection.

<Admonition type="note" title="Note">
Ensure you configure the necessary Astra DB token and API endpoint before starting.
</Admonition>

---

### Astra DB Search

`AstraDBSearch` searches an existing Astra DB vector store for documents similar to the input. It uses the `Astra DB` component's functionality for efficient retrieval.

**Parameters:**

- **Search Type:** Type of search, such as Similarity or MMR.
- **Input Value:** Value to search for.
- **Embedding:** Embedding model Astra DB uses.
- **Collection Name:** Name of the Astra DB collection.
- **Token:** Authentication token for Astra DB.
- **API Endpoint:** API endpoint for Astra DB.
- **Namespace:** Astra DB namespace.
- **Metric:** Metric used by Astra DB.
- **Batch Size:** Batch size for operations.
- **Bulk Insert Batch Concurrency:** Concurrency level for bulk inserts.
- **Bulk Insert Overwrite Concurrency:** Concurrency level for overwriting during bulk inserts.
- **Bulk Delete Concurrency:** Concurrency level for bulk deletions.
- **Setup Mode:** Setup mode for the vector store.
- **Pre Delete Collection:** Option to delete the collection before setup.
- **Metadata Indexing Include:** Fields to include in metadata indexing.
- **Metadata Indexing Exclude:** Fields to exclude from metadata indexing.
- **Collection Indexing Policy:** Indexing policy for the collection.

---

### Chroma

`Chroma` sets up a vector store using Chroma for efficient vector storage and retrieval within language processing workflows.

**Parameters:**

- **Collection Name:** Name of the collection.
- **Persist Directory:** Directory to persist the Vector Store.
- **Server CORS Allow Origins (Optional):** CORS allow origins for the Chroma server.
- **Server Host (Optional):** Host for the Chroma server.
- **Server Port (Optional):** Port for the Chroma server.
- **Server gRPC Port (Optional):** gRPC port for the Chroma server.
- **Server SSL Enabled (Optional):** SSL configuration for the Chroma server.
- **Input:** Input data for creating the Vector Store.
- **Embedding:** Embeddings used for the Vector Store.

For detailed documentation and integration guides, please refer to the [Chroma Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/chroma).
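
A minimal sketch of building a Chroma store in LangChain, assuming `docs` and `embedding` come from earlier steps (e.g. a document loader and an embeddings component):

```python
from langchain_community.vectorstores import Chroma

# Collection name and persist directory mirror the parameters above.
db = Chroma.from_documents(
    documents=docs,
    embedding=embedding,
    collection_name="langflow",
    persist_directory="./chroma_db",
)
hits = db.similarity_search("What is a vector store?", k=4)
```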
---

### Chroma Search

`ChromaSearch` searches a Chroma collection for documents similar to the input text. It leverages Chroma to ensure efficient document retrieval.

**Parameters:**

- **Input:** Input text for search.
- **Search Type:** Type of search, such as Similarity or MMR.
- **Collection Name:** Name of the Chroma collection.
- **Index Directory:** Directory where the Chroma index is stored.
- **Embedding:** Embedding model used for vectorization.
- **Server CORS Allow Origins (Optional):** CORS allow origins for the Chroma server.
- **Server Host (Optional):** Host for the Chroma server.
- **Server Port (Optional):** Port for the Chroma server.
- **Server gRPC Port (Optional):** gRPC port for the Chroma server.
- **Server SSL Enabled (Optional):** SSL configuration for the Chroma server.

---

### Couchbase

`Couchbase` builds a Couchbase vector store from records, streamlining the storage and retrieval of documents.

**Parameters:**

- **Embedding:** Model used by Couchbase.
- **Input:** Documents or records.
- **Couchbase Cluster Connection String:** Cluster connection string.
- **Couchbase Cluster Username:** Cluster username.
- **Couchbase Cluster Password:** Cluster password.
- **Bucket Name:** Bucket identifier in Couchbase.
- **Scope Name:** Scope identifier in Couchbase.
- **Collection Name:** Collection identifier in Couchbase.
- **Index Name:** Index identifier.

For detailed documentation and integration guides, please refer to the [Couchbase Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/couchbase).

---

### Couchbase Search

`CouchbaseSearch` leverages the Couchbase component to search for documents based on a similarity metric.

**Parameters:**

- **Input:** Search query.
- **Embedding:** Model used in the Vector Store.
- **Couchbase Cluster Connection String:** Cluster connection string.
- **Couchbase Cluster Username:** Cluster username.
- **Couchbase Cluster Password:** Cluster password.
- **Bucket Name:** Bucket identifier.
- **Scope Name:** Scope identifier.
- **Collection Name:** Collection identifier in Couchbase.
- **Index Name:** Index identifier.

---

### FAISS

The `FAISS` component manages document ingestion into a FAISS Vector Store, optimizing document indexing and retrieval.

**Parameters:**

- **Embedding:** Model used for vectorizing inputs.
- **Input:** Documents to ingest.
- **Folder Path:** Save path for the FAISS index, relative to Langflow.
- **Index Name:** Index identifier.

For more details, see the [FAISS Component Documentation](https://faiss.ai/index.html).
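
A minimal sketch of FAISS ingestion and reload in LangChain, assuming `docs` and `embedding` come from earlier steps; the paths are illustrative:

```python
from langchain_community.vectorstores import FAISS

# Build the index from documents, then persist it under the given folder and name.
db = FAISS.from_documents(docs, embedding)
db.save_local(folder_path="faiss_index", index_name="index")

# Later: reload the index and search it.
db = FAISS.load_local("faiss_index", embedding, index_name="index")
hits = db.similarity_search("query text", k=4)
```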
---

### FAISS Search

`FAISSSearch` searches a FAISS Vector Store for documents similar to a given input, using similarity metrics for efficient retrieval.

**Parameters:**

- **Embedding:** Model used in the FAISS Vector Store.
- **Folder Path:** Path to load the FAISS index from, relative to Langflow.
- **Input:** Search query.
- **Index Name:** Index identifier.

---

### MongoDB Atlas

`MongoDBAtlas` builds a MongoDB Atlas-based vector store from records, streamlining the storage and retrieval of documents.

**Parameters:**

- **Embedding:** Model used by MongoDB Atlas.
- **Input:** Documents or records.
- **Collection Name:** Collection identifier in MongoDB Atlas.
- **Database Name:** Database identifier.
- **Index Name:** Index identifier.
- **MongoDB Atlas Cluster URI:** Cluster URI.
- **Search Kwargs:** Additional search parameters.

<Admonition type="note" title="Note">
Ensure `pymongo` is installed when using the MongoDB Atlas Vector Store.
</Admonition>

---

### MongoDB Atlas Search

`MongoDBAtlasSearch` leverages the MongoDBAtlas component to search for documents based on similarity metrics.

**Parameters:**

- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input:** Search query.
- **Embedding:** Model used in the Vector Store.
- **Collection Name:** Collection identifier.
- **Database Name:** Database identifier.
- **Index Name:** Index identifier.
- **MongoDB Atlas Cluster URI:** Cluster URI.
- **Search Kwargs:** Additional search parameters.

---

### PGVector

`PGVector` integrates a Vector Store within a PostgreSQL database, allowing efficient storage and retrieval of vectors.

**Parameters:**

- **Input:** Value for the Vector Store.
- **Embedding:** Model used.
- **PostgreSQL Server Connection String:** Server URL.
- **Table:** Table name in the PostgreSQL database.

For more details, see the [PGVector Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/pgvector).

<Admonition type="note" title="Note">
Ensure the PostgreSQL server is accessible and configured correctly.
</Admonition>

---

### PGVector Search

`PGVectorSearch` extends `PGVector` to search for documents based on similarity metrics.

**Parameters:**

- **Input:** Search query.
- **Embedding:** Model used.
- **PostgreSQL Server Connection String:** Server URL.
- **Table:** Table name.
- **Search Type:** Type of search, such as "Similarity" or "MMR".

---

### Pinecone

`Pinecone` constructs a Pinecone wrapper from records, setting up Pinecone-based vector indexes for document storage and retrieval.

**Parameters:**

- **Input:** Documents or records.
- **Embedding:** Model used.
- **Index Name:** Index identifier.
- **Namespace:** Namespace used.
- **Pinecone API Key:** API key.
- **Pinecone Environment:** Environment settings.
- **Search Kwargs:** Additional search parameters.
- **Pool Threads:** Number of threads.

<Admonition type="note" title="Note">
Ensure the Pinecone API key and environment are correctly configured.
</Admonition>

---

### Pinecone Search

`PineconeSearch` searches a Pinecone Vector Store for documents similar to the input, using advanced similarity metrics.

**Parameters:**

- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input Value:** Search query.
- **Embedding:** Model used.
- **Index Name:** Index identifier.
- **Namespace:** Namespace used.
- **Pinecone API Key:** API key.
- **Pinecone Environment:** Environment settings.
- **Search Kwargs:** Additional search parameters.
- **Pool Threads:** Number of threads.

---

### Qdrant

`Qdrant` allows efficient similarity searches and retrieval operations, using a list of texts to construct a Qdrant wrapper.

**Parameters:**

- **Input:** Documents or records.
- **Embedding:** Model used.
- **API Key:** Qdrant API key.
- **Collection Name:** Collection identifier.
- **Advanced Settings:** Includes content payload key, distance function, gRPC port, host, HTTPS, location, metadata payload key, path, port, prefer gRPC, prefix, search kwargs, timeout, URL.

---

### Qdrant Search

`QdrantSearch` extends `Qdrant` to search for documents similar to the input based on advanced similarity metrics.

**Parameters:**

- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input Value:** Search query.
- **Embedding:** Model used.
- **API Key:** Qdrant API key.
- **Collection Name:** Collection identifier.
- **Advanced Settings:** Includes content payload key, distance function, gRPC port, host, HTTPS, location, metadata payload key, path, port, prefer gRPC, prefix, search kwargs, timeout, URL.

---

### Redis

`Redis` manages a Vector Store in a Redis database, supporting efficient vector storage and retrieval.

**Parameters:**

- **Index Name:** Default index name.
- **Input:** Data for building the Redis Vector Store.
- **Embedding:** Model used.
- **Schema:** Optional schema file (.yaml) for document structure.
- **Redis Server Connection String:** Server URL.
- **Redis Index:** Optional index name.

For detailed documentation, refer to the [Redis Documentation](https://python.langchain.com/docs/integrations/vectorstores/redis).

<Admonition type="note" title="Note">
Ensure the Redis server URL and index name are configured correctly. Provide a schema if no documents are available.
</Admonition>

---

### Redis Search

`RedisSearch` searches a Redis Vector Store for documents similar to the input.

**Parameters:**

- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input Value:** Search query.
- **Index Name:** Default index name.
- **Embedding:** Model used.
- **Schema:** Optional schema file (.yaml) for document structure.
- **Redis Server Connection String:** Server URL.
- **Redis Index:** Optional index name.

---

### Supabase

`Supabase` initializes a Supabase Vector Store from texts and embeddings, setting up an environment for efficient document retrieval.

**Parameters:**

- **Input:** Documents or records.
- **Embedding:** Model used.
- **Query Name:** Optional query name.
- **Search Kwargs:** Advanced search parameters.
- **Supabase Service Key:** Service key.
- **Supabase URL:** Instance URL.
- **Table Name:** Optional table name.

<Admonition type="note" title="Note">
Ensure the Supabase service key, URL, and table name are properly configured.
</Admonition>

---

### Supabase Search

`SupabaseSearch` searches a Supabase Vector Store for documents similar to the input.

**Parameters:**

- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input Value:** Search query.
- **Embedding:** Model used.
- **Query Name:** Optional query name.
- **Search Kwargs:** Advanced search parameters.
- **Supabase Service Key:** Service key.
- **Supabase URL:** Instance URL.
- **Table Name:** Optional table name.

---

### Vectara

`Vectara` sets up a Vectara Vector Store from files or upserted data, optimizing document retrieval.

**Parameters:**

- **Vectara Customer ID:** Customer ID.
- **Vectara Corpus ID:** Corpus ID.
- **Vectara API Key:** API key.
- **Files Url:** Optional URLs for file initialization.
- **Input:** Optional data for corpus upsert.

For more information, consult the [Vectara Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/vectara).

<Admonition type="note" title="Note">
If inputs or file URLs are provided, they will be processed accordingly.
</Admonition>

---

### Vectara Search

`VectaraSearch` searches a Vectara Vector Store for documents based on the provided input.

**Parameters:**

- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input Value:** Search query.
- **Vectara Customer ID:** Customer ID.
- **Vectara Corpus ID:** Corpus ID.
- **Vectara API Key:** API key.
- **Files Url:** Optional URLs for file initialization.

---

### Weaviate

`Weaviate` facilitates a Weaviate Vector Store setup, optimizing text and document indexing and retrieval.

**Parameters:**

- **Weaviate URL:** Default instance URL.
- **Search By Text:** Indicates whether to search by text.
- **API Key:** Optional API key for authentication.
- **Index Name:** Optional index name.
- **Text Key:** Default text extraction key.
- **Input:** Document or record.
- **Embedding:** Model used.
- **Attributes:** Optional additional attributes.

For more details, see the [Weaviate Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/weaviate).

<Admonition type="note" title="Note">
Ensure the Weaviate instance is running and accessible. Verify that the API key, index name, text key, and attributes are set correctly.
</Admonition>

---

### Weaviate Search

`WeaviateSearch` searches a Weaviate Vector Store for documents similar to the input.

**Parameters:**

- **Search Type:** Type of search, such as "Similarity" or "MMR".
- **Input Value:** Search query.
- **Weaviate URL:** Default instance URL.
- **Search By Text:** Indicates whether to search by text.
- **API Key:** Optional API key for authentication.
- **Index Name:** Optional index name.
- **Text Key:** Default text extraction key.
- **Embedding:** Model used.
- **Attributes:** Optional additional attributes.

---
@@ -1,20 +0,0 @@

import Admonition from '@theme/Admonition';

# Wrappers

<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
</Admonition>

### TextRequestsWrapper

This component is designed to work with the Python Requests module, which is a popular tool for making web requests. It is used to fetch data from a particular website.

**Params**

- **header:** Specifies the headers to be included in the HTTP request. Defaults to `{'Authorization': 'Bearer <token>'}`.

Headers are key-value pairs that provide additional information about the request or the client making the request. They can be used to send authentication credentials, specify the content type of the request, set cookies, and more. They allow the client and the server to communicate additional information beyond the basic request.
@ -2,11 +2,11 @@
|
|||
|
||||
## 🤖 Join **Langflow** Discord server
|
||||
|
||||
Join us to ask questions and showcase your projects.
|
||||
Join us to ask questions and showcase your projects.
|
||||
|
||||
Let's bring together the building blocks of AI integration!
|
||||
Let's bring together the building blocks of AI integration!
|
||||
|
||||
Langflow [Discord](https://discord.gg/EqksyE2EX9) server.
|
||||
Langflow [Discord](https://discord.gg/EqksyE2EX9) server.
|
||||
|
||||
---
|
||||
|
||||
|
|

@@ -15,9 +15,10 @@

Follow [@langflow_ai](https://twitter.com/langflow_ai) on **Twitter** to get the latest news about **Langflow**.

---

## ⭐️ Star **Langflow** on GitHub

You can "star" **Langflow** in [GitHub](https://github.com/langflow-ai/langflow).

By adding a star, other users will be able to find it more easily and see that it has been already useful for others.

@@ -25,14 +26,12 @@ By adding a star, other users will be able to find it more easily and see that i

## 👀 Watch the GitHub repository for releases

You can "watch" **Langflow** in [GitHub](https://github.com/langflow-ai/langflow).

If you select "Watching" instead of "Releases only," you will receive notifications when someone creates a new issue or question. You can also specify that you only want to be notified about new issues, discussions, PRs, etc.

Then you can try and help them solve those questions.

---

Thanks! 🚀

docs/docs/contributing/contribute-component.md (new file, 45 lines)

@@ -0,0 +1,45 @@

import ZoomableImage from "/src/theme/ZoomableImage.js";

# How to Contribute Components?

As of Langflow 1.0 alpha, new components are added as objects of the [CustomComponent](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/interface/custom/custom_component/custom_component.py) class, and any dependencies are added to the [pyproject.toml](https://github.com/langflow-ai/langflow/blob/dev/pyproject.toml#L27) file.

## Add an example component

You have a new document loader called **MyCustomDocumentLoader** and it would look awesome in Langflow. Here's how to contribute it (a minimal skeleton is sketched after this list).

1. Write your loader as an object of the [CustomComponent](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/interface/custom/custom_component/custom_component.py) class. You'll create a new class, `MyCustomDocumentLoader`, that inherits from `CustomComponent` and overrides the base class's methods.
2. Define optional attributes like `display_name`, `description`, and `documentation` to provide information about your custom component.
3. Implement the `build_config` method to define the configuration options for your custom component.
4. Implement the `build` method to define the logic for taking input parameters specified in the `build_config` method and returning the desired output.
5. Add the code to the [/components/documentloaders](https://github.com/langflow-ai/langflow/tree/dev/src/backend/base/langflow/components) folder.
6. Add the dependency to [/documentloaders/\_\_init\_\_.py](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/components/documentloaders/__init__.py) as `from .MyCustomDocumentLoader import MyCustomDocumentLoader`.
7. Add any new dependencies to the outer [pyproject.toml](https://github.com/langflow-ai/langflow/blob/dev/pyproject.toml#L27) file.
8. Submit documentation for your component. For this example, you'd submit documentation to the [loaders page](https://github.com/langflow-ai/langflow/blob/dev/docs/docs/components/loaders.mdx).
9. Submit your changes as a pull request. The Langflow team will have a look, suggest changes, and add your component to Langflow.
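
As a sketch only, a hypothetical `MyCustomDocumentLoader` following steps 1-4 might look like this; the attribute values and config fields are placeholders, not a component that ships with Langflow:

```python
from langflow.custom import CustomComponent
from langchain.schema import Document


class MyCustomDocumentLoader(CustomComponent):
    display_name = "My Custom Document Loader"
    description = "Loads a local text file into a Document."
    documentation = "https://example.com/my-custom-document-loader"  # placeholder URL

    def build_config(self) -> dict:
        # Each key becomes a configurable field on the component.
        return {"path": {"display_name": "File Path", "required": True}}

    def build(self, path: str) -> Document:
        # Read the file and wrap its contents in a LangChain Document.
        with open(path, encoding="utf-8") as file:
            return Document(page_content=file.read())
```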

## User Sharing

You might want to share and test your custom component with others without merging it into the main source code.

If so, you can share your component on the Langflow store.

1. [Register at the Langflow store](https://www.langflow.store/login/).
2. Undergo pre-validation before receiving an API key.
3. To deploy your amazing component directly to the Langflow store, without it being merged into the main source code, navigate to your flow, and then click **Share**.
   The share window appears:

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/add-component-to-store.png",
    dark: "img/add-component-to-store.png",
  }}
  style={{ width: "50%", margin: "20px auto" }}
/>

4. Choose whether you want the flow to be public or private.
   You can also **Export** your flow as a JSON file from this window.
   When you're ready to share the flow, click **Share Flow**.
   You should see a **Flow shared successfully** popup.
5. To confirm, navigate to the **Langflow Store** and filter results by **Created By Me**. You should see your new flow on the **Langflow Store**.

@@ -1,11 +1,11 @@

# GitHub Issues

Our [issues](https://github.com/langflow-ai/langflow/issues) page is kept up to date
with bugs, improvements, and feature requests. There is a taxonomy of labels to help
with sorting and discovery of issues of interest.

If you're looking for help with your code, consider posting a question on the
[GitHub Discussions board](https://github.com/langflow-ai/langflow/discussions). Please
understand that we won't be able to provide individual support via email. We
also believe that help is much more valuable if it's **shared publicly**,
so that more people can benefit from it.

@@ -21,7 +21,6 @@ so that more people can benefit from it.

logs or tracebacks, you can wrap them in `<details>` and `</details>`. This
[collapses the content](https://developer.mozilla.org/en/docs/Web/HTML/Element/details) so it only becomes visible on click, making the issue easier to read and follow.

## Issue labels

[See this page](https://github.com/langflow-ai/langflow/labels) for an overview of the system we use to tag our issues and pull requests.

@@ -1,6 +1,6 @@

# How to contribute?

👋 Hello there! We welcome contributions from developers of all levels to our open-source project on [GitHub](https://github.com/langflow-ai/langflow). If you'd like to contribute, please check our contributing guidelines and help make Langflow more accessible.

As an open-source project in a rapidly developing field, we are extremely open
to contributions, whether in the form of a new feature, improved infra, or better documentation.

@@ -10,6 +10,7 @@ To contribute to this project, please follow a ["fork and pull request"](https:/

Please do not try to push directly to this repo unless you are a maintainer.

---

## Local development

You can develop Langflow using docker compose, or locally.

@@ -17,6 +18,7 @@ You can develop Langflow using docker compose, or locally.

We provide a .vscode/launch.json file for debugging the backend in VSCode, which is a lot faster than using docker compose.

Setting up hooks:

```bash
make init
```

@@ -48,7 +50,6 @@ And the frontend:

make frontend
```

---

## Docker compose

@@ -6,10 +6,9 @@ This guide will help you set up a Langflow development VM in a Google Cloud Plat

> Note: When Cloud Shell opens, be sure to select **Trust repo**. Some `gcloud` commands might not run in an ephemeral Cloud Shell environment.

## Standard VM

[](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial.md)

This script sets up a Debian-based VM with the Langflow package, Nginx, and the necessary configurations to run the Langflow Dev environment.
|
|||
|
||||
## Spot/Preemptible Instance
|
||||
|
||||
[](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/genome21/langflow&working_dir=scripts&shellonly=true&tutorial=walkthroughtutorial_spot.md)
|
||||
[](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/genome21/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
|
||||
|
||||
When running as a [spot (preemptible) instance](https://cloud.google.com/compute/docs/instances/preemptible), the code and VM will behave the same way as in a regular instance, executing the startup script to configure the environment, install necessary dependencies, and run the Langflow application. However, **due to the nature of spot instances, the VM may be terminated at any time if Google Cloud needs to reclaim the resources**. This makes spot instances suitable for fault-tolerant, stateless, or interruptible workloads that can handle unexpected terminations and restarts.
|
||||
|
||||
---
|
||||
|
||||
## Pricing (approximate)
|
||||
|
||||
> For a more accurate breakdown of costs, please use the [**GCP Pricing Calculator**](https://cloud.google.com/products/calculator)
|
||||
|
||||
|
||||
| Component | Regular Cost (Hourly) | Regular Cost (Monthly) | Spot/Preemptible Cost (Hourly) | Spot/Preemptible Cost (Monthly) | Notes |
|
||||
| -------------- | --------------------- | ---------------------- | ------------------------------ | ------------------------------- | ----- |
|
||||
| 100 GB Disk | - | $10/month | - | $10/month | Disk cost remains the same for both regular and Spot/Preemptible VMs |
|
||||
| VM (n1-standard-4) | $0.15/hr | ~$108/month | ~$0.04/hr | ~$29/month | The VM cost can be significantly reduced using a Spot/Preemptible instance |
|
||||
| **Total** | **$0.15/hr** | **~$118/month** | **~$0.04/hr** | **~$39/month** | Total costs for running the VM and disk 24/7 for an entire month |
|
||||
| Component | Regular Cost (Hourly) | Regular Cost (Monthly) | Spot/Preemptible Cost (Hourly) | Spot/Preemptible Cost (Monthly) | Notes |
|
||||
| ------------------ | --------------------- | ---------------------- | ------------------------------ | ------------------------------- | -------------------------------------------------------------------------- |
|
||||
| 100 GB Disk | - | $10/month | - | $10/month | Disk cost remains the same for both regular and Spot/Preemptible VMs |
|
||||
| VM (n1-standard-4) | $0.15/hr | ~$108/month | ~$0.04/hr | ~$29/month | The VM cost can be significantly reduced using a Spot/Preemptible instance |
|
||||
| **Total** | **$0.15/hr** | **~$118/month** | **~$0.04/hr** | **~$39/month** | Total costs for running the VM and disk 24/7 for an entire month |
|
||||
|
|
|
|||
|
|

@@ -16,6 +16,12 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";

    light: "img/buffer-memory.png",
    dark: "img/buffer-memory.png",
  }}
  style={{
    width: "80%",
    margin: "20px auto",
    display: "flex",
    justifyContent: "center",
  }}
/>

#### <a target="\_blank" href="json_files/Buffer_Memory.json" download>Download Flow</a>

@@ -22,6 +22,13 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";

    light: "img/basic-chat.png",
    dark: "img/basic-chat.png",
  }}
  style={{
    width: "80%",
    margin: "20px auto",
    display: "flex",
    justifyContent: "center",
  }}
/>

#### <a target="\_blank" href="json_files/Basic_Chat.json" download>Download Flow</a>

@@ -34,6 +34,12 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";

    light: "img/csv-loader.png",
    dark: "img/csv-loader.png",
  }}
  style={{
    width: "80%",
    margin: "20px auto",
    display: "flex",
    justifyContent: "center",
  }}
/>

#### <a target="\_blank" href="json_files/CSV_Loader.json" download>Download Flow</a>

@@ -3,9 +3,6 @@ description: Custom Components

hide_table_of_contents: true
---

import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";

# FlowRunner Component

The CustomComponent class allows us to create components that interact with Langflow itself. In this example, we will make a component that runs other flows available in "My Collection".

@@ -18,7 +15,7 @@ The CustomComponent class allows us to create components that interact with Lang

  }}
  style={{
    width: "30%",
    margin: "20px auto",
    display: "flex",
    justifyContent: "center",
  }}

@@ -35,7 +32,7 @@ We will cover how to:

<summary>Example Code</summary>

```python
from langflow.custom import CustomComponent
from langchain.schema import Document

class FlowRunner(CustomComponent):

@@ -75,7 +72,7 @@ class FlowRunner(CustomComponent):

<CH.Scrollycoding rows={20} className={""}>

```python
from langflow.custom import CustomComponent


class MyComponent(CustomComponent):

@@ -95,7 +92,7 @@ The typical structure of a Custom Component is composed of _`display_name`_ and

---

```python
from langflow.custom import CustomComponent


# focus

@@ -118,7 +115,7 @@ Let's start by defining our component's _`display_name`_ and _`description`_.

---

```python
from langflow.custom import CustomComponent
# focus
from langchain.schema import Document


@@ -140,7 +137,7 @@ Second, we will import _`Document`_ from the [_langchain.schema_](https://docs.l

---

```python
from langflow.custom import CustomComponent
# focus
from langchain.schema import Document


@@ -162,12 +159,12 @@ Now, let's add the [parameters](focus://11[20:55]) and the [return type](focus:/

- _`flow_name`_ is the name of the flow we want to run.
- _`document`_ is the input document to be passed to that flow.
- Since _`Document`_ is a LangChain type, it will add an input [handle](../administration/components) to the component ([see more](../components/custom)).

---

```python focus=13:14
from langflow.custom import CustomComponent
from langchain.schema import Document


@@ -189,7 +186,7 @@ We can now start writing the _`build`_ method. Let's list available flows in "My

---

```python focus=15:18
from langflow.custom import CustomComponent
from langchain.schema import Document


@@ -222,7 +219,7 @@ And retrieve a flow that matches the selected name (we'll make a dropdown input

---

```python
from langflow.custom import CustomComponent
from langchain.schema import Document


@@ -245,12 +242,12 @@ class FlowRunner(CustomComponent):

```

You can load this flow using _`get_flow`_ and set a _`tweaks`_ dictionary to customize it. Find more about tweaks in our [features guidelines](../administration/features#code).

---

```python
from langflow.custom import CustomComponent
from langchain.schema import Document


@@ -287,7 +284,7 @@ The content of a document can be extracted using the _`page_content`_ attribute,

---

```python focus=9:16
from langflow.custom import CustomComponent
from langchain.schema import Document


@@ -366,3 +363,6 @@ Done! This is what our script and custom component looks like:

/>

</div>

import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";

@@ -1,28 +0,0 @@

import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";

# 📚 How to Upload Examples?

We welcome all examples that can help our community learn and explore Langflow's capabilities.
Langflow Examples is a repository on [GitHub](https://github.com/logspace-ai/langflow_examples) that contains examples of flows that people can use for inspiration and learning.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/community-examples.png",
    dark: "img/community-examples.png",
  }}
  style={{ width: "100%" }}
/>

To upload examples, please follow these steps:

1. **Create a Flow:** First, create a flow using Langflow. You can use any of the available templates or create a new flow from scratch.

2. **Export the Flow:** Once you have created a flow, export it as a JSON file. Make sure to give your file a descriptive name and include a brief description of what it does.

3. **Submit a Pull Request:** Finally, submit a pull request (PR) to the examples repo. Make sure to include your JSON file in the PR.

If your example uses any third-party libraries or packages, please include them in your PR and make sure that your example follows the [**⛓️ Langflow Code Of Conduct**](https://github.com/logspace-ai/langflow/blob/dev/CODE_OF_CONDUCT.md).

@@ -1,46 +0,0 @@

import Admonition from "@theme/Admonition";

# MidJourney Prompt Chain

The `MidJourneyPromptChain` can be used to generate imaginative and detailed MidJourney prompts.

For example, type something like:

```bash
Dragon
```

And get a response such as:

```text
Imagine a mysterious forest, the trees are tall and ancient, their branches reaching up to the sky. Through the darkness, a dragon emerges from the shadows, its scales shimmering in the moonlight. Its wingspan is immense, and its eyes glow with a fierce intensity. It is a majestic and powerful creature, one that commands both respect and fear.
```

<Admonition type="tip">
  Notice that the `ConversationSummaryMemory` stores a summary of the
  conversation over time. Try using it to create better prompts as the
  conversation goes on.
</Admonition>

## ⛓️ Langflow Example

import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/midjourney-prompt-chain.png",
    dark: "img/midjourney-prompt-chain.png",
  }}
/>

#### <a target="\_blank" href="json_files/MidJourney_Prompt_Chain.json" download>Download Flow</a>

<Admonition type="note" title="LangChain Components 🦜🔗">

- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`ConversationSummaryMemory`](https://python.langchain.com/docs/modules/memory/types/summary)

</Admonition>

@@ -1,58 +0,0 @@

import Admonition from "@theme/Admonition";

# Multiple Vector Stores

The example below shows an agent operating with two vector stores built upon different data sources.

The `TextLoader` loads a TXT file, while the `WebBaseLoader` pulls text from webpages into a document format to be accessed downstream. The `Chroma` vector stores are created analogously to what we demonstrated in our [CSV Loader](/examples/csv-loader.mdx) example. Finally, the `VectorStoreRouterAgent` constructs an agent that routes between the vector stores.

<Admonition type="info">
  Get the TXT file used
  [here](https://github.com/hwchase17/chat-your-data/blob/master/state_of_the_union.txt).
</Admonition>

URL used by the `WebBaseLoader`:

```text
https://pt.wikipedia.org/wiki/Harry_Potter
```

<Admonition type="tip">
  When you build the flow, request information about one of the sources. The
  agent should be able to use the correct source to generate a response.
</Admonition>

<Admonition type="info">
  Learn more about Multiple Vector Stores
  [here](https://python.langchain.com/docs/modules/data_connection/vectorstores/).
</Admonition>

## ⛓️ Langflow Example

import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/multiple-vectorstores.png",
    dark: "img/multiple-vectorstores.png",
  }}
/>

#### <a target="\_blank" href="json_files/Multiple_Vector_Stores.json" download>Download Flow</a>

<Admonition type="note" title="LangChain Components 🦜🔗">

- [`WebBaseLoader`](https://python.langchain.com/docs/integrations/document_loaders/web_base)
- [`TextLoader`](https://python.langchain.com/docs/modules/data_connection/document_loaders/)
- [`CharacterTextSplitter`](https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/character_text_splitter)
- [`OpenAIEmbedding`](https://python.langchain.com/docs/integrations/text_embedding/openai)
- [`Chroma`](https://python.langchain.com/docs/integrations/vectorstores/chroma)
- [`VectorStoreInfo`](https://python.langchain.com/docs/modules/data_connection/vectorstores/)
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`VectorStoreRouterToolkit`](https://js.langchain.com/docs/modules/agents/tools/how_to/agents_with_vectorstores)
- [`VectorStoreRouterAgent`](https://js.langchain.com/docs/modules/agents/tools/how_to/agents_with_vectorstores)

</Admonition>

@@ -43,6 +43,12 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";

    light: "img/python-function.png",
    dark: "img/python-function.png",
  }}
  style={{
    width: "80%",
    margin: "20px auto",
    display: "flex",
    justifyContent: "center",
  }}
/>

#### <a target="\_blank" href="json_files/Python_Function.json" download>Download Flow</a>

@@ -37,6 +37,12 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";

    light: "img/serp-api-tool.png",
    dark: "img/serp-api-tool.png",
  }}
  style={{
    width: "80%",
    margin: "20px auto",
    display: "flex",
    justifyContent: "center",
  }}
/>

#### <a target="\_blank" href="json_files/SerpAPI_Tool.json" download>Download Flow</a>

docs/docs/getting-started/canvas.mdx (new file, 284 lines)

@@ -0,0 +1,284 @@

import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";

# 🎨 Langflow Canvas

The **Langflow canvas** is the central hub of Langflow, where you'll assemble new flows from components, run them, and see the results.

To get a feel for the canvas, we'll examine a basic prompting flow.
You can either build this flow yourself, or select **New Project** > **Basic prompting** to open a canvas with the flow pre-built.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/basic-prompting.png",
    dark: "img/basic-prompting.png",
  }}
  style={{ width: "30%", margin: "20px auto" }}
/>

## Flows, components, collections, and projects

A [flow](#flow) is a pipeline of components connected together in the Langflow canvas.

A [component](#component) is a single building block within a flow. A component has inputs, outputs, and parameters that define its functionality.

A [collection](#collection) is a snapshot of the flows available in your database. Collections can be downloaded to local storage and uploaded for future use.

A [project](#project) can be a component or a flow. Projects are saved as part of your collection.

For example, the **OpenAI LLM** is a **component** of the **Basic prompting** flow, and the **flow** is stored in a **collection**.

## Flow

A **flow** is a pipeline of components connected together in the Langflow canvas.

For example, the [Basic prompting](../starter-projects/basic-prompting.mdx) flow is a pipeline of four components:

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/basic-prompting.png",
    dark: "img/basic-prompting.png",
  }}
  style={{ width: "80%", margin: "20px auto" }}
/>

In this flow, the **OpenAI LLM component** receives input (left side) and produces output (right side) - in this case, receiving input from the **Chat Input** and **Prompt** components and producing output to the **Chat Output** component.

## Component

Components are the building blocks of flows. They consist of inputs, outputs, and parameters that define their functionality. These elements provide a convenient and straightforward way to compose LLM-based applications. Learn more about components and how they work in the LangChain [documentation](https://python.langchain.com/docs/integrations/components).

<div style={{ marginBottom: "20px" }}>
  During the flow creation process, you will notice handles (colored circles)
  attached to one or both sides of a component. These handles represent the
  availability to connect to other components. Hover over a handle to see connection details.
</div>

<div style={{ marginBottom: "20px" }}>
  For example, if you select a <code>ConversationChain</code> component, you
  will see orange <span style={{ color: "orange" }}>o</span> and purple{" "}
  <span style={{ color: "purple" }}>o</span> input handles. They indicate that
  this component accepts an LLM and a Memory component as inputs. The red
  asterisk <span style={{ color: "red" }}>*</span> means that at least one input
  of that type is required.
</div>

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/single-component.png"),
    dark: useBaseUrl("img/single-component.png"),
  }}
  style={{ width: "50%", maxWidth: "800px", margin: "20px auto" }}
/>

<div style={{ marginBottom: "20px" }}>
  In the top right corner of the component, you'll find the component status icon.
  Build the flow by clicking the **Playground** button at the bottom right of the canvas.

  Once validation is complete, the status of each validated component should turn green.
  To debug, hover over the component status to see the outputs.
</div>

---

### Component Parameters

Langflow components can be edited by clicking the component settings button. Hide parameters to reduce complexity and keep the canvas clean and intuitive for experimentation.

<div
  style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
  <ReactPlayer playing controls url="/videos/langflow_parameters.mp4" />
</div>

### Component menu

Each component is a little unique, but they will all have a menu bar on top that looks something like this.
The menu options are **Code**, **Save**, **Duplicate**, and **More**.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/chat-input-with-menu.png",
    dark: "img/chat-input-with-menu.png",
  }}
  style={{ width: "30%", margin: "20px auto" }}
/>

### Code menu

The **Code** button displays your component's Python code.
You can modify the code and save it.

#### Save

Save your component to the **Saved** components folder for re-use.

#### Duplicate

Duplicate your component in the canvas.

#### More

**Advanced** - modify the parameters of your component.

<div
  style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
  <ReactPlayer playing controls url="/videos/langflow_parameters.mp4" />
</div>

**Copy** - copy your component.

**Share** - share your component to the Langflow store.

**Docs** - view documentation for your component.

**Delete** - delete your component.

### Group multiple components

Components without input or output nodes can be grouped into a single component for re-use.
This is useful for combining large flows into single components (like RAG with a vector database, for example) and saves space in the canvas.

1. Hold **Shift** and drag to select the **Prompt** and **OpenAI** components.
2. Select **Group**.
3. The components merge into a single component.
4. To save the new component, select **Save**. It can now be re-used from the **Saved** components folder.

## Playground

Run your flow by clicking the **Playground** button.

For more, see [Playground](../administration/playground.mdx).

## API

The **API** button opens the API window, where Langflow presents code for integrating your flow into external applications.

Modify the call's parameters in the **Tweaks** window, click the **Copy Code** or **Download** buttons, and paste your code where you want to use it.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/api-window.png",
    dark: "img/api-window.png",
  }}
  style={{ width: "50%", margin: "20px auto" }}
/>

### curl

The **curl** tab displays sample code for posting a query to your flow.
Modify the `input_value` to change your input message.

```bash
curl -X POST \
  http://127.0.0.1:7863/api/v1/run/f2eefd80-bb91-4190-9279-0d6ffafeaac4\?stream\=false \
  -H 'Content-Type: application/json' \
  -d '{"input_value": "is anybody there?",
  "output_type": "chat",
  "input_type": "chat",
  "tweaks": {
    "Prompt-uxBqP": {},
    "OpenAIModel-k39HS": {},
    "ChatOutput-njtka": {},
    "ChatInput-P3fgL": {}
  }}'
```

Result:

```json
{"session_id":"f2eefd80-bb91-4190-9279-0d6ffafeaac4:53856a772b8e1cfcb3dd2e71576b5215399e95bae318d3c02101c81b7c252da3","outputs":[{"inputs":{"input_value":"is anybody there?"},"outputs":[{"results":{"result":"Arrr, me hearties! Aye, this be Captain [Your Name] speakin'. What be ye needin', matey?"},"artifacts":{"message":"Arrr, me hearties! Aye, this be Captain [Your Name] speakin'. What be ye needin', matey?","sender":"Machine","sender_name":"AI"},"messages":[{"message":"Arrr, me hearties! Aye, this be Captain [Your Name] speakin'. What be ye needin', matey?","sender":"Machine","sender_name":"AI","component_id":"ChatOutput-njtka"}],"component_display_name":"Chat Output","component_id":"ChatOutput-njtka"}]}]}
```

### Python API

The **Python API** tab displays code to interact with your flow using the Python HTTP requests library.
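
As a rough illustration (not the exact code the tab generates), the curl request above could be issued with the `requests` library like this; the host, port, and flow ID are taken from the curl example:

```python
import requests

# Host, port, and flow ID match the curl example above; adjust for your instance.
BASE_API_URL = "http://127.0.0.1:7863/api/v1/run"
FLOW_ID = "f2eefd80-bb91-4190-9279-0d6ffafeaac4"

payload = {
    "input_value": "is anybody there?",
    "output_type": "chat",
    "input_type": "chat",
    "tweaks": {},  # per-component overrides, as in the Tweaks tab
}

response = requests.post(
    f"{BASE_API_URL}/{FLOW_ID}",
    json=payload,
    params={"stream": "false"},
)
print(response.json())
```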

### Python Code

The **Python Code** tab displays code to interact with your flow's `.json` file using the Langflow runtime.

### Chat Widget HTML

The **Chat Widget HTML** tab displays code that can be inserted in the `<body>` of your HTML to interact with your flow.
For more, see the [Chat widget documentation](../administration/chat-widget.mdx).

### Tweaks

The **Tweaks** tab displays the available parameters for your flow.
Modifying the parameters changes the code parameters across all windows.
For example, changing the **Chat Input** component's `input_value` will change that value across all API calls.

<div
  style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
  <ReactPlayer playing controls url="/videos/langflow_api.mp4" />
</div>

## Collection

A collection is a snapshot of flows available in a database.

Collections can be downloaded to local storage and uploaded for future use.

<div style={{ marginBottom: '20px', display: 'flex', justifyContent: 'center' }}>
  <ReactPlayer playing controls url='/videos/langflow_collection.mp4' />
</div>

## Project

A **Project** can be a flow or a component. To view your saved projects, select **My Collection**.

Your **Projects** are displayed.

Click the **Playground** button to run a flow from the **My Collection** screen.

In the top left corner of the screen are options for **Download Collection**, **Upload Collection**, and **New Project**.

Select **Download Collection** to save your project to your local machine. This downloads all flows and components as a `.json` file.

Select **Upload Collection** to upload a flow or component `.json` file from your local machine.

Select **New Project** to create a new project. In addition to a blank canvas, [starter projects](../starter-projects/basic-prompting.mdx) are also available.

## Project options menu

To see options for your project, in the upper left corner of the canvas, select the dropdown menu.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/project-options-menu.png",
    dark: "img/project-options-menu.png",
  }}
  style={{ width: "30%", margin: "20px auto" }}
/>

**New** - Start a new project.

**Duplicate** - Duplicate the current flow as a new project.

**Settings** - Modify the project's **Name** or **Description**.

**Import** - Upload a flow `.json` file from your local machine.

**Export** - Download your current project to your local machine as a `.json` file.

**Undo** or **Redo** - Undo or redo your last action.

@@ -1,38 +0,0 @@

import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";

# 🎨 Creating Flows

## Compose

Creating flows with Langflow is easy. Drag sidebar components onto the canvas and connect them together to create your pipeline. Langflow provides a range of [LangChain components](https://python.langchain.com/docs/modules/) to choose from, including LLMs, prompt serializers, agents, and chains.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/langflow_canvas.png",
    dark: "img/langflow_canvas.png"
  }}
/>

## Fork

The easiest way to start with Langflow is by forking a **community example**. Forking an example stores a copy in your project collection, allowing you to edit and save the modified version as a new flow.

<div
  style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
  <ReactPlayer playing controls url="/videos/langflow_fork.mp4" />
</div>

## Build

Building a flow means validating that the components have their prerequisites fulfilled and are properly instantiated. When a chat message is sent, the flow will run for the first time, executing the pipeline.

<div
  style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
  <ReactPlayer playing controls url="/videos/langflow_build.mp4" />
</div>

docs/docs/getting-started/flows-components-collections.mdx (new file, 26 lines)

@@ -0,0 +1,26 @@

import ThemedImage from '@theme/ThemedImage';
import useBaseUrl from '@docusaurus/useBaseUrl';
import ZoomableImage from '/src/theme/ZoomableImage.js';
import ReactPlayer from 'react-player';

# 🖥️ Flows, components, collections, and projects

## TL;DR

A [flow](#flow) is a pipeline of components connected together in the Langflow canvas.

A [component](#component) is a single building block within a flow. A component has inputs, outputs, and parameters that define its functionality.

A [collection](#collection) is a snapshot of the flows available in your database. Collections can be downloaded to local storage and uploaded for future use.

A [project](#project) can be a component or a flow. Projects are saved as part of your collection.

For example, the **OpenAI LLM** is a **component** of the **Basic prompting** flow, and the **flow** is stored in a **collection**.

## Component

@@ -1,20 +0,0 @@

# 🤗 HuggingFace Spaces

A fully featured version of Langflow can be accessed via HuggingFace Spaces with no installation required.

import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/hugging-face.png",
    dark: "img/hugging-face.png",
  }}
  style={{ width: "100%" }}
/>

Check out Langflow on [HuggingFace Spaces](https://huggingface.co/spaces/Logspace/Langflow).

docs/docs/getting-started/install-langflow.mdx (new file, 86 lines)

@@ -0,0 +1,86 @@

import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";

# 📦 Install Langflow

<Admonition type="info">
  Langflow v1.0 alpha is also available in HuggingFace Spaces. [Clone the space using this link](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true) to create your own Langflow workspace in minutes.
</Admonition>

Langflow requires [Python 3.10](https://www.python.org/downloads/release/python-3100/) and [pip](https://pypi.org/project/pip/) or [pipx](https://pipx.pypa.io/stable/installation/) to be installed on your system.

Install Langflow with pip:

```bash
python -m pip install langflow -U
```

Install Langflow with pipx:

```bash
pipx install langflow --python python3.10 --fetch-missing-python
```

Pipx can fetch the missing Python version for you with `--fetch-missing-python`, but you can also install the Python version manually.

## Install Langflow pre-release

To install a pre-release version of Langflow:

pip:

```bash
python -m pip install langflow --pre --force-reinstall
```

pipx:

```bash
pipx install langflow --python python3.10 --fetch-missing-python --pip-args="--pre --force-reinstall"
```

Use `--force-reinstall` to ensure you have the latest version of Langflow and its dependencies.

## Having a problem?

If you encounter a problem, see [Common Installation Issues](/migration/possible-installation-issues).

To get help in the Langflow CLI:

```bash
python -m langflow --help
```

## ⛓️ Run Langflow

1. To run Langflow, enter the following command.

```bash
python -m langflow run
```

2. Confirm that a local Langflow instance starts by visiting `http://127.0.0.1:7860` in a Chromium-based browser.

```bash
│ Welcome to ⛓ Langflow │
│ │
│ Access http://127.0.0.1:7860 │
│ Collaborate, and contribute at our GitHub Repo 🚀 │
```

3. Continue on to the [Quickstart](./quickstart.mdx).

## HuggingFace Spaces

HuggingFace provides a great alternative for running Langflow in their Spaces environment. This means you can run Langflow without any local installation required.

In a Chromium-based browser, go to the [Langflow Space](https://huggingface.co/spaces/Langflow/Langflow?duplicate=true) or [Langflow v1.0 alpha Preview Space](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true).

You'll be presented with the following screen:

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/duplicate-space.png",
    dark: "img/duplicate-space.png",
  }}
  style={{ width: "100%", margin: "20px auto" }}
/>

Name your Space, define the visibility (Public or Private), and click **Duplicate Space** to start the installation process. When installation is finished, you'll be redirected to the Space's main page to start using Langflow right away!

@@ -1,15 +0,0 @@

# 📦 How to install?

## Installation

You can install Langflow from pip:

```bash
pip install langflow
```

Next, run:

```bash
langflow
```

docs/docs/getting-started/new-to-llms.mdx (new file, 10 lines)

@@ -0,0 +1,10 @@

# 📚 New to LLMs?

Large Language Models, or LLMs, are part of an exciting new world in computing.

We made Langflow for anyone to create with LLMs, and hope you'll feel comfortable installing Langflow and [getting started](./quickstart.mdx).

If you want to learn more about LLMs, prompt engineering, and AI models, Langflow recommends [promptingguide.ai](https://promptingguide.ai), an open-source repository of prompt engineering content maintained by AI experts.
PromptingGuide offers content for [beginners](https://www.promptingguide.ai/introduction/basics) and [experts](https://www.promptingguide.ai/techniques/cot), as well as the latest [research papers](https://www.promptingguide.ai/papers) and [test results](https://www.promptingguide.ai/research) fueling AI's progress.

Wherever you are on your AI journey, it's helpful to keep Prompting Guide open in a tab.

docs/docs/getting-started/quickstart.mdx (new file, 79 lines)

@@ -0,0 +1,79 @@

import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";

# ⚡️ Quickstart

This guide demonstrates how to build a basic prompt flow and modify that prompt for different outcomes.

## Prerequisites

* [Langflow installed and running](./install-langflow.mdx)

* [OpenAI API key](https://platform.openai.com)

<Admonition type="info">
  Langflow v1.0 alpha is also available in HuggingFace Spaces. [Clone the space using this link](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true) to create your own Langflow workspace in minutes.
</Admonition>

## Hello World - Basic Prompting

Let's start with a Prompt component to instruct an OpenAI Model.

Prompts serve as the inputs to a large language model (LLM), acting as the interface between human instructions and computational tasks.

By submitting natural language requests in a prompt to an LLM, you can obtain answers, generate text, and solve problems.

1. From the Langflow dashboard, click **New Project**.
2. Select **Basic Prompting**.
3. The **Basic Prompting** flow is created.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/quickstart.png",
    dark: "img/quickstart.png",
  }}
  style={{ width: "80%", margin: "20px auto" }}
/>

This flow allows you to chat with the **OpenAI** component via a **Prompt**.
Examine the **Prompt** component. The **Template** field instructs the LLM to `Answer the user as if you were a pirate.`
This should be interesting...

4. To create an environment variable for the **OpenAI** component, in the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**.
   1. In the **Variable Name** field, enter `openai_api_key`.
   2. In the **Value** field, paste your OpenAI API Key (`sk-...`).
   3. Click **Save Variable**.

## Run the basic prompting flow

1. Click the **Run** button.
   The **Interaction Panel** opens, where you can chat with your bot.
2. Type a message and press Enter.
   And... Ahoy! 🏴‍☠️
   The bot responds in a piratical manner!

## Modify the prompt for a different result

1. To modify your prompt results, in the **Prompt** template, click the **Template** field.
   The **Edit Prompt** window opens.
2. Change `Answer the user as if you were a pirate` to a different character, perhaps `Answer the user as if you were Harold Abelson.`
3. Run the basic prompting flow again.
   The response will be markedly different.

## Next steps

Well done! You've built your first prompt in Langflow. 🎉

By adding Langflow components to your flow, you can create all sorts of interesting behaviors.

Here are a couple of examples:

* [Memory chatbot](/starter-projects/memory-chatbot.mdx)
* [Blog writer](/starter-projects/blog-writer.mdx)
* [Document QA](/starter-projects/document-qa.mdx)

docs/docs/getting-started/rag-with-astradb.mdx (new file, 195 lines)

@@ -0,0 +1,195 @@

import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";

# 🌟 RAG with Astra DB

This guide will walk you through how to build a RAG (Retrieval Augmented Generation) application using **Astra DB** and **Langflow**.

[Astra DB](https://www.datastax.com/products/datastax-astra?utm_source=langflow-pre-release&utm_medium=referral&utm_campaign=langflow-announcement&utm_content=astradb) is a cloud-native database built on Apache Cassandra that is optimized for the cloud. It is a fully managed database-as-a-service that simplifies operations and reduces costs. Astra DB is built on the same technology that powers the largest Cassandra deployments in the world.

In this guide, we will use Astra DB as a vector store to store and retrieve the documents that the RAG application will use to generate responses.

<Admonition type="tip">
  This guide assumes that you have Langflow up and running. If you are new to
  Langflow, you can check out the [Getting Started](/) guide.
</Admonition>

TL;DR:

- [Create a free Astra DB account](https://astra.datastax.com/signup?utm_source=langflow-pre-release&utm_medium=referral&utm_campaign=langflow-announcement&utm_content=create-a-free-astra-db-account)
- Duplicate our [Langflow 1.0 Space](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true)
- Create a new database, and get a **Token** and the **API Endpoint**
- Click the **New Project** button and look for Vector Store RAG. This creates a new project with the necessary components
- Import the project into Langflow by dropping it on the Canvas or My Collection page
- Update the **Token** and **API Endpoint** in the **Astra DB** components
- Update the OpenAI API key in the **OpenAI** components
- Run the ingestion flow, which is the one that uses the **Astra DB** component
- Click the ⚡ _Run_ button and start interacting with your RAG application

# First things first

## Create an Astra DB Database

To get started, you will need to [create an Astra DB database](https://astra.datastax.com/signup?utm_source=langflow-pre-release&utm_medium=referral&utm_campaign=langflow-announcement&utm_content=create-an-astradb-database).

Once you have created an account, you will be taken to the Astra DB dashboard. Click the **Create Database** button.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/astra-create-database.png",
    dark: "img/astra-create-database.png",
  }}
  style={{ width: "80%", margin: "20px auto" }}
/>

Now you will need to configure your database. Choose the **Serverless (Vector)** deployment type, and pick a database name, provider, and region.

After you have configured your database, click the **Create Database** button.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/astra-configure-deployment.png",
    dark: "img/astra-configure-deployment.png",
  }}
  style={{ width: "80%", margin: "20px auto" }}
/>

Once your database is initialized, on the right of the page you will see the _Database Details_ section, which contains a button to copy the **API Endpoint** and another to generate a **Token**.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/astra-generate-token.png",
    dark: "img/astra-generate-token.png",
  }}
  style={{ width: "50%", margin: "20px auto" }}
/>

Now we are all set to start building our RAG application using Astra DB and Langflow.

## (Optional) Duplicate the Langflow 1.0 HuggingFace Space

If you haven't already, now is the time to launch Langflow. To make things easier, you can duplicate our [Langflow 1.0 Space](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true), which sets up a Langflow instance just for you.

## Open the Vector Store RAG Project

To get started, click the **New Project** button and look for the **Vector Store RAG** project. This opens a starter project with the necessary components to run a RAG application using Astra DB.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/drag-and-drop-flow.png",
    dark: "img/drag-and-drop-flow.png",
  }}
  style={{ width: "80%", margin: "20px auto" }}
/>

This project consists of two flows. The simpler one is the **Ingestion Flow**, which is responsible for ingesting the documents into the Astra DB database.

Your first step should be to understand what each flow does and how they interact with each other.

The ingestion flow consists of (a rough code equivalent is sketched after this list):

- **Files** component that uploads a text file to Langflow
- **Recursive Character Text Splitter** component that splits the text into smaller chunks
- **OpenAIEmbeddings** component that generates embeddings for the text chunks
- **Astra DB** component that stores the text chunks in the Astra DB database
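
For intuition only, the four components above roughly correspond to the following LangChain calls; the file name, collection name, chunk sizes, endpoint, and token are placeholders, and the `AstraDB` module path is an assumption based on the langchain-community integration:

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import AstraDB

# Files -> Recursive Character Text Splitter
docs = TextLoader("my_file.txt").load()
chunks = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50).split_documents(docs)

# OpenAIEmbeddings -> Astra DB (placeholder credentials)
store = AstraDB(
    embedding=OpenAIEmbeddings(),
    collection_name="langflow_demo",
    api_endpoint="<ASTRA_DB_API_ENDPOINT>",
    token="<ASTRA_DB_APPLICATION_TOKEN>",
)
store.add_documents(chunks)
```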
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/astra-ingestion-flow.png",
|
||||
dark: "img/astra-ingestion-flow.png",
|
||||
}}
|
||||
style={{ width: "80%", margin: "20px auto" }}
|
||||
/>
|
||||
|
||||
Now, let's update the **Astra DB** and **Astra DB Search** components with the **Token** and **API Endpoint** that we generated earlier, and the OpenAI Embeddings components with your OpenAI API key.
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/astra-ingestion-fields.png",
|
||||
dark: "img/astra-ingestion-fields.png",
|
||||
}}
|
||||
style={{ width: "80%", margin: "20px auto" }}
|
||||
/>
|
||||
|
||||
And run it! This will ingest the Text data from your file into the Astra DB database.
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/astra-ingestion-run.png",
|
||||
dark: "img/astra-ingestion-run.png",
|
||||
}}
|
||||
style={{ width: "80%", margin: "20px auto" }}
|
||||
/>
|
||||
|

Now, on to the **RAG Flow**. This flow is responsible for generating responses to your queries. It defines all of the steps from receiving the user's input to generating a response and displaying it in the Playground.

The RAG flow is a bit more complex. It consists of:

- **Chat Input** component that defines where the user input coming from the Playground goes
- **OpenAI Embeddings** component that generates embeddings from the user input
- **Astra DB Search** component that retrieves the most relevant Records from the Astra DB database
- **Text Output** component that turns the Records into Text by concatenating them and also displays it in the Playground
  - One interesting detail: this component is named `Extracted Chunks`, and that is how it will appear in the Playground
- **Prompt** component that takes the user input and the retrieved Records as text and builds a prompt for the OpenAI model
- **OpenAI** component that generates a response to the prompt
- **Chat Output** component that displays the response in the Playground
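
Conceptually, the whole flow boils down to embed, search, prompt, and generate. The sketch below mirrors it in plain Python, reusing the `vector_store` from the ingestion sketch above; the prompt template, model name, and `k` value are illustrative assumptions rather than the starter project's exact settings:

```python
# Rough sketch of the RAG flow: retrieve relevant chunks, build a prompt,
# and generate an answer. Reuses `vector_store` from the ingestion sketch.
from openai import OpenAI

question = "What does the document say?"  # Chat Input

# OpenAI Embeddings + Astra DB Search: retrieve the most relevant Records.
records = vector_store.similarity_search(question, k=4)

# Text Output ("Extracted Chunks"): concatenate the Records into text.
context = "\n\n".join(doc.page_content for doc in records)

# Prompt: combine the context and the question (hypothetical template).
prompt = f"Answer the question using only this context:\n\n{context}\n\nQuestion: {question}"

# OpenAI: generate the response that Chat Output displays.
client = OpenAI()  # reads OPENAI_API_KEY from the environment
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": prompt}],
)
print(response.choices[0].message.content)
```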

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/astra-rag-flow.png",
    dark: "img/astra-rag-flow.png",
  }}
  style={{ width: "80%", margin: "20px auto" }}
/>
To run it, all we have to do is click the ⚡ _Run_ button and start interacting with your RAG application.
<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/astra-rag-flow-run.png",
    dark: "img/astra-rag-flow-run.png",
  }}
  style={{ width: "80%", margin: "20px auto" }}
/>
This opens the Playground, where you can chat with your data.

Because this flow has a **Chat Input** and a **Text Output** component, the Panel displays a chat input at the bottom and the Extracted Chunks section on the left.
<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/astra-rag-flow-interaction-panel.png",
    dark: "img/astra-rag-flow-interaction-panel.png",
  }}
  style={{ width: "80%", margin: "20px auto" }}
/>
Once we interact with it, we get a response, and the Extracted Chunks section is updated with the retrieved Records.

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/astra-rag-flow-interaction-panel-interaction.png",
    dark: "img/astra-rag-flow-interaction-panel-interaction.png",
  }}
  style={{ width: "80%", margin: "20px auto" }}
/>
And that's it! You have successfully run a RAG application using Astra DB and Langflow.

# Conclusion

In this guide, we have learned how to run a RAG application using Astra DB and Langflow.
We have seen how to create an Astra DB database, import the Vector Store RAG starter project into Langflow, and run the ingestion and RAG flows.
@ -1,73 +0,0 @@
import Admonition from "@theme/Admonition";

# Asynchronous Processing

## Introduction

Starting from version 0.5, Langflow introduces a new feature to its API: the _`sync`_ flag. This flag allows users to opt for asynchronous processing of their flows, freeing up resources and enabling better control over long-running tasks.
For now, this feature supports running tasks in a Celery worker queue or in AnyIO task groups.

<Admonition type="warning" caption="Experimental Feature">
  This is an experimental feature. The default behavior of the API is still
  synchronous processing. The API may change in the future.
</Admonition>

## The _`sync`_ Flag

The _`sync`_ flag can be included in the payload of your POST request to the _`/api/v1/process/<your_flow_id>`_ endpoint.
When set to _`false`_, the API will initiate an asynchronous task instead of processing the flow synchronously.

### API Request with the _`sync`_ Flag

```bash
curl -X POST \
  http://localhost:3000/api/v1/process/<your_flow_id> \
  -H 'Content-Type: application/json' \
  -H 'x-api-key: <your_api_key>' \
  -d '{"inputs": {"text": ""}, "tweaks": {}, "sync": false}'
```

Response:

```json
{
  "result": {
    "output": "..."
  },
  "task": {
    "id": "...",
    "href": "api/v1/task/<task_id>"
  },
  "session_id": "...",
  "backend": "..." // celery or anyio
}
```

## Checking Task Status

You can check the status of an asynchronous task by making a GET request to the _`/api/v1/task/<task_id>`_ endpoint.

```bash
curl -X GET \
  http://localhost:3000/api/v1/task/<task_id> \
  -H 'x-api-key: <your_api_key>'
```

### Response

The endpoint will return the current status of the task and, if completed, the result of the task. Possible statuses include:

- _`PENDING`_: The task is waiting for execution.
- _`SUCCESS`_: The task has completed successfully.
- _`FAILURE`_: The task has failed.

Example response for a completed task:

```json
{
  "status": "SUCCESS",
  "result": {
    "output": "..."
  }
}
```
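
Putting the two endpoints together, a minimal polling client might look like the sketch below. It is based only on the request and response shapes shown above; the base URL, flow id, API key, and polling interval are placeholders:

```python
# Minimal sketch: submit a flow with sync=false, then poll the task
# endpoint until it leaves the PENDING state.
import time

import requests

BASE_URL = "http://localhost:3000"  # adjust to your deployment
HEADERS = {"x-api-key": "<your_api_key>"}

# Kick off the flow asynchronously.
resp = requests.post(
    f"{BASE_URL}/api/v1/process/<your_flow_id>",
    headers=HEADERS,
    json={"inputs": {"text": ""}, "tweaks": {}, "sync": False},
)
task_id = resp.json()["task"]["id"]

# Poll until the task succeeds or fails.
while True:
    status = requests.get(f"{BASE_URL}/api/v1/task/{task_id}", headers=HEADERS).json()
    if status["status"] != "PENDING":
        break
    time.sleep(1)

if status["status"] == "SUCCESS":
    print(status["result"]["output"])
else:
    print("Task failed:", status)
```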
@ -1,44 +0,0 @@
import Admonition from "@theme/Admonition";

# Async API

## Introduction

<Admonition type="info" caption="In development">
  This implementation is still in development. Contributions are welcome!
</Admonition>

The Async API is an implementation of the Langflow API that uses [Celery](https://docs.celeryproject.org/en/stable/)
to run tasks asynchronously, using a message broker to send and receive messages, a result backend to store the results, and a cache to store task states and session data.

### Configuration

The folder _`./deploy`_ in the [GitHub repository](https://github.com/logspace-ai/langflow) contains a _`.env.example`_ file that can be used to configure a Langflow deployment.
The file contains the variables required to configure a Celery worker queue, a Redis cache and result backend, and a RabbitMQ message broker.

To set it up locally, you can copy the file to _`.env`_ and run the following command:

```bash
docker compose up -d
```

This will set up the following containers:

- Langflow API
- Celery worker
- RabbitMQ message broker
- Redis cache
- PostgreSQL database
- PGAdmin
- Flower
- Traefik
- Grafana
- Prometheus
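
As a mental model for how the broker and result backend fit together (this is the generic Celery pattern, not Langflow's actual internals), consider the minimal sketch below; the broker and backend URLs match the RabbitMQ and Redis containers above but are otherwise assumptions:

```python
# Generic Celery pattern: tasks flow through the message broker (RabbitMQ)
# and their return values land in the result backend (Redis).
from celery import Celery

app = Celery(
    "sketch",
    broker="amqp://guest:guest@localhost:5672//",  # RabbitMQ message broker
    backend="redis://localhost:6379/0",            # Redis result backend
)

@app.task
def process_flow(flow_id: str) -> str:
    # A Celery worker picks this up from the queue and stores the
    # return value in the result backend.
    return f"processed {flow_id}"

# A client enqueues the task and can fetch the result later by id.
result = process_flow.delay("my-flow")
print(result.get(timeout=30))
```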

### Testing

To run the tests for the Async API, you can run the following command:

```bash
docker compose -f docker-compose.with_tests.yml up --exit-code-from tests tests result_backend broker celeryworker db --build
```