Merge branch 'dev' into updateMakefile
This commit is contained in:
commit
993cea0c0b
255 changed files with 12396 additions and 4433 deletions
1
.dockerignore
Normal file
1
.dockerignore
Normal file
|
|
@ -0,0 +1 @@
|
|||
.venv/
|
||||
4
.github/workflows/release.yml
vendored
4
.github/workflows/release.yml
vendored
|
|
@ -14,9 +14,7 @@ env:
|
|||
|
||||
jobs:
|
||||
if_release:
|
||||
if: |
|
||||
${{ github.event.pull_request.merged == true }}
|
||||
&& ${{ contains(github.event.pull_request.labels.*.name, 'Release') }}
|
||||
if: ${{ (github.event.pull_request.merged == true) && contains(github.event.pull_request.labels.*.name, 'Release') }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
|
|
|||
2
.gitignore
vendored
2
.gitignore
vendored
|
|
@ -251,3 +251,5 @@ langflow.db
|
|||
|
||||
# docusaurus
|
||||
.docusaurus/
|
||||
|
||||
/tmp/*
|
||||
|
|
|
|||
13
.vscode/launch.json
vendored
13
.vscode/launch.json
vendored
|
|
@ -1,4 +1,5 @@
|
|||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Debug Backend",
|
||||
|
|
@ -6,7 +7,8 @@
|
|||
"request": "launch",
|
||||
"module": "uvicorn",
|
||||
"args": [
|
||||
"langflow.main:app",
|
||||
"--factory",
|
||||
"langflow.main:create_app",
|
||||
"--port",
|
||||
"7860",
|
||||
"--reload",
|
||||
|
|
@ -37,6 +39,15 @@
|
|||
"request": "launch",
|
||||
"url": "http://localhost:3000/",
|
||||
"webRoot": "${workspaceRoot}/src/frontend"
|
||||
},
|
||||
{
|
||||
"name": "Python: Debug Tests",
|
||||
"type": "python",
|
||||
"request": "launch",
|
||||
"program": "${file}",
|
||||
"purpose": ["debug-test"],
|
||||
"console": "integratedTerminal",
|
||||
"justMyCode": false
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,9 +7,14 @@ to contributions, whether it be in the form of a new feature, improved infra, or
|
|||
To contribute to this project, please follow a ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow.
|
||||
Please do not try to push directly to this repo unless you are a maintainer.
|
||||
|
||||
The branch structure is as follows:
|
||||
|
||||
- `main`: The stable version of Langflow
|
||||
- `dev`: The development version of Langflow. This branch is used to test new features before they are merged into `main` and, as such, may be unstable.
|
||||
|
||||
## 🗺️Contributing Guidelines
|
||||
|
||||
### 🚩GitHub Issues
|
||||
## 🚩GitHub Issues
|
||||
|
||||
Our [issues](https://github.com/logspace-ai/langflow/issues) page is kept up to date
|
||||
with bugs, improvements, and feature requests. There is a taxonomy of labels to help
|
||||
|
|
@ -33,18 +38,19 @@ so that more people can benefit from it.
|
|||
[collapses the content](https://developer.mozilla.org/en/docs/Web/HTML/Element/details)
|
||||
so it only becomes visible on click, making the issue easier to read and follow.
|
||||
|
||||
### Issue labels
|
||||
## Issue labels
|
||||
|
||||
[See this page](https://github.com/logspace-ai/langflow/labels) for an overview of
|
||||
the system we use to tag our issues and pull requests.
|
||||
|
||||
## Local development
|
||||
|
||||
### Local development
|
||||
You can develop Langflow using docker compose, or locally.
|
||||
|
||||
We provide a .vscode/launch.json file for debugging the backend in VSCode, which is a lot faster than using docker compose.
|
||||
|
||||
Setting up hooks:
|
||||
|
||||
```bash
|
||||
make init
|
||||
```
|
||||
|
|
@ -53,30 +59,46 @@ This will install the pre-commit hooks, which will run `make format` on every co
|
|||
|
||||
It is advised to run `make lint` before pushing to the repository.
|
||||
|
||||
#### **Locally**
|
||||
Run locally by cloning the repository and installing the dependencies. We recommend using a virtual environment to isolate the dependencies from your system.
|
||||
## Run locally
|
||||
|
||||
Langflow can run locally by cloning the repository and installing the dependencies. We recommend using a virtual environment to isolate the dependencies from your system.
|
||||
|
||||
Before you start, make sure you have the following installed:
|
||||
- Poetry (>=1.4)
|
||||
- Node.js
|
||||
|
||||
For the backend, you will need to install the dependencies and start the development server.
|
||||
- Poetry (>=1.4)
|
||||
- Node.js
|
||||
|
||||
Then, in the root folder, install the dependencies and start the development server for the backend:
|
||||
|
||||
```bash
|
||||
make install_backend
|
||||
make backend
|
||||
```
|
||||
For the frontend, you will need to install the dependencies and start the development server.
|
||||
|
||||
And the frontend:
|
||||
|
||||
```bash
|
||||
make frontend
|
||||
```
|
||||
|
||||
## Docker compose
|
||||
|
||||
The following snippet will run the backend and frontend in separate containers. The frontend will be available at `localhost:3000` and the backend at `localhost:7860`.
|
||||
|
||||
#### **Docker compose**
|
||||
This will run the backend and frontend in separate containers. The frontend will be available at `localhost:3000` and the backend at `localhost:7860`.
|
||||
```bash
|
||||
docker compose up --build
|
||||
# or
|
||||
make dev build=1
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
The documentation is built using [Docusaurus](https://docusaurus.io/). To run the documentation locally, run the following commands:
|
||||
|
||||
```bash
|
||||
cd docs
|
||||
npm install
|
||||
npm run start
|
||||
```
|
||||
|
||||
The documentation will be available at `localhost:3000` and all the files are located in the `docs/docs` folder.
|
||||
Once you are done with your changes, you can create a Pull Request to the `main` branch.
|
||||
|
|
|
|||
2
Makefile
2
Makefile
|
|
@ -46,7 +46,7 @@ install_backend:
|
|||
|
||||
backend:
|
||||
make install_backend
|
||||
poetry run uvicorn src.backend.langflow.main:app --port 7860 --reload --log-level debug
|
||||
poetry run uvicorn --factory src.backend.langflow.main:create_app --port 7860 --reload --log-level debug
|
||||
|
||||
build_and_run:
|
||||
echo 'Removing dist folder'
|
||||
|
|
|
|||
142
README.md
142
README.md
|
|
@ -13,7 +13,6 @@
|
|||
<img alt="Github License" src="https://img.shields.io/github/license/logspace-ai/langflow" />
|
||||
</p>
|
||||
|
||||
|
||||
<p>
|
||||
<a href="https://discord.gg/EqksyE2EX9"><img alt="Discord Server" src="https://dcbadge.vercel.app/api/server/EqksyE2EX9?compact=true&style=flat"/></a>
|
||||
<a href="https://huggingface.co/spaces/Logspace/Langflow"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
|
||||
|
|
@ -22,54 +21,75 @@
|
|||
<a href="https://github.com/logspace-ai/langflow">
|
||||
<img width="100%" src="https://github.com/logspace-ai/langflow/blob/main/img/langflow-demo.gif?raw=true"></a>
|
||||
|
||||
|
||||
<p>
|
||||
</p>
|
||||
|
||||
# Table of Contents
|
||||
|
||||
- [⛓️ Langflow](#️-langflow)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [📦 Installation](#-installation)
|
||||
- [Locally](#locally)
|
||||
- [HuggingFace Spaces](#huggingface-spaces)
|
||||
- [Locally](#locally)
|
||||
- [HuggingFace Spaces](#huggingface-spaces)
|
||||
- [🖥️ Command Line Interface (CLI)](#️-command-line-interface-cli)
|
||||
- [Usage](#usage)
|
||||
- [Usage](#usage)
|
||||
- [Environment Variables](#environment-variables)
|
||||
- [Deployment](#deployment)
|
||||
- [Deploy Langflow on Google Cloud Platform](#deploy-langflow-on-google-cloud-platform)
|
||||
- [Deploy Langflow on Jina AI Cloud](#deploy-langflow-on-jina-ai-cloud)
|
||||
- [API Usage](#api-usage)
|
||||
- [API Usage](#api-usage)
|
||||
- [Deploy on Railway](#deploy-on-railway)
|
||||
- [Deploy on Render](#deploy-on-render)
|
||||
- [🎨 Creating Flows](#-creating-flows)
|
||||
- [👋 Contributing](#-contributing)
|
||||
- [📄 License](#-license)
|
||||
|
||||
|
||||
# 📦 Installation
|
||||
|
||||
### <b>Locally</b>
|
||||
|
||||
You can install Langflow from pip:
|
||||
|
||||
```shell
|
||||
# This installs the package without dependencies for local models
|
||||
pip install langflow
|
||||
```
|
||||
|
||||
To use local models (e.g llama-cpp-python) run:
|
||||
|
||||
```shell
|
||||
pip install langflow[local]
|
||||
```
|
||||
|
||||
This will install the following dependencies:
|
||||
|
||||
- [CTransformers](https://github.com/marella/ctransformers)
|
||||
- [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)
|
||||
- [sentence-transformers](https://github.com/UKPLab/sentence-transformers)
|
||||
|
||||
You can still use models from projects like LocalAI
|
||||
|
||||
Next, run:
|
||||
|
||||
```shell
|
||||
python -m langflow
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```shell
|
||||
langflow # or langflow --help
|
||||
```
|
||||
|
||||
### HuggingFace Spaces
|
||||
|
||||
You can also check it out on [HuggingFace Spaces](https://huggingface.co/spaces/Logspace/Langflow) and run it in your browser! You can even clone it and have your own copy of Langflow to play with.
|
||||
|
||||
# 🖥️ Command Line Interface (CLI)
|
||||
|
||||
Langflow provides a command-line interface (CLI) for easy management and configuration.
|
||||
|
||||
### Usage
|
||||
## Usage
|
||||
|
||||
You can run the Langflow using the following command:
|
||||
|
||||
|
|
@ -87,11 +107,11 @@ Each option is detailed below:
|
|||
- `--config`: Defines the path to the configuration file. The default is `config.yaml`.
|
||||
- `--env-file`: Specifies the path to the .env file containing environment variables. The default is `.env`.
|
||||
- `--log-level`: Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`.
|
||||
- `--components-path`: Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`.
|
||||
- `--log-file`: Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`.
|
||||
- `--cache`: Selects the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`.
|
||||
- `--jcloud/--no-jcloud`: Toggles the option to deploy on Jina AI Cloud. The default is `no-jcloud`.
|
||||
- `--dev/--no-dev`: Toggles the development mode. The default is `no-dev`.
|
||||
- `--database-url`: Sets the database URL to connect to. If not provided, a local SQLite database will be used. Can be set using the `LANGFLOW_DATABASE_URL` environment variable.
|
||||
- `--path`: Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable.
|
||||
- `--open-browser/--no-open-browser`: Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`.
|
||||
- `--remove-api-keys/--no-remove-api-keys`: Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`.
|
||||
|
|
@ -114,7 +134,6 @@ Alternatively, click the **"Open in Cloud Shell"** button below to launch Google
|
|||
|
||||
[](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/logspace-ai/langflow&working_dir=scripts&shellonly=true&tutorial=walkthroughtutorial_spot.md)
|
||||
|
||||
|
||||
## Deploy Langflow on [Jina AI Cloud](https://github.com/jina-ai/langchain-serve)
|
||||
|
||||
Langflow integrates with langchain-serve to provide a one-command deployment to Jina AI Cloud.
|
||||
|
|
@ -122,6 +141,8 @@ Langflow integrates with langchain-serve to provide a one-command deployment to
|
|||
Start by installing `langchain-serve` with
|
||||
|
||||
```bash
|
||||
pip install langflow[deploy]
|
||||
# or
|
||||
pip install -U langchain-serve
|
||||
```
|
||||
|
||||
|
|
@ -140,33 +161,33 @@ langflow --jcloud
|
|||
<details>
|
||||
<summary>Show complete (example) output</summary>
|
||||
|
||||
```text
|
||||
🚀 Deploying Langflow server on Jina AI Cloud
|
||||
╭───────────────────────── 🎉 Flow is available! ──────────────────────────╮
|
||||
│ │
|
||||
│ ID langflow-e3dd8820ec │
|
||||
│ Gateway (Websocket) wss://langflow-e3dd8820ec.wolf.jina.ai │
|
||||
│ Dashboard https://dashboard.wolf.jina.ai/flow/e3dd8820ec │
|
||||
│ │
|
||||
╰──────────────────────────────────────────────────────────────────────────╯
|
||||
╭──────────────┬──────────────────────────────────────────────────────────────────────────────╮
|
||||
│ App ID │ langflow-e3dd8820ec │
|
||||
├──────────────┼──────────────────────────────────────────────────────────────────────────────┤
|
||||
│ Phase │ Serving │
|
||||
├──────────────┼──────────────────────────────────────────────────────────────────────────────┤
|
||||
│ Endpoint │ wss://langflow-e3dd8820ec.wolf.jina.ai │
|
||||
├──────────────┼──────────────────────────────────────────────────────────────────────────────┤
|
||||
│ App logs │ dashboards.wolf.jina.ai │
|
||||
├──────────────┼──────────────────────────────────────────────────────────────────────────────┤
|
||||
│ Swagger UI │ https://langflow-e3dd8820ec.wolf.jina.ai/docs │
|
||||
├──────────────┼──────────────────────────────────────────────────────────────────────────────┤
|
||||
│ OpenAPI JSON │ https://langflow-e3dd8820ec.wolf.jina.ai/openapi.json │
|
||||
╰──────────────┴──────────────────────────────────────────────────────────────────────────────╯
|
||||
```text
|
||||
🚀 Deploying Langflow server on Jina AI Cloud
|
||||
╭───────────────────────── 🎉 Flow is available! ──────────────────────────╮
|
||||
│ │
|
||||
│ ID langflow-e3dd8820ec │
|
||||
│ Gateway (Websocket) wss://langflow-e3dd8820ec.wolf.jina.ai │
|
||||
│ Dashboard https://dashboard.wolf.jina.ai/flow/e3dd8820ec │
|
||||
│ │
|
||||
╰──────────────────────────────────────────────────────────────────────────╯
|
||||
╭──────────────┬──────────────────────────────────────────────────────────────────────────────╮
|
||||
│ App ID │ langflow-e3dd8820ec │
|
||||
├──────────────┼──────────────────────────────────────────────────────────────────────────────┤
|
||||
│ Phase │ Serving │
|
||||
├──────────────┼──────────────────────────────────────────────────────────────────────────────┤
|
||||
│ Endpoint │ wss://langflow-e3dd8820ec.wolf.jina.ai │
|
||||
├──────────────┼──────────────────────────────────────────────────────────────────────────────┤
|
||||
│ App logs │ dashboards.wolf.jina.ai │
|
||||
├──────────────┼──────────────────────────────────────────────────────────────────────────────┤
|
||||
│ Swagger UI │ https://langflow-e3dd8820ec.wolf.jina.ai/docs │
|
||||
├──────────────┼──────────────────────────────────────────────────────────────────────────────┤
|
||||
│ OpenAPI JSON │ https://langflow-e3dd8820ec.wolf.jina.ai/openapi.json │
|
||||
╰──────────────┴──────────────────────────────────────────────────────────────────────────────╯
|
||||
|
||||
🎉 Langflow server successfully deployed on Jina AI Cloud 🎉
|
||||
🔗 Click on the link to open the server (please allow ~1-2 minutes for the server to startup): https://langflow-e3dd8820ec.wolf.jina.ai/
|
||||
📖 Read more about managing the server: https://github.com/jina-ai/langchain-serve
|
||||
```
|
||||
🎉 Langflow server successfully deployed on Jina AI Cloud 🎉
|
||||
🔗 Click on the link to open the server (please allow ~1-2 minutes for the server to startup): https://langflow-e3dd8820ec.wolf.jina.ai/
|
||||
📖 Read more about managing the server: https://github.com/jina-ai/langchain-serve
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
|
|
@ -177,7 +198,7 @@ You can use Langflow directly on your browser, or use the API endpoints on Jina
|
|||
<details>
|
||||
<summary>Show API usage (with python)</summary>
|
||||
|
||||
```python
|
||||
```python
|
||||
import requests
|
||||
|
||||
BASE_API_URL = "https://langflow-e3dd8820ec.wolf.jina.ai/api/v1/predict"
|
||||
|
|
@ -185,47 +206,49 @@ FLOW_ID = "864c4f98-2e59-468b-8e13-79cd8da07468"
|
|||
# You can tweak the flow by adding a tweaks dictionary
|
||||
# e.g {"OpenAI-XXXXX": {"model_name": "gpt-4"}}
|
||||
TWEAKS = {
|
||||
"ChatOpenAI-g4jEr": {},
|
||||
"ConversationChain-UidfJ": {}
|
||||
"ChatOpenAI-g4jEr": {},
|
||||
"ConversationChain-UidfJ": {}
|
||||
}
|
||||
|
||||
def run_flow(message: str, flow_id: str, tweaks: dict = None) -> dict:
|
||||
"""
|
||||
Run a flow with a given message and optional tweaks.
|
||||
"""
|
||||
Run a flow with a given message and optional tweaks.
|
||||
|
||||
:param message: The message to send to the flow
|
||||
:param flow_id: The ID of the flow to run
|
||||
:param tweaks: Optional tweaks to customize the flow
|
||||
:return: The JSON response from the flow
|
||||
"""
|
||||
api_url = f"{BASE_API_URL}/{flow_id}"
|
||||
:param message: The message to send to the flow
|
||||
:param flow_id: The ID of the flow to run
|
||||
:param tweaks: Optional tweaks to customize the flow
|
||||
:return: The JSON response from the flow
|
||||
"""
|
||||
api_url = f"{BASE_API_URL}/{flow_id}"
|
||||
|
||||
payload = {"message": message}
|
||||
payload = {"message": message}
|
||||
|
||||
if tweaks:
|
||||
payload["tweaks"] = tweaks
|
||||
if tweaks:
|
||||
payload["tweaks"] = tweaks
|
||||
|
||||
response = requests.post(api_url, json=payload)
|
||||
return response.json()
|
||||
response = requests.post(api_url, json=payload)
|
||||
return response.json()
|
||||
|
||||
# Setup any tweaks you want to apply to the flow
|
||||
print(run_flow("Your message", flow_id=FLOW_ID, tweaks=TWEAKS))
|
||||
```
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"result": "Great choice! Bangalore in the 1920s was a vibrant city with a rich cultural and political scene. Here are some suggestions for things to see and do:\n\n1. Visit the Bangalore Palace - built in 1887, this stunning palace is a perfect example of Tudor-style architecture. It was home to the Maharaja of Mysore and is now open to the public.\n\n2. Attend a performance at the Ravindra Kalakshetra - this cultural center was built in the 1920s and is still a popular venue for music and dance performances.\n\n3. Explore the neighborhoods of Basavanagudi and Malleswaram - both of these areas have retained much of their old-world charm and are great places to walk around and soak up the atmosphere.\n\n4. Check out the Bangalore Club - founded in 1868, this exclusive social club was a favorite haunt of the British expat community in the 1920s.\n\n5. Attend a meeting of the Indian National Congress - founded in 1885, the INC was a major force in the Indian independence movement and held many meetings and rallies in Bangalore in the 1920s.\n\nHope you enjoy your trip to 1920s Bangalore!"
|
||||
}
|
||||
```
|
||||
```json
|
||||
{
|
||||
"result": "Great choice! Bangalore in the 1920s was a vibrant city with a rich cultural and political scene. Here are some suggestions for things to see and do:\n\n1. Visit the Bangalore Palace - built in 1887, this stunning palace is a perfect example of Tudor-style architecture. It was home to the Maharaja of Mysore and is now open to the public.\n\n2. Attend a performance at the Ravindra Kalakshetra - this cultural center was built in the 1920s and is still a popular venue for music and dance performances.\n\n3. Explore the neighborhoods of Basavanagudi and Malleswaram - both of these areas have retained much of their old-world charm and are great places to walk around and soak up the atmosphere.\n\n4. Check out the Bangalore Club - founded in 1868, this exclusive social club was a favorite haunt of the British expat community in the 1920s.\n\n5. Attend a meeting of the Indian National Congress - founded in 1885, the INC was a major force in the Indian independence movement and held many meetings and rallies in Bangalore in the 1920s.\n\nHope you enjoy your trip to 1920s Bangalore!"
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
> Read more about resource customization, cost, and management of Langflow apps on Jina AI Cloud in the **[langchain-serve](https://github.com/jina-ai/langchain-serve)** repository.
|
||||
|
||||
## Deploy on Railway
|
||||
|
||||
[](https://railway.app/template/Emy2sU?referralCode=MnPSdg)
|
||||
|
||||
## Deploy on Render
|
||||
|
||||
<a href="https://render.com/deploy?repo=https://github.com/logspace-ai/langflow/tree/main">
|
||||
<img src="https://render.com/images/deploy-to-render-button.svg" alt="Deploy to Render" />
|
||||
</a>
|
||||
|
|
@ -248,11 +271,11 @@ flow = load_flow_from_json("path/to/flow.json")
|
|||
flow("Hey, have you heard of Langflow?")
|
||||
```
|
||||
|
||||
|
||||
# 👋 Contributing
|
||||
|
||||
We welcome contributions from developers of all levels to our open-source project on GitHub. If you'd like to contribute, please check our [contributing guidelines](./CONTRIBUTING.md) and help make Langflow more accessible.
|
||||
|
||||
---
|
||||
|
||||
Join our [Discord](https://discord.com/invite/EqksyE2EX9) server to ask questions, make suggestions and showcase your projects! 🦾
|
||||
|
||||
|
|
@ -261,7 +284,6 @@ Join our [Discord](https://discord.com/invite/EqksyE2EX9) server to ask question
|
|||
|
||||
[](https://star-history.com/#logspace-ai/langflow&Date)
|
||||
|
||||
|
||||
# 📄 License
|
||||
|
||||
Langflow is released under the MIT License. See the LICENSE file for details.
|
||||
|
|
|
|||
|
|
@ -15,4 +15,4 @@ COPY ./ ./
|
|||
# Install dependencies
|
||||
RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi
|
||||
|
||||
CMD ["uvicorn", "langflow.main:app", "--host", "0.0.0.0", "--port", "5003", "--reload", "log-level", "debug"]
|
||||
CMD ["uvicorn", "--factory", "src.backend.langflow.main:create_app", "--host", "0.0.0.0", "--port", "7860", "--reload", "--log-level", "debug"]
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
version: '3.4'
|
||||
version: "3.4"
|
||||
|
||||
services:
|
||||
backend:
|
||||
|
|
@ -7,7 +7,12 @@ services:
|
|||
build:
|
||||
context: ./
|
||||
dockerfile: ./dev.Dockerfile
|
||||
command: ["sh", "-c", "pip install debugpy -t /tmp && python /tmp/debugpy --wait-for-client --listen 0.0.0.0:5678 -m uvicorn langflow.main:app --host 0.0.0.0 --port 7860 --reload"]
|
||||
command:
|
||||
[
|
||||
"sh",
|
||||
"-c",
|
||||
"pip install debugpy -t /tmp && python /tmp/debugpy --wait-for-client --listen 0.0.0.0:5678 -m uvicorn --factory src.backend.langflow.main:create_app --host 0.0.0.0 --port 7860 --reload",
|
||||
]
|
||||
ports:
|
||||
- 7860:7860
|
||||
- 5678:5678
|
||||
|
|
@ -22,7 +27,7 @@ services:
|
|||
ports:
|
||||
- "3000:3000"
|
||||
volumes:
|
||||
- ./src/frontend/public:/home/node/app/public
|
||||
- ./src/frontend/src:/home/node/app/src
|
||||
- ./src/frontend/package.json:/home/node/app/package.json
|
||||
restart: on-failure
|
||||
- ./src/frontend/public:/home/node/app/public
|
||||
- ./src/frontend/src:/home/node/app/src
|
||||
- ./src/frontend/package.json:/home/node/app/package.json
|
||||
restart: on-failure
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
version: '3'
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
backend:
|
||||
|
|
@ -9,7 +9,7 @@ services:
|
|||
- "7860:7860"
|
||||
volumes:
|
||||
- ./:/app
|
||||
command: bash -c "uvicorn langflow.main:app --host 0.0.0.0 --port 7860 --reload"
|
||||
command: bash -c "uvicorn --factory src.backend.langflow.main:create_app --host 0.0.0.0 --port 7860 --reload"
|
||||
|
||||
frontend:
|
||||
build:
|
||||
|
|
@ -22,7 +22,7 @@ services:
|
|||
ports:
|
||||
- "3000:3000"
|
||||
volumes:
|
||||
- ./src/frontend/public:/home/node/app/public
|
||||
- ./src/frontend/src:/home/node/app/src
|
||||
- ./src/frontend/package.json:/home/node/app/package.json
|
||||
restart: on-failure
|
||||
- ./src/frontend/public:/home/node/app/public
|
||||
- ./src/frontend/src:/home/node/app/src
|
||||
- ./src/frontend/package.json:/home/node/app/package.json
|
||||
restart: on-failure
|
||||
|
|
|
|||
|
|
@ -6,5 +6,5 @@ services:
|
|||
context: .
|
||||
dockerfile: Dockerfile
|
||||
ports:
|
||||
- "5003:5003"
|
||||
- "7860:7860"
|
||||
command: langflow --host 0.0.0.0
|
||||
|
|
|
|||
|
|
@ -1,5 +1,14 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# Agents
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
|
||||
Agents are components that use reasoning to make decisions and take actions, designed to autonomously perform tasks or provide services with some degree of “freedom” (or agency). They combine the power of LLM chaining processes with access to external tools such as APIs to interact with applications and accomplish tasks.
|
||||
|
||||
---
|
||||
|
|
|
|||
|
|
@ -1,9 +1,18 @@
|
|||
import ThemedImage from "@theme/ThemedImage";
|
||||
import useBaseUrl from "@docusaurus/useBaseUrl";
|
||||
import ZoomableImage from "/src/theme/ZoomableImage.js";
|
||||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Chains
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may
|
||||
contain some rough edges. Share your feedback or report issues to help us
|
||||
improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
Chains, in the context of language models, refer to a series of calls made to a language model. It allows for the output of one call to be used as the input for another call. Different types of chains allow for different levels of complexity. Chains are useful for creating pipelines and executing specific scenarios.
|
||||
|
||||
---
|
||||
|
|
@ -12,22 +21,23 @@ Chains, in the context of language models, refer to a series of calls made to a
|
|||
|
||||
The `CombineDocsChain` incorporates methods to combine or aggregate loaded documents for question-answering functionality.
|
||||
|
||||
:::info
|
||||
<Admonition type="info">
|
||||
|
||||
Works as a proxy of LangChain’s [documents](https://python.langchain.com/docs/modules/chains/document/) chains generated by the `load_qa_chain` function.
|
||||
|
||||
:::
|
||||
</Admonition>
|
||||
|
||||
**Params**
|
||||
|
||||
- **LLM:** Language Model to use in the chain.
|
||||
- **chain_type:** The chain type to be used. Each one of them applies a different “combination strategy”.
|
||||
- **stuff**: The stuff [documents](https://python.langchain.com/docs/modules/chains/document/stuff) chain (“stuff" as in "to stuff" or "to fill") is the most straightforward of *the* document chains. It takes a list of documents, inserts them all into a prompt, and passes that prompt to an LLM. This chain is well-suited for applications where documents are small and only a few are passed in for most calls.
|
||||
- **map_reduce**: The map-reduce [documents](https://python.langchain.com/docs/modules/chains/document/map_reduce) chain first applies an LLM chain to each document individually (the Map step), treating the chain output as a new document. It then passes all the new documents to a separate combined documents chain to get a single output (the Reduce step). It can optionally first compress or collapse the mapped documents to make sure that they fit in the combined documents chain (which will often pass them to an LLM). This compression step is performed recursively if necessary.
|
||||
- **map_rerank**: The map re-rank [documents](https://python.langchain.com/docs/modules/chains/document/map_rerank) chain runs an initial prompt on each document that not only tries to complete a task but also gives a score for how certain it is in its answer. The highest-scoring response is returned.
|
||||
- **refine**: The refine [documents](https://python.langchain.com/docs/modules/chains/document/refine) chain constructs a response by looping over the input documents and iteratively updating its answer. For each document, it passes all non-document inputs, the current document, and the latest intermediate answer to an LLM chain to get a new answer.
|
||||
|
||||
Since the Refine chain only passes a single document to the LLM at a time, it is well-suited for tasks that require analyzing more documents than can fit in the model's context. The obvious tradeoff is that this chain will make far more LLM calls than, for example, the Stuff documents chain. There are also certain tasks that are difficult to accomplish iteratively. For example, the Refine chain can perform poorly when documents frequently cross-reference one another or when a task requires detailed information from many documents.
|
||||
- **stuff**: The stuff [documents](https://python.langchain.com/docs/modules/chains/document/stuff) chain (“stuff" as in "to stuff" or "to fill") is the most straightforward of _the_ document chains. It takes a list of documents, inserts them all into a prompt, and passes that prompt to an LLM. This chain is well-suited for applications where documents are small and only a few are passed in for most calls.
|
||||
- **map_reduce**: The map-reduce [documents](https://python.langchain.com/docs/modules/chains/document/map_reduce) chain first applies an LLM chain to each document individually (the Map step), treating the chain output as a new document. It then passes all the new documents to a separate combined documents chain to get a single output (the Reduce step). It can optionally first compress or collapse the mapped documents to make sure that they fit in the combined documents chain (which will often pass them to an LLM). This compression step is performed recursively if necessary.
|
||||
- **map_rerank**: The map re-rank [documents](https://python.langchain.com/docs/modules/chains/document/map_rerank) chain runs an initial prompt on each document that not only tries to complete a task but also gives a score for how certain it is in its answer. The highest-scoring response is returned.
|
||||
- **refine**: The refine [documents](https://python.langchain.com/docs/modules/chains/document/refine) chain constructs a response by looping over the input documents and iteratively updating its answer. For each document, it passes all non-document inputs, the current document, and the latest intermediate answer to an LLM chain to get a new answer.
|
||||
|
||||
Since the Refine chain only passes a single document to the LLM at a time, it is well-suited for tasks that require analyzing more documents than can fit in the model's context. The obvious tradeoff is that this chain will make far more LLM calls than, for example, the Stuff documents chain. There are also certain tasks that are difficult to accomplish iteratively. For example, the Refine chain can perform poorly when documents frequently cross-reference one another or when a task requires detailed information from many documents.
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -41,7 +51,7 @@ The `ConversationChain` is a straightforward chain for interactive conversations
|
|||
- **Memory:** Default memory store.
|
||||
- **input_key:** Used to specify the key under which the user input will be stored in the conversation memory. It allows you to provide the user's input to the chain for processing and generating a response.
|
||||
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
|
||||
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can be helpful for debugging and understanding the chain's behavior. If set to False, it will suppress the verbose output — defaults to `False`.
|
||||
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can be helpful for debugging and understanding the chain's behavior. If set to False, it will suppress the verbose output — defaults to `False`.
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -49,11 +59,11 @@ The `ConversationChain` is a straightforward chain for interactive conversations
|
|||
|
||||
The `ConversationalRetrievalChain` extracts information and provides answers by combining document search and question-answering abilities.
|
||||
|
||||
:::info
|
||||
<Admonition type="info">
|
||||
|
||||
A retriever is a component that finds documents based on a query. It doesn't store the documents themselves, but it returns the ones that match the query.
|
||||
|
||||
:::
|
||||
</Admonition>
|
||||
|
||||
**Params**
|
||||
|
||||
|
|
@ -61,12 +71,13 @@ A retriever is a component that finds documents based on a query. It doesn't sto
|
|||
- **Memory:** Default memory store.
|
||||
- **Retriever:** The retriever used to fetch relevant documents.
|
||||
- **chain_type:** The chain type to be used. Each one of them applies a different “combination strategy”.
|
||||
- **stuff**: The stuff [documents](https://python.langchain.com/docs/modules/chains/document/stuff) chain (“stuff" as in "to stuff" or "to fill") is the most straightforward of *the* document chains. It takes a list of documents, inserts them all into a prompt, and passes that prompt to an LLM. This chain is well-suited for applications where documents are small and only a few are passed in for most calls.
|
||||
- **map_reduce**: The map-reduce [documents](https://python.langchain.com/docs/modules/chains/document/map_reduce) chain first applies an LLM chain to each document individually (the Map step), treating the chain output as a new document. It then passes all the new documents to a separate combined documents chain to get a single output (the Reduce step). It can optionally first compress or collapse the mapped documents to make sure that they fit in the combined documents chain (which will often pass them to an LLM). This compression step is performed recursively if necessary.
|
||||
- **map_rerank**: The map re-rank [documents](https://python.langchain.com/docs/modules/chains/document/map_rerank) chain runs an initial prompt on each document that not only tries to complete a task but also gives a score for how certain it is in its answer. The highest-scoring response is returned.
|
||||
- **refine**: The refine [documents](https://python.langchain.com/docs/modules/chains/document/refine) chain constructs a response by looping over the input documents and iteratively updating its answer. For each document, it passes all non-document inputs, the current document, and the latest intermediate answer to an LLM chain to get a new answer.
|
||||
|
||||
Since the Refine chain only passes a single document to the LLM at a time, it is well-suited for tasks that require analyzing more documents than can fit in the model's context. The obvious tradeoff is that this chain will make far more LLM calls than, for example, the Stuff documents chain. There are also certain tasks that are difficult to accomplish iteratively. For example, the Refine chain can perform poorly when documents frequently cross-reference one another or when a task requires detailed information from many documents.
|
||||
- **stuff**: The stuff [documents](https://python.langchain.com/docs/modules/chains/document/stuff) chain (“stuff" as in "to stuff" or "to fill") is the most straightforward of _the_ document chains. It takes a list of documents, inserts them all into a prompt, and passes that prompt to an LLM. This chain is well-suited for applications where documents are small and only a few are passed in for most calls.
|
||||
- **map_reduce**: The map-reduce [documents](https://python.langchain.com/docs/modules/chains/document/map_reduce) chain first applies an LLM chain to each document individually (the Map step), treating the chain output as a new document. It then passes all the new documents to a separate combined documents chain to get a single output (the Reduce step). It can optionally first compress or collapse the mapped documents to make sure that they fit in the combined documents chain (which will often pass them to an LLM). This compression step is performed recursively if necessary.
|
||||
- **map_rerank**: The map re-rank [documents](https://python.langchain.com/docs/modules/chains/document/map_rerank) chain runs an initial prompt on each document that not only tries to complete a task but also gives a score for how certain it is in its answer. The highest-scoring response is returned.
|
||||
- **refine**: The refine [documents](https://python.langchain.com/docs/modules/chains/document/refine) chain constructs a response by looping over the input documents and iteratively updating its answer. For each document, it passes all non-document inputs, the current document, and the latest intermediate answer to an LLM chain to get a new answer.
|
||||
|
||||
Since the Refine chain only passes a single document to the LLM at a time, it is well-suited for tasks that require analyzing more documents than can fit in the model's context. The obvious tradeoff is that this chain will make far more LLM calls than, for example, the Stuff documents chain. There are also certain tasks that are difficult to accomplish iteratively. For example, the Refine chain can perform poorly when documents frequently cross-reference one another or when a task requires detailed information from many documents.
|
||||
|
||||
- **return_source_documents:** Used to specify whether or not to include the source documents that were used to answer the question in the output. When set to `True`, source documents will be included in the output along with the generated answer. This can be useful for providing additional context or references to the user — defaults to `True`.
|
||||
- **verbose:** Whether or not to run in verbose mode. In verbose mode, intermediate logs will be printed to the console — defaults to `False`.
|
||||
|
|
@ -108,17 +119,17 @@ The `LLMMathChain` works by using the language model with an `LLMChain` to under
|
|||
|
||||
`RetrievalQA` is a chain used to find relevant documents or information to answer a given query. The retriever is responsible for returning the relevant documents based on the query, and the QA component then extracts the answer from those documents. The retrieval QA system combines the capabilities of both the retriever and the QA component to provide accurate and relevant answers to user queries.
|
||||
|
||||
:::info
|
||||
<Admonition type="info">
|
||||
|
||||
A retriever is a component that finds documents based on a query. It doesn't store the documents themselves, but it returns the ones that match the query.
|
||||
|
||||
:::
|
||||
</Admonition>
|
||||
|
||||
**Params**
|
||||
|
||||
- **Combine Documents Chain:** Chain to use to combine the documents.
|
||||
- **Memory:** Default memory store.
|
||||
- **Retriever:** The retriever used to fetch relevant documents.
|
||||
- **Retriever:** The retriever used to fetch relevant documents.
|
||||
- **input_key:** This parameter is used to specify the key in the input data that contains the question. It is used to retrieve the question from the input data and pass it to the question-answering model for generating the answer — defaults to `query`.
|
||||
- **output_key:** This parameter is used to specify the key in the output data where the generated answer will be stored. It is used to retrieve the answer from the output data after the question-answering model has generated it — defaults to `result`.
|
||||
- **return_source_documents:** Used to specify whether or not to include the source documents that were used to answer the question in the output. When set to `True`, source documents will be included in the output along with the generated answer. This can be useful for providing additional context or references to the user — defaults to `True`.
|
||||
|
|
@ -134,4 +145,4 @@ The `SQLDatabaseChain` finds answers to questions using a SQL database. It works
|
|||
|
||||
- **Db:** SQL Database to connect to.
|
||||
- **LLM:** Language Model to use in the chain.
|
||||
- **Prompt:** Prompt template to translate natural language to SQL.
|
||||
- **Prompt:** Prompt template to translate natural language to SQL.
|
||||
|
|
|
|||
92
docs/docs/components/custom.mdx
Normal file
92
docs/docs/components/custom.mdx
Normal file
|
|
@ -0,0 +1,92 @@
|
|||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Custom Components
|
||||
|
||||
Used to create a custom component, a special type of Langflow component that allows users to extend the functionality of the platform by creating their own reusable and configurable components from a Python script.
|
||||
|
||||
To use a custom component, follow these steps:
|
||||
|
||||
- Create a class that inherits from _`langflow.CustomComponent`_ and contains a _`build`_ method.
|
||||
- Use arguments with [Type Annotations (or Type Hints)](https://docs.python.org/3/library/typing.html) of the _`build`_ method to create component fields.
|
||||
- If applicable, use the _`build_config`_ method to customize how these fields look and behave.
|
||||
|
||||
<Admonition type="info" label="Tip">
|
||||
|
||||
For an in-depth explanation of custom components, their rules, and applications, make sure to read [Custom Component guidelines](../guidelines/custom-component).
|
||||
|
||||
</Admonition>
|
||||
|
||||
**Params**
|
||||
|
||||
- **Code:** The Python code to define the component.
|
||||
|
||||
## The CustomComponent Class
|
||||
|
||||
The CustomComponent class serves as the foundation for creating custom components. By inheriting this class, users can create new, configurable components, tailored to their specific requirements.
|
||||
|
||||
**Methods**
|
||||
|
||||
- **build**: This method is required within a Custom Component class. It defines the component's functionality and specifies how it processes input data to produce output data. This method is called when the component is built (i.e., when you click the _Build_ ⚡ button in the canvas).
|
||||
|
||||
The type annotations of the _`build`_ instance method are used to create the fields of the component.
|
||||
|
||||
| Supported Types |
|
||||
| --------------------------------------------------------- |
|
||||
| _`str`_, _`int`_, _`float`_, _`bool`_, _`list`_, _`dict`_ |
|
||||
| _`langchain.chains.base.Chain`_ |
|
||||
| _`langchain.PromptTemplate`_ |
|
||||
| _`langchain.llms.base.BaseLLM`_ |
|
||||
| _`langchain.Tool`_ |
|
||||
| _`langchain.document_loaders.base.BaseLoader`_ |
|
||||
| _`langchain.schema.Document`_ |
|
||||
| _`langchain.text_splitters.TextSplitter`_ |
|
||||
| _`langchain.vectorstores.base.VectorStore`_ |
|
||||
| _`langchain.embeddings.base.Embeddings`_ |
|
||||
| _`langchain.schema.BaseRetriever`_ |
|
||||
|
||||
<Admonition type="info">
|
||||
Unlike Langchain types, base Python types do not add a
|
||||
[handle](../guidelines/components) to the field by default. To add handles,
|
||||
use the _`input_types`_ key in the _`build_config`_ method.
|
||||
</Admonition>
|
||||
|
||||
- **build_config**: Used to define the configuration fields of the component (if applicable). It should always return a dictionary with specific keys representing the field names and corresponding configurations. This method is called when the code is processed (i.e., when you click _Check and Save_ in the code editor). It must follow the format described below:
|
||||
|
||||
- Top-level keys are field names.
|
||||
- Their values are also of type _`dict`_. They specify the behavior of the generated fields.
|
||||
|
||||
Below are the available keys used to configure component fields:
|
||||
|
||||
| Key | Description |
|
||||
| -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| _`field_type: str`_ | The type of the field (can be any of the types supported by the _`build`_ method). |
|
||||
| _`is_list: bool`_ | If the field can be a list of values, meaning that the user can manually add more inputs to the same field. |
|
||||
| _`options: List[str]`_ | When defined, the field becomes a dropdown menu where a list of strings defines the options to be displayed. If the _`value`_ attribute is set to one of the options, that option becomes default. For this parameter to work, _`field_type`_ should invariably be _`str`_. |
|
||||
| _`multiline: bool`_ | Defines if a string field opens a text editor. Useful for longer texts. |
|
||||
| _`input_types: List[str]`_ | Used when you want a _`str`_ field to have connectable handles. |
|
||||
| _`display_name: str`_ | Defines the name of the field. |
|
||||
| _`advanced: bool`_ | Hide the field in the canvas view (displayed component settings only). Useful when a field is for advanced users. |
|
||||
| _`password: bool`_ | To mask the input text. Useful to hide sensitive text (e.g. API keys). |
|
||||
| _`required: bool`_ | Makes the field required. |
|
||||
| _`info: str`_ | Adds a tooltip to the field. |
|
||||
| _`file_types: List[str]`_ | This is a requirement if the _`field_type`_ is _file_. Defines which file types will be accepted. For example, _json_, _yaml_ or _yml_. |
|
||||
|
||||
- The CustomComponent class also provides helpful methods for specific tasks (e.g., to load and use other flows from the Langflow platform):
|
||||
|
||||
| Method Name | Description |
|
||||
| -------------- | ------------------------------------------------------------------- |
|
||||
| _`list_flows`_ | Returns a list of Flow objects with an _`id`_ and a _`name`_. |
|
||||
| _`get_flow`_ | Returns a Flow object. Parameters are _`flow_name`_ or _`flow_id`_. |
|
||||
| _`load_flow`_ | Loads a flow from a given _`id`_. |
|
||||
|
||||
- Useful attributes:
|
||||
|
||||
| Attribute Name | Description |
|
||||
| -------------- | ----------------------------------------------------------------------------- |
|
||||
| _`repr_value`_ | Displays the value it receives in the _`build`_ method. Useful for debugging. |
|
||||
|
||||
<Admonition type="info" label="Tip">
|
||||
|
||||
Check out the [FlowRunner](../examples/flow-runner) example to understand how to call a flow from a custom component.
|
||||
|
||||
</Admonition>
|
||||
|
|
@ -1,5 +1,13 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# Embeddings
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
Embeddings are vector representations of text that capture the semantic meaning of the text. They are created using text embedding models and allow us to think about the text in a vector space, enabling us to perform tasks like semantic search, where we look for pieces of text that are most similar in the vector space.
|
||||
|
||||
---
|
||||
|
|
@ -65,3 +73,25 @@ Used to load [OpenAI’s](https://openai.com/) embedding models.
|
|||
- **request_timeout:** Used to specify the maximum amount of time, in milliseconds, to wait for a response from the OpenAI API when generating embeddings for a given text.
|
||||
|
||||
- **tiktoken_model_name:** Used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name.
|
||||
|
||||
---
|
||||
|
||||
### VertexAIEmbeddings
|
||||
|
||||
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embeddings API](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings).
|
||||
|
||||
:::info
|
||||
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
|
||||
:::
|
||||
|
||||
- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
|
||||
- **location:** The default location to use when making API calls – defaults to `us-central1`.
|
||||
- **max_output_tokens:** Token limit determines the maximum amount of text output from one prompt – defaults to `128`.
|
||||
- **model_name:** The name of the Vertex AI large language model – defaults to `text-bison`.
|
||||
- **project:** The default GCP project to use when making Vertex API calls.
|
||||
- **request_parallelism:** The amount of parallelism allowed for requests issued to VertexAI models – defaults to `5`.
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value – defaults to `0`.
|
||||
- **top_k:** How the model selects tokens for output — the next token is selected from among the `top_k` most probable tokens – defaults to `40`.
|
||||
- **top_p:** Tokens are selected from most probable to least until the sum of their probabilities reaches the `top_p` value – defaults to `0.95`.
|
||||
- **tuned_model_name:** The name of a tuned model. If provided, model_name is ignored.
|
||||
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output – defaults to `False`.
|
||||
|
|
@ -1,2 +1,220 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# LLMs
|
||||
(coming soon)
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
An LLM stands for Large Language Model. It is a core component of Langflow and provides a standard interface for interacting with different LLMs from various providers such as OpenAI, Cohere, and HuggingFace. LLMs are used widely throughout Langflow, including in chains and agents. They can be used to generate text based on a given prompt (or input).
|
||||
|
||||
---
|
||||
|
||||
### Anthropic
|
||||
|
||||
Wrapper around Anthropic's large language models. Find out more at [Anthropic](https://www.anthropic.com).
|
||||
|
||||
- **anthropic_api_key:** Used to authenticate and authorize access to the Anthropic API.
|
||||
|
||||
- **anthropic_api_url:** Specifies the URL of the Anthropic API to connect to.
|
||||
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value.
|
||||
|
||||
---
|
||||
|
||||
### ChatAnthropic
|
||||
|
||||
Wrapper around Anthropic's large language model used for chat-based interactions. Find out more at [Anthropic](https://www.anthropic.com).
|
||||
|
||||
- **anthropic_api_key:** Used to authenticate and authorize access to the Anthropic API.
|
||||
|
||||
- **anthropic_api_url:** Specifies the URL of the Anthropic API to connect to.
|
||||
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value.
|
||||
|
||||
---
|
||||
|
||||
### CTransformers
|
||||
|
||||
The `CTransformers` component provides access to the Transformer models implemented in C/C++ using the [GGML](https://github.com/ggerganov/ggml) library.
|
||||
|
||||
:::info
|
||||
Make sure to have the `ctransformers` python package installed. Learn more about installation, supported models, and usage [here](https://github.com/marella/ctransformers).
|
||||
:::
|
||||
|
||||
**config:** Configuration for the Transformer models. Check out [config](https://github.com/marella/ctransformers#config). Defaults to:
|
||||
|
||||
```
|
||||
{
|
||||
|
||||
"top_k": 40,
|
||||
|
||||
"top_p": 0.95,
|
||||
|
||||
"temperature": 0.8,
|
||||
|
||||
"repetition_penalty": 1.1,
|
||||
|
||||
"last_n_tokens": 64,
|
||||
|
||||
"seed": -1,
|
||||
|
||||
"max_new_tokens": 256,
|
||||
|
||||
"stop": null,
|
||||
|
||||
"stream": false,
|
||||
|
||||
"reset": true,
|
||||
|
||||
"batch_size": 8,
|
||||
|
||||
"threads": -1,
|
||||
|
||||
"context_length": -1,
|
||||
|
||||
"gpu_layers": 0
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
**model:** The path to a model file or directory or the name of a Hugging Face Hub model repo.
|
||||
|
||||
**model_file:** The name of the model file in the repo or directory.
|
||||
|
||||
**model_type:** Transformer model to be used. Learn more [here](https://github.com/marella/ctransformers).
|
||||
|
||||
---
|
||||
|
||||
### ChatOpenAI
|
||||
|
||||
Wrapper around [OpenAI's](https://openai.com) chat large language models. This component supports some of the LLMs (Large Language Models) available by OpenAI and is used for tasks such as chatbots, Generative Question-Answering (GQA), and summarization.
|
||||
|
||||
- **max_tokens:** The maximum number of tokens to generate in the completion. `-1` returns as many tokens as possible, given the prompt and the model's maximal context size – defaults to `256`.
|
||||
- **model_kwargs:** Holds any model parameters valid for creating non-specified calls.
|
||||
- **model_name:** Defines the OpenAI chat model to be used.
|
||||
- **openai_api_base:** Used to specify the base URL for the OpenAI API. It is typically set to the API endpoint provided by the OpenAI service.
|
||||
- **openai_api_key:** Key used to authenticate and access the OpenAI API.
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value – defaults to `0.7`.
|
||||
|
||||
---
|
||||
|
||||
### Cohere
|
||||
|
||||
Wrapper around [Cohere's](https://cohere.com) large language models.
|
||||
|
||||
- **cohere_api_key:** Holds the API key required to authenticate with the Cohere service.
|
||||
- **max_tokens:** Maximum number of tokens to predict per generation – defaults to `256`.
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value – defaults to `0.75`.
|
||||
|
||||
---
|
||||
|
||||
### HuggingFaceHub
|
||||
|
||||
Wrapper around [HuggingFace](https://www.huggingface.co/models) models.
|
||||
|
||||
:::info
|
||||
The HuggingFace Hub is an online platform that hosts over 120k models, 20k datasets, and 50k demo apps, all of which are open-source and publicly available. Discover more at [HuggingFace](http://www.huggingface.co).
|
||||
:::
|
||||
|
||||
- **huggingfacehub_api_token:** Token needed to authenticate the API.
|
||||
- **model_kwargs:** Keyword arguments to pass to the model.
|
||||
- **repo_id:** Model name to use – defaults to `gpt2`.
|
||||
- **task:** Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.
|
||||
|
||||
---
|
||||
|
||||
### LlamaCpp
|
||||
|
||||
The `LlamaCpp` component provides access to the `llama.cpp` models.
|
||||
|
||||
:::info
|
||||
Make sure to have the `llama.cpp` python package installed. Learn more about installation, supported models, and usage [here](https://github.com/ggerganov/llama.cpp).
|
||||
:::
|
||||
|
||||
- **echo:** Whether to echo the prompt – defaults to `False`.
|
||||
- **f16_kv:** Use half-precision for key/value cache – defaults to `True`.
|
||||
- **last_n_tokens_size:** The number of tokens to look back at when applying the repeat_penalty. Defaults to `64`.
|
||||
- **logits_all:** Return logits for all tokens, not just the last token. Defaults to `False`.
|
||||
- **logprobs:** The number of logprobs to return. If None, no logprobs are returned.
|
||||
- **lora_base:** The path to the Llama LoRA base model.
|
||||
- **lora_path:** The path to the Llama LoRA. If None, no LoRa is loaded.
|
||||
- **max_tokens:** The maximum number of tokens to generate. Defaults to `256`.
|
||||
- **model_path:** The path to the Llama model file.
|
||||
- **n_batch:** Number of tokens to process in parallel. Should be a number between 1 and n_ctx. Defaults to `8`.
|
||||
- **n_ctx:** Token context window. Defaults to `512`.
|
||||
- **n_gpu_layers:** Number of layers to be loaded into GPU memory. Default None.
|
||||
- **n_parts:** Number of parts to split the model into. If -1, the number of parts is automatically determined. Defaults to `-1`.
|
||||
- **n_threads:** Number of threads to use. If None, the number of threads is automatically determined.
|
||||
- **repeat_penalty:** The penalty to apply to repeated tokens. Defaults to `1.1`.
|
||||
- **seed:** Seed. If -1, a random seed is used. Defaults to `-1`.
|
||||
- **stop:** A list of strings to stop generation when encountered.
|
||||
- **streaming:** Whether to stream the results, token by token. Defaults to `True`.
|
||||
- **suffix:** A suffix to append to the generated text. If None, no suffix is appended.
|
||||
- **tags:** Tags to add to the run trace.
|
||||
- **temperature:** The temperature to use for sampling. Defaults to `0.8`.
|
||||
- **top_k:** The top-k value to use for sampling. Defaults to `40`.
|
||||
- **top_p:** The top-p value to use for sampling. Defaults to `0.95`.
|
||||
- **use_mlock:** Force the system to keep the model in RAM. Defaults to `False`.
|
||||
- **use_mmap:** Whether to keep the model loaded in RAM. Defaults to `True`.
|
||||
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output. Defaults to `False`.
|
||||
- **vocab_only:** Only load the vocabulary, no weights. Defaults to `False`.
|
||||
|
||||
---
|
||||
|
||||
### OpenAI
|
||||
|
||||
Wrapper around [OpenAI's](https://openai.com) large language models.
|
||||
|
||||
- **max_tokens:** The maximum number of tokens to generate in the completion. `-1` returns as many tokens as possible, given the prompt and the model's maximal context size – defaults to `256`.
|
||||
- **model_kwargs:** Holds any model parameters valid for creating non-specified calls.
|
||||
- **model_name:** Defines the OpenAI model to be used.
|
||||
- **openai_api_base:** Used to specify the base URL for the OpenAI API. It is typically set to the API endpoint provided by the OpenAI service.
|
||||
- **openai_api_key:** Key used to authenticate and access the OpenAI API.
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value – defaults to `0.7`.
|
||||
|
||||
---
|
||||
|
||||
### VertexAI
|
||||
|
||||
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) large language models.
|
||||
|
||||
:::info
|
||||
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
|
||||
:::
|
||||
|
||||
- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
|
||||
- **location:** The default location to use when making API calls – defaults to `us-central1`.
|
||||
- **max_output_tokens:** Token limit determines the maximum amount of text output from one prompt – defaults to `128`.
|
||||
- **model_name:** The name of the Vertex AI large language model – defaults to `text-bison`.
|
||||
- **project:** The default GCP project to use when making Vertex API calls.
|
||||
- **request_parallelism:** The amount of parallelism allowed for requests issued to VertexAI models – defaults to `5`.
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value – defaults to `0`.
|
||||
- **top_k:** How the model selects tokens for output — the next token is selected from among the `top_k` most probable tokens – defaults to `40`.
|
||||
- **top_p:** Tokens are selected from most probable to least until the sum of their probabilities reaches the `top_p` value – defaults to `0.95`.
|
||||
- **tuned_model_name:** The name of a tuned model. If provided, model_name is ignored.
|
||||
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output – defaults to `False`.
|
||||
|
||||
---
|
||||
|
||||
### ChatVertexAI
|
||||
|
||||
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) large language models.
|
||||
|
||||
:::info
|
||||
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
|
||||
:::
|
||||
|
||||
- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
|
||||
- **location:** The default location to use when making API calls – defaults to `us-central1`.
|
||||
- **max_output_tokens:** Token limit determines the maximum amount of text output from one prompt – defaults to `128`.
|
||||
- **model_name:** The name of the Vertex AI large language model – defaults to `text-bison`.
|
||||
- **project:** The default GCP project to use when making Vertex API calls.
|
||||
- **request_parallelism:** The amount of parallelism allowed for requests issued to VertexAI models – defaults to `5`.
|
||||
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value – defaults to `0`.
|
||||
- **top_k:** How the model selects tokens for output — the next token is selected from among the `top_k` most probable tokens – defaults to `40`.
|
||||
- **top_p:** Tokens are selected from most probable to least until the sum of their probabilities reaches the `top_p` value – defaults to `0.95`.
|
||||
- **tuned_model_name:** The name of a tuned model. If provided, model_name is ignored.
|
||||
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output – defaults to `False`.
|
||||
|
|
@ -1,2 +1,10 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# Loaders
|
||||
(coming soon)
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
|
|
|
|||
|
|
@ -1,2 +1,108 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# Memories
|
||||
(coming soon)
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
Memory is a concept in chat-based applications that allows the system to remember previous interactions. It helps in maintaining the context of the conversation and enables the system to understand new messages in relation to past messages.
|
||||
|
||||
---
|
||||
|
||||
### ConversationBufferMemory
|
||||
|
||||
The `ConversationBufferMemory` component is a type of memory system that plainly stores the last few inputs and outputs of a conversation.
|
||||
|
||||
**Params**
|
||||
|
||||
- **input_key:** Used to specify the key under which the user input will be stored in the conversation memory. It allows you to provide the user's input to the chain for processing and generating a response.
|
||||
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model – defaults to `chat_history`.
|
||||
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
|
||||
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
|
||||
|
||||
---
|
||||
|
||||
### ConversationBufferWindowMemory
|
||||
|
||||
`ConversationBufferWindowMemory` is a variation of the `ConversationBufferMemory` that maintains a list of the recent interactions in a conversation. It only keeps the last K interactions in memory, which can be useful for maintaining a sliding window of the most recent interactions without letting the buffer get too large.
|
||||
|
||||
**Params**
|
||||
|
||||
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
|
||||
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model. Defaults to `chat_history`.
|
||||
- **k:** Used to specify the number of interactions or messages that should be stored in the conversation buffer. It determines the size of the sliding window that keeps track of the most recent interactions.
|
||||
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
|
||||
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
|
||||
|
||||
---
|
||||
|
||||
### ConversationEntityMemory
|
||||
|
||||
The `ConversationEntityMemory` component incorporates intricate memory structures, specifically a key-value store, for entities referenced in a conversation. This facilitates the storage and retrieval of information related to entities that have been mentioned throughout the conversation.
|
||||
|
||||
**Params**
|
||||
|
||||
- **Entity Store:** Structure that stores information about specific entities mentioned in a conversation.
|
||||
- **LLM:** Language Model to use in the `ConversationEntityMemory`.
|
||||
- **chat_history_key:** Specify a unique identifier for the chat history data associated with a particular entity. This allows for organizing and accessing the chat history data for each entity within the conversation entity memory. Defaults to `history`
|
||||
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
|
||||
- **k:** Refers to the number of entities that can be stored in the memory. It determines the maximum number of entities that can be stored and retrieved from the memory object. Defaults to `10`
|
||||
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
|
||||
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
|
||||
|
||||
---
|
||||
|
||||
### ConversationKGMemory
|
||||
|
||||
`ConversationKGMemory` is a type of memory that uses a knowledge graph to recreate memory. It allows the extraction of entities and knowledge triplets from a new message, using previous messages as context.
|
||||
|
||||
**Params**
|
||||
|
||||
- **LLM:** Language Model to use in the `ConversationKGMemory`.
|
||||
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
|
||||
- **k:** Represents the number of previous conversation turns that will be stored in the memory. By setting "k" to 2, it means that the memory will retain the previous 2 conversation turns, allowing the model to access and utilize the information from those turns during the conversation. Defaults to `10`
|
||||
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model. Defaults to `chat_history`.
|
||||
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
|
||||
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
|
||||
|
||||
---
|
||||
|
||||
### ConversationSummaryMemory
|
||||
|
||||
The `ConversationSummaryMemory` is a memory component that creates a summary of the conversation over time. It condenses information from the conversation and stores the current summary in memory. It is particularly useful for longer conversations where keeping the entire message history in the prompt would take up too many tokens.
|
||||
|
||||
**Params**
|
||||
|
||||
- **LLM:** Language Model to use in the `ConversationSummaryMemory`.
|
||||
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
|
||||
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model. Defaults to `chat_history`.
|
||||
- **output_key:** Used to specify the key under which the generated response will be stored in the conversation memory. It allows you to retrieve the response using the specified key.
|
||||
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string. The default is `False`.
|
||||
|
||||
---
|
||||
|
||||
### PostgresChatMessageHistory
|
||||
|
||||
The `PostgresChatMessageHistory` is a memory component that allows for the storage and retrieval of chat message history using a PostgreSQL database. The connection to the PostgreSQL database is established using a connection string, which includes the necessary authentication and database information.
|
||||
|
||||
**Params**
|
||||
|
||||
- **connection_string:** Refers to a string that contains the necessary information to establish a connection to a PostgreSQL database. The `connection_string` typically includes details such as the username, password, host, port, and database name required to connect to the PostgreSQL database. Defaults to `postgresql://postgres:mypassword@localhost/chat_history`
|
||||
- **session_id:** It is a unique identifier that is used to associate chat message history with a specific session or conversation.
|
||||
- **table_name:** Refers to the name of the table in the PostgreSQL database where the chat message history will be stored. Defaults to `message_store`
|
||||
|
||||
---
|
||||
|
||||
### VectorRetrieverMemory
|
||||
|
||||
The `VectorRetrieverMemory` is a memory component that allows for the retrieval of vectors based on a given query. It is used to perform vector-based searches and retrievals.
|
||||
|
||||
**Params**
|
||||
|
||||
- **Retriever:** The retriever used to fetch documents.
|
||||
- **input_key:** Used to specify the keys in the memory object where the input messages should be stored. It allows for the retrieval and manipulation of input messages.
|
||||
- **memory_key:** Specifies the prompt variable name where the memory will store and retrieve the chat messages. It allows for the preservation of the conversation history throughout the interaction with the language model – defaults to `chat_history`.
|
||||
- **return_messages:** Determines whether the history should be returned as a string or as a list of messages. If `return_messages` is set to True, the history will be returned as a list of messages. If `return_messages` is set to False or not specified, the history will be returned as a string – defaults to `False`.
|
||||
|
|
@ -1,5 +1,15 @@
|
|||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Prompts
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may
|
||||
contain some rough edges. Share your feedback or report issues to help us
|
||||
improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
A prompt refers to the input given to a language model. It is constructed from multiple components and can be parametrized using prompt templates. A prompt template is a reproducible way to generate prompts and allow for easy customization through input variables.
|
||||
|
||||
---
|
||||
|
|
@ -8,8 +18,10 @@ A prompt refers to the input given to a language model. It is constructed from m
|
|||
|
||||
The `PromptTemplate` component allows users to create prompts and define variables that provide control over instructing the model. The template can take in a set of variables from the end user and generates the prompt once the conversation is initiated.
|
||||
|
||||
:::info
|
||||
Once a variable is defined in the prompt template, it becomes a component input of its own. Check out [Prompt Customization](../guidelines/prompt-customization.mdx) to learn more.
|
||||
:::
|
||||
<Admonition type="info">
|
||||
Once a variable is defined in the prompt template, it becomes a component
|
||||
input of its own. Check out [Prompt
|
||||
Customization](../guidelines/prompt-customization.mdx) to learn more.
|
||||
</Admonition>
|
||||
|
||||
- **template:** Template used to format an individual request.
|
||||
- **template:** Template used to format an individual request.
|
||||
|
|
|
|||
24
docs/docs/components/retrievers.mdx
Normal file
24
docs/docs/components/retrievers.mdx
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# Retrievers
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
A retriever is an interface that returns documents given an unstructured query. It is more general than a vector store and does not need to be able to store documents, only to return or retrieve them.
|
||||
|
||||
---
|
||||
|
||||
### MultiQueryRetriever
|
||||
|
||||
The `MultiQueryRetriever` component automates the process of generating multiple queries, retrieves relevant documents for each query, and combines the results to provide a more extensive and diverse set of potentially relevant documents. This approach enhances the effectiveness of the retrieval process and helps overcome the limitations of traditional distance-based retrieval methods.
|
||||
|
||||
**Params**
|
||||
|
||||
- **LLM:** Language Model to use in the `MultiQueryRetriever`.
|
||||
- **Prompt:** Prompt to represent a schema for an LLM.
|
||||
- **Retriever:** The retriever used to fetch documents.
|
||||
- **parser_key:** This parameter is used to specify the key or attribute name of the parsed output that will be used for retrieval. It determines how the results from the language model are split into a list of queries. Defaults to `lines`, which means that the output from the language model will be split into a list of lines of text. This allows the retriever to retrieve relevant documents based on each line of text separately.
|
||||
|
|
@ -1,2 +1,49 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# Text Splitters
|
||||
(coming soon)
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
A text splitter is a tool that divides a document or text into smaller chunks or segments. It is used to break down large texts into more manageable pieces for analysis or processing.
|
||||
|
||||
---
|
||||
|
||||
### CharacterTextSplitter
|
||||
|
||||
The `CharacterTextSplitter` is used to split a long text into smaller chunks based on a specified character. It splits the text by trying to keep paragraphs, sentences, and words together as long as possible, as these are semantically related pieces of text.
|
||||
|
||||
**Params**
|
||||
|
||||
- **Documents:** Input documents to split.
|
||||
|
||||
- **chunk_overlap:** Determines the number of characters that overlap between consecutive chunks when splitting text. It specifies how much of the previous chunk should be included in the next chunk.
|
||||
|
||||
For example, if the `chunk_overlap` is set to 20 and the `chunk_size` is set to 100, the splitter will create chunks of 100 characters each, but the last 20 characters of each chunk will overlap with the first 20 characters of the next chunk. This allows for a smoother transition between chunks and ensures that no information is lost – defaults to `200`.
|
||||
|
||||
- **chunk_size:** Determines the maximum number of characters in each chunk when splitting a text. It specifies the size or length of each chunk.
|
||||
|
||||
For example, if the chunk_size is set to 100, the splitter will create chunks of 100 characters each. If the text is longer than 100 characters, it will be divided into multiple chunks of equal size, except for the last chunk, which may be smaller if there are remaining characters – defaults to `1000`.
|
||||
|
||||
- **separator:** Specifies the character that will be used to split the text into chunks – defaults to `.`
|
||||
|
||||
---
|
||||
|
||||
### RecursiveCharacterTextSplitter
|
||||
|
||||
The `RecursiveCharacterTextSplitter` splits the text by trying to keep paragraphs, sentences, and words together as long as possible, similar to the `CharacterTextSplitter`. However, it also recursively splits the text into smaller chunks if the chunk size exceeds a specified threshold.
|
||||
|
||||
**Params**
|
||||
|
||||
- **Documents:** Input documents to split.
|
||||
|
||||
- **chunk_overlap:** Determines the number of characters that overlap between consecutive chunks when splitting text. It specifies how much of the previous chunk should be included in the next chunk.
|
||||
|
||||
- **chunk_size:** Determines the maximum number of characters in each chunk when splitting a text. It specifies the size or length of each chunk.
|
||||
|
||||
- **separator_type:** The parameter allows the user to split the code with multiple language support. It supports various languages such as Text, Ruby, Python, Solidity, Java, and more. Defaults to `Text`.
|
||||
|
||||
- **separators:** The `separators` in RecursiveCharacterTextSplitter are the characters used to split the text into chunks. The text splitter tries to create chunks based on splitting on the first character in the list of `separators`. If any chunks are too large, it moves on to the next character in the list and continues splitting. Defaults to `.`
|
||||
|
|
@ -1,2 +1,9 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# Toolkits
|
||||
(coming soon)
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
|
@ -1,2 +1,9 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# Tools
|
||||
(coming soon)
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
|
@ -1,2 +1,10 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# Utilities
|
||||
(coming soon)
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
|
|
|
|||
|
|
@ -1,2 +1,9 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# Vector Stores
|
||||
(coming soon)
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
|
@ -1,2 +1,20 @@
|
|||
import Admonition from '@theme/Admonition';
|
||||
|
||||
# Wrappers
|
||||
(coming soon)
|
||||
|
||||
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
|
||||
<p>
|
||||
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
|
||||
</p>
|
||||
</Admonition>
|
||||
|
||||
|
||||
### TextRequestsWrapper
|
||||
|
||||
This component is designed to work with the Python Requests module, which is a popular tool for making web requests. Used to fetch data from a particular website.
|
||||
|
||||
**Params**
|
||||
|
||||
- **header:** specifies the headers to be included in the HTTP request. Defaults to `{'Authorization': 'Bearer <token>'}`.
|
||||
|
||||
Headers are key-value pairs that provide additional information about the request or the client making the request. They can be used to send authentication credentials, specify the content type of the request, set cookies, and more. They allow the client and the server to communicate additional information beyond the basic request.
|
||||
|
|
@ -36,10 +36,9 @@ Before you start, make sure you have the following installed:
|
|||
- Poetry (>=1.4)
|
||||
- Node.js
|
||||
|
||||
Then install the dependencies and start the development server for the backend:
|
||||
Then, in the root folder, install the dependencies and start the development server for the backend:
|
||||
|
||||
```bash
|
||||
make install_backend
|
||||
make backend
|
||||
```
|
||||
|
||||
|
|
@ -49,6 +48,7 @@ And the frontend:
|
|||
make frontend
|
||||
```
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Docker compose
|
||||
|
|
@ -59,4 +59,19 @@ The following snippet will run the backend and frontend in separate containers.
|
|||
docker compose up --build
|
||||
# or
|
||||
make dev build=1
|
||||
```
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Documentation
|
||||
|
||||
The documentation is built using [Docusaurus](https://docusaurus.io/). To run the documentation locally, run the following commands:
|
||||
|
||||
```bash
|
||||
cd docs
|
||||
npm install
|
||||
npm run start
|
||||
```
|
||||
|
||||
The documentation will be available at `localhost:3000` and all the files are located in the `docs/docs` folder.
|
||||
Once you are done with your changes, you can create a Pull Request to the `main` branch.
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Buffer Memory
|
||||
|
||||
For certain applications, retaining past interactions is crucial. For that, chains and agents may accept a memory component as one of their input parameters. The `ConversationBufferMemory` component is one of them. It stores messages and extracts them into variables.
|
||||
|
|
@ -17,9 +19,10 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
|
|||
|
||||
#### <a target="\_blank" href="json_files/Buffer_Memory.json" download>Download Flow</a>
|
||||
|
||||
:::note LangChain Components 🦜🔗
|
||||
<Admonition type="note" title="LangChain Components 🦜🔗">
|
||||
|
||||
- [`ConversationBufferMemory`](https://python.langchain.com/docs/modules/memory/how_to/buffer)
|
||||
- [`ConversationChain`](https://python.langchain.com/docs/modules/chains/)
|
||||
- [`ChatOpenAI`](https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai)
|
||||
:::
|
||||
|
||||
</Admonition>
|
||||
|
|
|
|||
|
|
@ -1,10 +1,14 @@
|
|||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Conversation Chain
|
||||
|
||||
This example shows how to instantiate a simple `ConversationChain` component using a Language Model (LLM). Once the Node Status turns green 🟢, the chat will be ready to take in user messages. Here, we used `ChatOpenAI` to act as the required LLM input, but you can use any LLM for this purpose.
|
||||
|
||||
:::info
|
||||
<Admonition type="info">
|
||||
|
||||
Make sure to always get the API key from the provider.
|
||||
:::
|
||||
|
||||
</Admonition>
|
||||
|
||||
## ⛓️ Langflow Example
|
||||
|
||||
|
|
@ -21,8 +25,9 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
|
|||
|
||||
#### <a target="\_blank" href="json_files/Basic_Chat.json" download>Download Flow</a>
|
||||
|
||||
:::note LangChain Components 🦜🔗
|
||||
<Admonition type="note" title="LangChain Components 🦜🔗">
|
||||
|
||||
- [`ConversationChain`](https://python.langchain.com/docs/modules/chains/)
|
||||
- [`ChatOpenAI`](https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai)
|
||||
:::
|
||||
|
||||
</Admonition>
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# CSV Loader
|
||||
|
||||
The `VectorStoreAgent` component retrieves information from one or more vector stores. This example shows a `VectorStoreAgent` connected to a CSV file through the `Chroma` vector store. Process description:
|
||||
|
|
@ -7,13 +9,18 @@ The `VectoStoreAgent` component retrieves information from one or more vector st
|
|||
- These chunks feed the `Chroma` vector store, which converts them into vectors and stores them for fast indexing.
|
||||
- Finally, the agent accesses the information of the vector store through the `VectorStoreInfo` tool.
|
||||
|
||||
:::info
|
||||
The vector store is used for efficient semantic search, while `VectorStoreInfo` carries information about it, such as its name and description. Embeddings are a way to represent words, phrases, or any entities in a vector space. Learn more about them [here](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings).
|
||||
:::
|
||||
<Admonition type="info">
|
||||
The vector store is used for efficient semantic search, while
|
||||
`VectorStoreInfo` carries information about it, such as its name and
|
||||
description. Embeddings are a way to represent words, phrases, or any entities
|
||||
in a vector space. Learn more about them
|
||||
[here](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings).
|
||||
</Admonition>
|
||||
|
||||
:::tip
|
||||
Once you build this flow, ask questions about the data in the chat interface (e.g., number of rows or columns).
|
||||
:::
|
||||
<Admonition type="tip">
|
||||
Once you build this flow, ask questions about the data in the chat interface
|
||||
(e.g., number of rows or columns).
|
||||
</Admonition>
|
||||
|
||||
## ⛓️ Langflow Example
|
||||
|
||||
|
|
@ -30,7 +37,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
|
|||
|
||||
#### <a target="\_blank" href="json_files/CSV_Loader.json" download>Download Flow</a>
|
||||
|
||||
:::note LangChain Components 🦜🔗
|
||||
<Admonition type="note" title="LangChain Components 🦜🔗">
|
||||
|
||||
- [`CSVLoader`](https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/csv)
|
||||
- [`CharacterTextSplitter`](https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/character_text_splitter)
|
||||
|
|
@ -39,4 +46,5 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
|
|||
- [`VectorStoreInfo`](https://python.langchain.com/docs/modules/data_connection/vectorstores/)
|
||||
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
|
||||
- [`VectorStoreAgent`](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore)
|
||||
:::
|
||||
|
||||
</Admonition>
|
||||
|
|
|
|||
365
docs/docs/examples/flow-runner.mdx
Normal file
365
docs/docs/examples/flow-runner.mdx
Normal file
|
|
@ -0,0 +1,365 @@
|
|||
---
|
||||
description: Custom Components
|
||||
hide_table_of_contents: true
|
||||
---
|
||||
|
||||
import ZoomableImage from "/src/theme/ZoomableImage.js";
|
||||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# FlowRunner Component
|
||||
|
||||
The CustomComponent class allows us to create components that interact with Langflow itself. In this example, we will make a component that runs other flows available in "My Collection".
|
||||
|
||||
<ZoomableImage
|
||||
alt="Document Processor Component"
|
||||
sources={{
|
||||
light: "img/flow_runner.png",
|
||||
}}
|
||||
style={{
|
||||
width: "30%",
|
||||
margin: "0 auto",
|
||||
display: "flex",
|
||||
justifyContent: "center",
|
||||
}}
|
||||
/>
|
||||
|
||||
We will cover how to:
|
||||
|
||||
- List Collection flows using the _`list_flows`_ method.
|
||||
- Load a flow using the _`load_flow`_ method.
|
||||
- Configure a dropdown input field using the _`options`_ parameter.
|
||||
|
||||
<details open>
|
||||
|
||||
<summary>Example Code</summary>
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
class FlowRunner(CustomComponent):
    """Run another flow from "My Collection" on a Document's text.

    The selected flow is fed the input document's ``page_content`` and the
    flow's result is wrapped back into a new ``Document``.
    """

    display_name = "Flow Runner"
    description = "Run other flows using a document as input."

    def build_config(self):
        # Populate a dropdown ("options") with the names of all flows in
        # the user's collection so one can be picked in the UI.
        flows = self.list_flows()
        flow_names = [f.name for f in flows]
        return {"flow_name": {"options": flow_names,
                              "display_name": "Flow Name",
                              },
                "document": {"display_name": "Document"}
                }


    def build(self, flow_name: str, document: Document) -> Document:
        # List the flows
        # NOTE(review): this list is not used below — the flow is fetched
        # by name via get_flow; consider dropping the call.
        flows = self.list_flows()
        # Get the flow that matches the selected name
        # You can also get the flow by id
        # using self.get_flow(flow_id=flow_id)
        tweaks = {}
        flow = self.get_flow(flow_name=flow_name, tweaks=tweaks)
        # Get the page_content from the document
        # Upstream components may hand over a list of Documents; only the
        # first one is used in that case.
        if document and isinstance(document, list):
            document = document[0]
        page_content = document.page_content
        # Use it in the flow
        result = flow(page_content)
        return Document(page_content=str(result))
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<CH.Scrollycoding rows={20} className={""}>
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
|
||||
|
||||
class MyComponent(CustomComponent):
|
||||
display_name = "Custom Component"
|
||||
description = "This is a custom component"
|
||||
|
||||
def build_config(self):
|
||||
...
|
||||
|
||||
def build(self):
|
||||
...
|
||||
|
||||
```
|
||||
|
||||
The typical structure of a Custom Component is composed of _`display_name`_ and _`description`_ attributes, _`build`_ and _`build_config`_ methods.
|
||||
|
||||
---
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
|
||||
|
||||
# focus
|
||||
class FlowRunner(CustomComponent):
|
||||
# focus
|
||||
display_name = "Flow Runner"
|
||||
# focus
|
||||
description = "Run other flows"
|
||||
|
||||
def build_config(self):
|
||||
...
|
||||
|
||||
def build(self):
|
||||
...
|
||||
|
||||
```
|
||||
|
||||
Let's start by defining our component's _`display_name`_ and _`description`_.
|
||||
|
||||
---
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
# focus
|
||||
from langchain.schema import Document
|
||||
|
||||
|
||||
class FlowRunner(CustomComponent):
|
||||
display_name = "Flow Runner"
|
||||
description = "Run other flows using a document as input."
|
||||
|
||||
def build_config(self):
|
||||
...
|
||||
|
||||
def build(self):
|
||||
...
|
||||
|
||||
```
|
||||
|
||||
Second, we will import _`Document`_ from the [_langchain.schema_](https://docs.langchain.com/docs/components/schema/) module. This will be the return type of the _`build`_ method.
|
||||
|
||||
---
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
# focus
|
||||
from langchain.schema import Document
|
||||
|
||||
|
||||
class FlowRunner(CustomComponent):
|
||||
display_name = "Flow Runner"
|
||||
description = "Run other flows using a document as input."
|
||||
|
||||
def build_config(self):
|
||||
...
|
||||
|
||||
# focus
|
||||
def build(self, flow_name: str, document: Document) -> Document:
|
||||
...
|
||||
|
||||
```
|
||||
|
||||
Now, let's add the [parameters](focus://11[20:55]) and the [return type](focus://11[60:69]) to the _`build`_ method. The parameters added are:
|
||||
|
||||
- _`flow_name`_ is the name of the flow we want to run.
|
||||
- _`document`_ is the input document to be passed to that flow.
|
||||
- Since _`Document`_ is a Langchain type, it will add an input [handle](../guidelines/components) to the component ([see more](../components/custom)).
|
||||
|
||||
---
|
||||
|
||||
```python focus=13:14
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
|
||||
class FlowRunner(CustomComponent):
|
||||
display_name = "Flow Runner"
|
||||
description = "Run other flows using a document as input."
|
||||
|
||||
def build_config(self):
|
||||
...
|
||||
|
||||
def build(self, flow_name: str, document: Document) -> Document:
|
||||
# List the flows
|
||||
flows = self.list_flows()
|
||||
|
||||
```
|
||||
|
||||
We can now start writing the _`build`_ method. Let's list available flows in "My Collection" using the _`list_flows`_ method.
|
||||
|
||||
---
|
||||
|
||||
```python focus=15:18
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
|
||||
class FlowRunner(CustomComponent):
|
||||
display_name = "Flow Runner"
|
||||
description = "Run other flows using a document as input."
|
||||
|
||||
def build_config(self):
|
||||
...
|
||||
|
||||
def build(self, flow_name: str, document: Document) -> Document:
|
||||
# List the flows
|
||||
flows = self.list_flows()
|
||||
# Get the flow that matches the selected name
|
||||
# You can also get the flow by id
|
||||
# using self.get_flow(flow_id=flow_id)
|
||||
tweaks = {}
|
||||
flow = self.get_flow(flow_name=flow_name, tweaks=tweaks)
|
||||
|
||||
```
|
||||
|
||||
And retrieve a flow that matches the selected name (we'll make a dropdown input field for the user to choose among flow names).
|
||||
|
||||
<Admonition type="caution">
|
||||
From version 0.4.0, names are unique, which was not the case in previous
|
||||
versions. This might lead to unexpected results if using flows with the same
|
||||
name.
|
||||
</Admonition>
|
||||
|
||||
---
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
|
||||
class FlowRunner(CustomComponent):
|
||||
display_name = "Flow Runner"
|
||||
description = "Run other flows using a document as input."
|
||||
|
||||
def build_config(self):
|
||||
...
|
||||
|
||||
def build(self, flow_name: str, document: Document) -> Document:
|
||||
# List the flows
|
||||
flows = self.list_flows()
|
||||
# Get the flow that matches the selected name
|
||||
# You can also get the flow by id
|
||||
# using self.get_flow(flow_id=flow_id)
|
||||
tweaks = {}
|
||||
flow = self.get_flow(flow_name=flow_name, tweaks=tweaks)
|
||||
|
||||
|
||||
```
|
||||
|
||||
You can load this flow using _`get_flow`_ and set a _`tweaks`_ dictionary to customize it. Find more about tweaks in our [features guidelines](../guidelines/features#code).
|
||||
|
||||
---
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
|
||||
class FlowRunner(CustomComponent):
|
||||
display_name = "Flow Runner"
|
||||
description = "Run other flows using a document as input."
|
||||
|
||||
def build_config(self):
|
||||
...
|
||||
|
||||
def build(self, flow_name: str, document: Document) -> Document:
|
||||
# List the flows
|
||||
flows = self.list_flows()
|
||||
# Get the flow that matches the selected name
|
||||
# You can also get the flow by id
|
||||
# using self.get_flow(flow_id=flow_id)
|
||||
tweaks = {}
|
||||
flow = self.get_flow(flow_name=flow_name, tweaks=tweaks)
|
||||
# Get the page_content from the document
|
||||
if document and isinstance(document, list):
|
||||
document = document[0]
|
||||
page_content = document.page_content
|
||||
# Use it in the flow
|
||||
result = flow(page_content)
|
||||
return Document(page_content=str(result))
|
||||
```
|
||||
|
||||
We are using a _`Document`_ as input because it is a straightforward way to pass text data in Langflow (specifically because you can connect it to many [loaders](../components/loaders)).
|
||||
Generally, a flow will take a string or a dictionary as input because that's what LangChain components expect.
|
||||
In case you are passing a dictionary, you need to build it according to the needs of the flow you are using.
|
||||
|
||||
The content of a document can be extracted using the _`page_content`_ attribute, which is a string, and passed as an argument to the selected flow.
|
||||
|
||||
---
|
||||
|
||||
```python focus=9:16
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
|
||||
class FlowRunner(CustomComponent):
|
||||
display_name = "Flow Runner"
|
||||
description = "Run other flows using a document as input."
|
||||
|
||||
def build_config(self):
|
||||
flows = self.list_flows()
|
||||
flow_names = [f.name for f in flows]
|
||||
return {"flow_name": {"options": flow_names,
|
||||
"display_name": "Flow Name",
|
||||
},
|
||||
"document": {"display_name": "Document"}
|
||||
}
|
||||
|
||||
def build(self, flow_name: str, document: Document) -> Document:
|
||||
# List the flows
|
||||
flows = self.list_flows()
|
||||
# Get the flow that matches the selected name
|
||||
# You can also get the flow by id
|
||||
# using self.get_flow(flow_id=flow_id)
|
||||
tweaks = {}
|
||||
flow = self.get_flow(flow_name=flow_name, tweaks=tweaks)
|
||||
# Get the page_content from the document
|
||||
if document and isinstance(document, list):
|
||||
document = document[0]
|
||||
page_content = document.page_content
|
||||
# Use it in the flow
|
||||
result = flow(page_content)
|
||||
return Document(page_content=str(result))
|
||||
```
|
||||
|
||||
Finally, we can add field customizations through the _`build_config`_ method. Here we added the _`options`_ key to make the _`flow_name`_ field a dropdown menu. Check out the [custom component reference](../components/custom) for a list of available keys.
|
||||
|
||||
<Admonition type="caution">
|
||||
Make sure that the field type is _`str`_ and _`options`_ values are strings.
|
||||
</Admonition>
|
||||
|
||||
</CH.Scrollycoding>
|
||||
|
||||
Done! This is what our script and custom component looks like:
|
||||
|
||||
<div style={{
|
||||
display: "flex",
|
||||
justifyContent: "center",
|
||||
}}>
|
||||
|
||||
<ZoomableImage
|
||||
alt="Document Processor Code"
|
||||
sources={{
|
||||
light: "img/flow_runner_code.png",
|
||||
}}
|
||||
style={{
|
||||
maxWidth: "100%",
|
||||
margin: "0 auto",
|
||||
display: "flex",
|
||||
justifyContent: "center",
|
||||
}}
|
||||
|
||||
/>
|
||||
|
||||
<ZoomableImage
|
||||
alt="Document Processor Component"
|
||||
sources={{
|
||||
light: "img/flow_runner.png",
|
||||
}}
|
||||
style={{
|
||||
width: "40%",
|
||||
margin: "0 auto",
|
||||
display: "flex",
|
||||
justifyContent: "center",
|
||||
}}
|
||||
/>
|
||||
|
||||
</div>
|
||||
|
|
@ -7,16 +7,14 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
|
|||
We welcome all examples that can help our community learn and explore Langflow's capabilities.
|
||||
Langflow Examples is a repository on [GitHub](https://github.com/logspace-ai/langflow_examples) that contains examples of flows that people can use for inspiration and learning.
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/community-examples.png",
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
{" "}
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/community-examples.png",
|
||||
}}
|
||||
style={{ width: "100%" }}
|
||||
/>
|
||||
|
||||
To upload examples, please follow these steps:
|
||||
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# MidJourney Prompt Chain
|
||||
|
||||
The `MidJourneyPromptChain` can be used to generate imaginative and detailed MidJourney prompts.
|
||||
|
|
@ -14,9 +16,11 @@ And get a response such as:
|
|||
Imagine a mysterious forest, the trees are tall and ancient, their branches reaching up to the sky. Through the darkness, a dragon emerges from the shadows, its scales shimmering in the moonlight. Its wingspan is immense, and its eyes glow with a fierce intensity. It is a majestic and powerful creature, one that commands both respect and fear.
|
||||
```
|
||||
|
||||
:::tip
|
||||
Notice that the `ConversationSummaryMemory` stores a summary of the conversation over time. Try using it to create better prompts as the conversation goes on.
|
||||
:::
|
||||
<Admonition type="tip">
|
||||
Notice that the `ConversationSummaryMemory` stores a summary of the
|
||||
conversation over time. Try using it to create better prompts as the
|
||||
conversation goes on.
|
||||
</Admonition>
|
||||
|
||||
## ⛓️ Langflow Example
|
||||
|
||||
|
|
@ -33,8 +37,9 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
|
|||
|
||||
#### <a target="\_blank" href="json_files/MidJourney_Prompt_Chain.json" download>Download Flow</a>
|
||||
|
||||
:::note LangChain Components 🦜🔗
|
||||
<Admonition type="note" title="LangChain Components 🦜🔗">
|
||||
|
||||
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
|
||||
- [`ConversationSummaryMemory`](https://python.langchain.com/docs/modules/memory/how_to/summary)
|
||||
:::
|
||||
|
||||
</Admonition>
|
||||
|
|
|
|||
|
|
@ -1,26 +1,31 @@
|
|||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Multiple Vector Stores
|
||||
|
||||
The example below shows an agent operating with two vector stores built upon different data sources.
|
||||
|
||||
The `TextLoader` loads a TXT file, while the `WebBaseLoader` pulls text from webpages into a document format to be accessed downstream. The `Chroma` vector stores are created analogously to what we have demonstrated in our [CSV Loader](/examples/csv-loader.mdx) example. Finally, the `VectorStoreRouterAgent` constructs an agent that routes between the vector stores.
|
||||
|
||||
:::info
|
||||
Get the TXT file used [here](https://github.com/hwchase17/chat-your-data/blob/master/state_of_the_union.txt).
|
||||
:::
|
||||
<Admonition type="info">
|
||||
Get the TXT file used
|
||||
[here](https://github.com/hwchase17/chat-your-data/blob/master/state_of_the_union.txt).
|
||||
</Admonition>
|
||||
|
||||
URL used by the `WebBaseLoader`:
|
||||
|
||||
```txt
|
||||
```text
|
||||
https://pt.wikipedia.org/wiki/Harry_Potter
|
||||
```
|
||||
|
||||
:::tip
|
||||
When you build the flow, request information about one of the sources. The agent should be able to use the correct source to generate a response.
|
||||
:::
|
||||
<Admonition type="tip">
|
||||
When you build the flow, request information about one of the sources. The
|
||||
agent should be able to use the correct source to generate a response.
|
||||
</Admonition>
|
||||
|
||||
:::info
|
||||
Learn more about Multiple Vector Stores [here](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore?highlight=Multiple%20Vector%20Stores#multiple-vectorstores).
|
||||
:::
|
||||
<Admonition type="info">
|
||||
Learn more about Multiple Vector Stores
|
||||
[here](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore?highlight=Multiple%20Vector%20Stores#multiple-vectorstores).
|
||||
</Admonition>
|
||||
|
||||
## ⛓️ Langflow Example
|
||||
|
||||
|
|
@ -37,7 +42,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
|
|||
|
||||
#### <a target="\_blank" href="json_files/Multiple_Vector_Stores.json" download>Download Flow</a>
|
||||
|
||||
:::note LangChain Components 🦜🔗
|
||||
<Admonition type="note" title="LangChain Components 🦜🔗">
|
||||
|
||||
- [`WebBaseLoader`](https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/web_base)
|
||||
- [`TextLoader`](https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/unstructured_file)
|
||||
|
|
@ -49,4 +54,4 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
|
|||
- [`VectorStoreRouterToolkit`](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore)
|
||||
- [`VectorStoreRouterAgent`](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore)
|
||||
|
||||
:::
|
||||
</Admonition>
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Python Function
|
||||
|
||||
Langflow allows you to create a customized tool using the `PythonFunction` connected to a `Tool` component. In this example, Regex is used in Python to validate a pattern.
|
||||
|
|
@ -15,15 +17,19 @@ def is_brazilian_zipcode(zipcode: str) -> bool:
|
|||
return False
|
||||
```
|
||||
|
||||
:::tip
|
||||
When a tool is called, it is often desirable to have its output returned directly to the user. You can do this by setting the **return_direct** flag for a tool to be True.
|
||||
:::
|
||||
<Admonition type="tip">
|
||||
When a tool is called, it is often desirable to have its output returned
|
||||
directly to the user. You can do this by setting the **return_direct** flag
|
||||
for a tool to be True.
|
||||
</Admonition>
|
||||
|
||||
The `AgentInitializer` component is a quick way to construct an agent from the model and tools.
|
||||
|
||||
:::info
|
||||
The `PythonFunction` is a custom component that uses the LangChain 🦜🔗 tool decorator. Learn more about it [here](https://python.langchain.com/docs/modules/agents/tools/how_to/custom_tools).
|
||||
:::
|
||||
<Admonition type="info">
|
||||
The `PythonFunction` is a custom component that uses the LangChain 🦜🔗 tool
|
||||
decorator. Learn more about it
|
||||
[here](https://python.langchain.com/docs/modules/agents/tools/how_to/custom_tools).
|
||||
</Admonition>
|
||||
|
||||
## ⛓️ Langflow Example
|
||||
|
||||
|
|
@ -40,9 +46,10 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
|
|||
|
||||
#### <a target="\_blank" href="json_files/Python_Function.json" download>Download Flow</a>
|
||||
|
||||
:::note LangChain Components 🦜🔗
|
||||
<Admonition type="note" title="LangChain Components 🦜🔗">
|
||||
|
||||
- [`PythonFunctionTool`](https://python.langchain.com/docs/modules/agents/tools/how_to/custom_tools)
|
||||
- [`ChatOpenAI`](https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai)
|
||||
- [`AgentInitializer`](https://python.langchain.com/docs/modules/agents/)
|
||||
:::
|
||||
|
||||
</Admonition>
|
||||
|
|
|
|||
|
|
@ -1,24 +1,29 @@
|
|||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Serp API Tool
|
||||
|
||||
The [Serp API](https://serpapi.com/) (Search Engine Results Page) allows developers to scrape results from search engines such as Google, Bing and Yahoo, and can be used in Langflow through the `Search` component.
|
||||
|
||||
:::info
|
||||
To use the Serp API, you first need to sign up [Serp API](https://serpapi.com/) for an API key on the provider's website.
|
||||
:::
|
||||
<Admonition type="info">
|
||||
To use the Serp API, you first need to sign up [Serp
|
||||
API](https://serpapi.com/) for an API key on the provider's website.
|
||||
</Admonition>
|
||||
|
||||
Here, the `ZeroShotPrompt` component specifies a prompt template for the `ZeroShotAgent`. Set a _Prefix_ and _Suffix_ with rules for the agent to obey. In the example, we used default templates.
|
||||
|
||||
The `LLMChain` is a simple chain that takes in a prompt template, formats it with the user input, and returns the response from an LLM.
|
||||
|
||||
:::tip
|
||||
In this example, we used [`ChatOpenAI`](https://platform.openai.com/) as the LLM, but feel free to experiment with other Language Models!
|
||||
:::
|
||||
<Admonition type="tip">
|
||||
In this example, we used [`ChatOpenAI`](https://platform.openai.com/) as the
|
||||
LLM, but feel free to experiment with other Language Models!
|
||||
</Admonition>
|
||||
|
||||
The `ZeroShotAgent` takes the `LLMChain` and the `Search` tool as inputs, using the tool to find information when necessary.
|
||||
|
||||
:::info
|
||||
Learn more about the Serp API [here](https://python.langchain.com/docs/modules/agents/tools/integrations/serpapi).
|
||||
:::
|
||||
<Admonition type="info">
|
||||
Learn more about the Serp API
|
||||
[here](https://python.langchain.com/docs/modules/agents/tools/integrations/serpapi).
|
||||
</Admonition>
|
||||
|
||||
## ⛓️ Langflow Example
|
||||
|
||||
|
|
@ -35,11 +40,12 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
|
|||
|
||||
#### <a target="\_blank" href="json_files/SerpAPI_Tool.json" download>Download Flow</a>
|
||||
|
||||
:::note LangChain Components 🦜🔗
|
||||
<Admonition type="note" title="LangChain Components 🦜🔗">
|
||||
|
||||
- [`ZeroShotPrompt`](https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/)
|
||||
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
|
||||
- [`LLMChain`](https://python.langchain.com/docs/modules/chains/foundational/llm_chain)
|
||||
- [`Search`](https://python.langchain.com/docs/modules/agents/tools/integrations/serpapi)
|
||||
- [`ZeroShotAgent`](https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent)
|
||||
:::
|
||||
|
||||
</Admonition>
|
||||
|
|
|
|||
|
|
@ -6,15 +6,14 @@ import ThemedImage from "@theme/ThemedImage";
|
|||
import useBaseUrl from "@docusaurus/useBaseUrl";
|
||||
import ZoomableImage from "/src/theme/ZoomableImage.js";
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/hugging-face.png",
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
{" "}
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/hugging-face.png",
|
||||
}}
|
||||
style={{ width: "100%" }}
|
||||
/>
|
||||
|
||||
Check out Langflow on [HuggingFace Spaces](https://huggingface.co/spaces/Logspace/Langflow).
|
||||
|
|
|
|||
|
|
@ -7,58 +7,46 @@ import ReactPlayer from "react-player";
|
|||
|
||||
Langflow’s chat interface provides a user-friendly experience and functionality to interact with the model and customize the prompt. The sidebar brings options that allow users to view and edit pre-defined prompt variables. This feature facilitates quick experimentation by enabling the modification of variable values right in the chat.
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/chat_interface.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
</div>
|
||||
{" "}
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/chat_interface.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
|
||||
Notice that editing variables in the chat interface takes place temporarily and won’t change their original value in the components once the chat is closed.
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/chat_interface2.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
</div>
|
||||
{" "}
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/chat_interface2.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
|
||||
To view the complete prompt in its original, structured format, click the "Display Prompt" option. This feature lets you see the prompt exactly as it entered the model.
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/chat_interface3.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{" "}
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/chat_interface3.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
|
||||
In the chat interface, you can redefine which variable should be interpreted as the chat input. This gives you control over these inputs and allows dynamic and creative interactions.
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/chat_interface4.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
</div>
|
||||
{" "}
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/chat_interface4.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
|
|
|
|||
209
docs/docs/guidelines/chat-widget.mdx
Normal file
209
docs/docs/guidelines/chat-widget.mdx
Normal file
|
|
@ -0,0 +1,209 @@
|
|||
import ThemedImage from "@theme/ThemedImage";
|
||||
import useBaseUrl from "@docusaurus/useBaseUrl";
|
||||
import ZoomableImage from "/src/theme/ZoomableImage.js";
|
||||
import ReactPlayer from "react-player";
|
||||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Chat Widget
|
||||
|
||||
<div style={{ marginBottom: "20px" }}>
|
||||
The <b>Langflow Chat Widget</b> is a powerful web component that enables
|
||||
communication with a Langflow project. This widget allows for a chat interface
|
||||
embedding, allowing the integration of Langflow into web applications
|
||||
effortlessly.
|
||||
</div>
|
||||
|
||||
## Features
|
||||
|
||||
🌟 **Seamless Integration:** Easily integrate the Langflow Chat Widget into your website or web application with just a few lines of JavaScript.
|
||||
|
||||
🚀 **Interactive Chat Interface:** Engage your users with a user-friendly conversation, powered by Langflow's advanced language understanding capabilities.
|
||||
|
||||
🎛️ **Customizable Styling:** Customize the appearance of the chat widget to match your application's design and branding.
|
||||
|
||||
🌐 **Multilingual Support:** Communicate with users in multiple languages, opening up your application to a global audience.
|
||||
|
||||
---
|
||||
|
||||
## Usage
|
||||
|
||||
<div style={{ marginBottom: "20px" }}>
|
||||
You can get the HTML code embedded with the chat by clicking the Code button
|
||||
at the Sidebar after building a flow.
|
||||
</div>
|
||||
|
||||
{" "}
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/widget-sidebar.png"),
|
||||
}}
|
||||
style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
|
||||
/>
|
||||
|
||||
<div style={{ marginBottom: "20px" }}>
|
||||
Clicking the Chat Widget HTML tab, you'll get the code to be inserted. Read
|
||||
below to learn how to use it with HTML, React and Angular.
|
||||
</div>
|
||||
|
||||
{" "}
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/widget-code.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
|
||||
---
|
||||
|
||||
### HTML
|
||||
|
||||
The Chat Widget can be embedded into any HTML page, inside a _`<body>`_ tag, as demonstrated in the video below.
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ReactPlayer playing controls url="/videos/langflow_widget.mp4" />
|
||||
</div>
|
||||
|
||||
---
|
||||
|
||||
### React
|
||||
|
||||
To embed the Chat Widget using React, you'll need to insert this _`<script>`_ tag into the React _index.html_ file, inside the _`<body>`_ tag:
|
||||
|
||||
```html
|
||||
<script src="https://cdn.jsdelivr.net/gh/logspace-ai/langflow-embedded-chat@main/dist/build/static/js/bundle.min.js"></script>
|
||||
```
|
||||
|
||||
Then, declare your Web Component and encapsulate it in a React component.
|
||||
|
||||
```jsx
|
||||
declare global {
|
||||
namespace JSX {
|
||||
interface IntrinsicElements {
|
||||
"langflow-chat": any;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export default function ChatWidget({ className }) {
|
||||
return (
|
||||
<div className={className}>
|
||||
<langflow-chat
|
||||
chat_inputs='{"your_key":"value"}'
|
||||
chat_input_field="your_chat_key"
|
||||
flow_id="your_flow_id"
|
||||
host_url="langflow_url"
|
||||
></langflow-chat>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
Finally, you can place the component anywhere in your code to display the Chat Widget.
|
||||
|
||||
---
|
||||
|
||||
### Angular
|
||||
|
||||
To use it in Angular, first add this _`<script>`_ tag into the Angular _index.html_ file, inside the _`<body>`_ tag.
|
||||
|
||||
```html
|
||||
<script src="https://cdn.jsdelivr.net/gh/logspace-ai/langflow-embedded-chat@main/dist/build/static/js/bundle.min.js"></script>
|
||||
```
|
||||
|
||||
When you use a custom web component in an Angular template, the Angular compiler might show a warning because it doesn't recognize custom elements by default. To suppress this warning, add _`CUSTOM_ELEMENTS_SCHEMA`_ to the module's _`@NgModule.schemas`_.
|
||||
|
||||
- Open the module file (it typically ends with _.module.ts_) where you'd add the _`langflow-chat`_ web component.
|
||||
- Import _`CUSTOM_ELEMENTS_SCHEMA`_ at the top of the file:
|
||||
|
||||
```ts
|
||||
import { NgModule, CUSTOM_ELEMENTS_SCHEMA } from "@angular/core";
|
||||
```
|
||||
|
||||
- Add _`CUSTOM_ELEMENTS_SCHEMA`_ to the 'schemas' array inside the '@NgModule' decorator:
|
||||
|
||||
```ts
|
||||
@NgModule({
|
||||
declarations: [
|
||||
// ... Other components and directives ...
|
||||
],
|
||||
imports: [
|
||||
// ... Other imported modules ...
|
||||
],
|
||||
schemas: [CUSTOM_ELEMENTS_SCHEMA], // Add the CUSTOM_ELEMENTS_SCHEMA here
|
||||
})
|
||||
export class YourModule {}
|
||||
```
|
||||
|
||||
In your Angular project, find the component belonging to the module where _`CUSTOM_ELEMENTS_SCHEMA`_ was added.
|
||||
|
||||
- Inside the template, add the _`langflow-chat`_ tag to include the Chat Widget in your component's view:
|
||||
|
||||
```jsx
|
||||
<langflow-chat
|
||||
chat_inputs='{"your_key":"value"}'
|
||||
chat_input_field="your_chat_key"
|
||||
flow_id="your_flow_id"
|
||||
host_url="langflow_url"
|
||||
></langflow-chat>
|
||||
```
|
||||
|
||||
<Admonition type="info">
|
||||
<ul>
|
||||
<li>
|
||||
_`CUSTOM_ELEMENTS_SCHEMA`_ is a built-in schema that allows Angular to
|
||||
recognize custom elements.
|
||||
</li>
|
||||
<li>
|
||||
Adding _`CUSTOM_ELEMENTS_SCHEMA`_ tells Angular to allow custom elements
|
||||
in your templates, and it will suppress the warning related to unknown
|
||||
elements like _`langflow-chat`_.
|
||||
</li>
|
||||
<li>
|
||||
Notice that you can only use the Chat Widget in components that are part
|
||||
of the module where you added _`CUSTOM_ELEMENTS_SCHEMA`_.
|
||||
</li>
|
||||
</ul>
|
||||
</Admonition>
|
||||
|
||||
---
|
||||
|
||||
## Configuration
|
||||
|
||||
Use the widget API to customize your Chat Widget:
|
||||
|
||||
<Admonition type="caution">
|
||||
Props with the type JSON need to be passed as Stringified JSONs, with the
|
||||
format {<span>"key":"value"</span>}.
|
||||
</Admonition>
|
||||
|
||||
| Prop | Type | Required | Description |
|
||||
| --------------------- | ------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| bot_message_style | JSON | No | Applies custom formatting to bot messages. |
|
||||
| chat_input_field | String | Yes | Defines the type of the input field for chat messages. |
|
||||
| chat_inputs | JSON | Yes | Determines the chat input elements and their respective values. |
|
||||
| chat_output_key | String | No | Specifies which output to display if multiple outputs are available. |
|
||||
| chat_position | String | No | Positions the chat window on the screen (options include: top-left, top-center, top-right, center-left, center-right, bottom-right, bottom-center, bottom-left). |
|
||||
| chat_trigger_style | JSON | No | Styles the chat trigger button. |
|
||||
| chat_window_style | JSON | No | Customizes the overall appearance of the chat window. |
|
||||
| error_message_style | JSON | No | Sets the format for error messages within the chat window. |
|
||||
| flow_id | String | Yes | Identifies the flow that the component is associated with. |
|
||||
| height | Number | No | Sets the height of the chat window in pixels. |
|
||||
| host_url | String | Yes | Specifies the URL of the host for chat component communication. |
|
||||
| input_container_style | JSON | No | Applies styling to the container where chat messages are entered. |
|
||||
| input_style | JSON | No | Sets the style for the chat input field. |
|
||||
| online | Boolean | No | Toggles the online status of the chat component. |
|
||||
| online_message | String | No | Sets a custom message to display when the chat component is online. |
|
||||
| placeholder | String | No | Sets the placeholder text for the chat input field. |
|
||||
| placeholder_sending | String | No | Sets the placeholder text to display while a message is being sent. |
|
||||
| send_button_style | JSON | No | Sets the style for the send button in the chat window. |
|
||||
| send_icon_style | JSON | No | Sets the style for the send icon in the chat window. |
|
||||
| tweaks | JSON | No | Applies additional custom adjustments for the associated flow. |
|
||||
| user_message_style | JSON | No | Determines the formatting for user messages in the chat window. |
|
||||
| width | Number | No | Sets the width of the chat window in pixels. |
|
||||
| window_title | String | No | Sets the title displayed in the chat window's header or title bar. |
|
||||
|
|
@ -25,17 +25,14 @@ Components are the building blocks of the flows. They are made of inputs, output
|
|||
of that type is required.
|
||||
</div>
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/single-compenent.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
</div>
|
||||
{" "}
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/single-compenent.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
|
||||
<div style={{ marginBottom: "20px" }}>
|
||||
On the top right corner, you will find the component status icon 🔴. Make the
|
||||
|
|
|
|||
406
docs/docs/guidelines/custom-component.mdx
Normal file
406
docs/docs/guidelines/custom-component.mdx
Normal file
|
|
@ -0,0 +1,406 @@
|
|||
---
|
||||
description: Custom Components
|
||||
hide_table_of_contents: true
|
||||
---
|
||||
|
||||
import ZoomableImage from "/src/theme/ZoomableImage.js";
|
||||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Custom Components
|
||||
|
||||
In Langflow, a Custom Component is a special component type that allows users to extend the platform's functionality by creating their own reusable and configurable components.
|
||||
|
||||
A Custom Component is created from a user-defined Python script that uses the _`CustomComponent`_ class provided by the Langflow library. These components can be as simple as a basic function that takes and returns a string or as complex as a combination of multiple sub-components and API calls.
|
||||
|
||||
Let's take a look at the basic rules and features. Then we'll go over an example.
|
||||
|
||||
## TL;DR
|
||||
|
||||
- Create a class that inherits from _`CustomComponent`_ and contains a _`build`_ method.
|
||||
- Use arguments with [Type Annotations (or Type Hints)](https://docs.python.org/3/library/typing.html) of the _`build`_ method to create component fields.
|
||||
- Use the _`build_config`_ method to customize how these fields look and behave.
|
||||
- Set up a folder with your components to load them up in Langflow's sidebar.
|
||||
|
||||
Here is an example:
|
||||
|
||||
<div style={{
|
||||
display: "flex",
|
||||
justifyContent: "center",
|
||||
}}>
|
||||
<CH.Code lineNumbers={false}>
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
class DocumentProcessor(CustomComponent):
|
||||
display_name = "Document Processor"
|
||||
description = "This component processes a document"
|
||||
|
||||
def build_config(self) -> dict:
|
||||
options = ["Uppercase", "Lowercase", "Titlecase"]
|
||||
return {
|
||||
"function": {"options": options,
|
||||
"value": options[0]}}
|
||||
|
||||
def build(self, document: Document, function: str) -> Document:
|
||||
if isinstance(document, list):
|
||||
document = document[0]
|
||||
page_content = document.page_content
|
||||
if function == "Uppercase":
|
||||
page_content = page_content.upper()
|
||||
elif function == "Lowercase":
|
||||
page_content = page_content.lower()
|
||||
elif function == "Titlecase":
|
||||
page_content = page_content.title()
|
||||
self.repr_value = f"Result of {function} function: {page_content}"
|
||||
return Document(page_content=page_content)
|
||||
```
|
||||
|
||||
</CH.Code>
|
||||
|
||||
<ZoomableImage
|
||||
alt="Document Processor Component"
|
||||
sources={{
|
||||
light: "img/document_processor.png",
|
||||
}}
|
||||
style={{
|
||||
margin: "0 auto",
|
||||
display: "flex",
|
||||
justifyContent: "center",
|
||||
}}
|
||||
/>
|
||||
|
||||
</div>
|
||||
|
||||
<Admonition type="tip">
|
||||
Check out [FlowRunner Component](../examples/flow-runner) for a more complex
|
||||
example.
|
||||
</Admonition>
|
||||
|
||||
---
|
||||
|
||||
## Rules
|
||||
|
||||
The Python script for every Custom Component should follow a set of rules. Let's go over them one by one:
|
||||
|
||||
<CH.Scrollycoding rows={20} className={""}>
|
||||
|
||||
### Rule 1
|
||||
|
||||
The script must contain a **single class** that inherits from _`CustomComponent`_.
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
class MyComponent(CustomComponent):
|
||||
display_name = "Custom Component"
|
||||
description = "This is a custom component"
|
||||
|
||||
def build_config(self) -> dict:
|
||||
...
|
||||
|
||||
def build(self, document: Document, function: str) -> Document:
|
||||
...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Rule 2
|
||||
|
||||
This class requires a _`build`_ method used to run the component and define its fields.
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
class MyComponent(CustomComponent):
|
||||
display_name = "Custom Component"
|
||||
description = "This is a custom component"
|
||||
|
||||
def build_config(self) -> dict:
|
||||
...
|
||||
|
||||
# focus
|
||||
# mark
|
||||
def build(self) -> Document:
|
||||
...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
The [Return Type Annotation](https://docs.python.org/3/library/typing.html) of the _`build`_ method defines the component type (e.g., Chain, BaseLLM, or basic Python types). Check out all supported types in the [component reference](../components/custom).
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
class MyComponent(CustomComponent):
|
||||
display_name = "Custom Component"
|
||||
description = "This is a custom component"
|
||||
|
||||
def build_config(self) -> dict:
|
||||
...
|
||||
|
||||
# focus[20:31]
|
||||
# mark
|
||||
def build(self) -> Document:
|
||||
...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
class MyComponent(CustomComponent):
|
||||
display_name = "Custom Component"
|
||||
description = "This is a custom component"
|
||||
|
||||
def build_config(self) -> dict:
|
||||
...
|
||||
|
||||
def build(self) -> Document:
|
||||
...
|
||||
```
|
||||
|
||||
### Rule 3
|
||||
|
||||
The class can have a [_`build_config`_](focus://8) method, which defines configuration fields for the component. The [_`build_config`_](focus://8) method should always return a dictionary with specific keys representing the field names and their corresponding configurations. It must follow the format described below:
|
||||
|
||||
- Top-level keys are field names.
|
||||
- Their values are also of type _`dict`_. They specify the behavior of the generated fields.
|
||||
|
||||
Check out the [component reference](../components/custom) for more details on the available field configurations.
|
||||
|
||||
---
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
class MyComponent(CustomComponent):
|
||||
display_name = "Custom Component"
|
||||
description = "This is a custom component"
|
||||
|
||||
def build_config(self) -> dict:
|
||||
...
|
||||
|
||||
def build(self) -> Document:
|
||||
...
|
||||
```
|
||||
|
||||
## Example
|
||||
|
||||
Let's create a custom component that processes a document (_`langchain.schema.Document`_) using a simple function.
|
||||
|
||||
---
|
||||
|
||||
### Pick a display name
|
||||
|
||||
To start, let's choose a name for our component by adding a _`display_name`_ attribute. This name will appear on the canvas. The name of the class is not relevant, but let's call it _`DocumentProcessor`_.
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
# focus
|
||||
class DocumentProcessor(CustomComponent):
|
||||
# focus
|
||||
display_name = "Document Processor"
|
||||
description = "This is a custom component"
|
||||
|
||||
def build_config(self) -> dict:
|
||||
...
|
||||
|
||||
def build(self) -> Document:
|
||||
...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Write a description
|
||||
|
||||
We can also write a description for it using a _`description`_ attribute.
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
class DocumentProcessor(CustomComponent):
|
||||
display_name = "Document Processor"
|
||||
description = "This component processes a document"
|
||||
|
||||
def build_config(self) -> dict:
|
||||
...
|
||||
|
||||
def build(self) -> Document:
|
||||
...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
class DocumentProcessor(CustomComponent):
|
||||
display_name = "Document Processor"
|
||||
description = "This component processes a document"
|
||||
|
||||
def build_config(self) -> dict:
|
||||
...
|
||||
|
||||
def build(self, document: Document, function: str) -> Document:
|
||||
if isinstance(document, list):
|
||||
document = document[0]
|
||||
page_content = document.page_content
|
||||
if function == "Uppercase":
|
||||
page_content = page_content.upper()
|
||||
elif function == "Lowercase":
|
||||
page_content = page_content.lower()
|
||||
elif function == "Titlecase":
|
||||
page_content = page_content.title()
|
||||
self.repr_value = f"Result of {function} function: {page_content}"
|
||||
return Document(page_content=page_content)
|
||||
```
|
||||
|
||||
### Add the build method
|
||||
|
||||
Here, the build method takes two input parameters: _`document`_, representing the input document to be processed, and _`function`_, a string representing the selected text transformation to be applied (either "Uppercase," "Lowercase," or "Titlecase"). The method processes the text content of the input Document based on the selected function.
|
||||
|
||||
The attribute _`repr_value`_ is used to display the result of the component on the canvas. It is optional and can be used to display any string value.
|
||||
|
||||
The return type is _`Document`_.
|
||||
|
||||
---
|
||||
|
||||
### Customize the component fields
|
||||
|
||||
The _`build_config`_ method is defined here to customize the component fields.
|
||||
|
||||
- _`options`_ determines that the field will be a dropdown menu. The list values and field type must be _`str`_.
|
||||
- _`value`_ is the default option of the dropdown menu.
|
||||
- _`display_name`_ is the name of the field to be displayed.
|
||||
|
||||
```python
|
||||
from langflow import CustomComponent
|
||||
from langchain.schema import Document
|
||||
|
||||
class DocumentProcessor(CustomComponent):
|
||||
display_name = "Document Processor"
|
||||
description = "This component processes a document"
|
||||
|
||||
def build_config(self) -> dict:
|
||||
options = ["Uppercase", "Lowercase", "Titlecase"]
|
||||
return {
|
||||
"function": {"options": options,
|
||||
"value": options[0],
|
||||
"display_name": "Function"
|
||||
},
|
||||
"document": {"display_name": "Document"}
|
||||
}
|
||||
|
||||
def build(self, document: Document, function: str) -> Document:
|
||||
if isinstance(document, list):
|
||||
document = document[0]
|
||||
page_content = document.page_content
|
||||
if function == "Uppercase":
|
||||
page_content = page_content.upper()
|
||||
elif function == "Lowercase":
|
||||
page_content = page_content.lower()
|
||||
elif function == "Titlecase":
|
||||
page_content = page_content.title()
|
||||
self.repr_value = f"Result of {function} function: {page_content}"
|
||||
return Document(page_content=page_content)
|
||||
```
|
||||
|
||||
</CH.Scrollycoding>
|
||||
|
||||
All done! This is what our script and brand-new custom component look like:
|
||||
|
||||
<div style={{
|
||||
display: "flex",
|
||||
justifyContent: "center",
|
||||
}}>
|
||||
|
||||
<ZoomableImage
|
||||
alt="Document Processor Code"
|
||||
sources={{
|
||||
light: "img/document_processor_code.png",
|
||||
}}
|
||||
style={{
|
||||
maxWidth: "100%",
|
||||
margin: "0 auto",
|
||||
display: "flex",
|
||||
justifyContent: "center",
|
||||
}}
|
||||
|
||||
/>
|
||||
|
||||
<ZoomableImage
|
||||
alt="Document Processor Component"
|
||||
sources={{
|
||||
light: "img/document_processor.png",
|
||||
}}
|
||||
style={{
|
||||
width: "40%",
|
||||
margin: "0 auto",
|
||||
display: "flex",
|
||||
justifyContent: "center",
|
||||
}}
|
||||
/>
|
||||
|
||||
</div>
|
||||
|
||||
---
|
||||
|
||||
## Loading Custom Components
|
||||
|
||||
For advanced customization, Langflow offers the option to create and load custom components outside of the standard interface. This process involves creating the desired components using a text editor and loading them using the Langflow CLI.
|
||||
|
||||
### Folder Structure
|
||||
|
||||
Create a folder that follows the same structural conventions as the [config.yaml](https://github.com/logspace-ai/langflow/blob/dev/src/backend/langflow/config.yaml) file. Inside this main directory, use a `custom_components` subdirectory for your custom components.
|
||||
|
||||
Inside `custom_components`, you can create a Python file for each component. Similarly, any custom agents should be housed in an `agents` subdirectory.
|
||||
|
||||
If you use a subdirectory name that is not in our config.yaml file, your component will appear in an `Other` category in the sidebar.
|
||||
|
||||
Your structure should look something like this:
|
||||
|
||||
```
|
||||
.
|
||||
└── custom_components
|
||||
├── document_processor.py
|
||||
└── ...
|
||||
└── agents
|
||||
└── ...
|
||||
└── my_agents <-- Other category
|
||||
└── ...
|
||||
```
|
||||
|
||||
### Loading Custom Components
|
||||
|
||||
The recommended way to load custom components is to set the _`LANGFLOW_COMPONENTS_PATH`_ environment variable to the path of your custom components directory. Then, run the Langflow CLI as usual.
|
||||
|
||||
```bash
|
||||
export LANGFLOW_COMPONENTS_PATH=/path/to/components
|
||||
langflow
|
||||
```
|
||||
|
||||
Alternatively, you can specify the path to your custom components using the _`--components-path`_ argument when running the Langflow CLI, as shown below:
|
||||
|
||||
```bash
|
||||
langflow --components-path /path/to/components
|
||||
```
|
||||
|
||||
Langflow will attempt to load all of the components found in the specified directory. If a component fails to load due to errors in the component's code, Langflow will print an error message to the console but will continue loading the rest of the components.
|
||||
|
||||
### Interacting with Custom Components
|
||||
|
||||
Once your custom components have been loaded successfully, they will appear in Langflow's sidebar. From there, you can add them to your Langflow canvas for use. However, please note that components with errors will not be available for addition to the canvas. Always ensure your code is error-free before attempting to load components.
|
||||
|
||||
Remember, creating custom components allows you to extend the functionality of Langflow to better suit your unique needs. Happy coding!
|
||||
|
|
@ -2,6 +2,7 @@ import ThemedImage from "@theme/ThemedImage";
|
|||
import useBaseUrl from "@docusaurus/useBaseUrl";
|
||||
import ZoomableImage from "/src/theme/ZoomableImage.js";
|
||||
import ReactPlayer from "react-player";
|
||||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Features
|
||||
|
||||
|
|
@ -12,17 +13,14 @@ import ReactPlayer from "react-player";
|
|||
below:
|
||||
</div>
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/features.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
</div>
|
||||
{" "}
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/features.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
|
||||
<div style={{ marginBottom: "20px" }}>
|
||||
Further down, we will explain each of these options.
|
||||
|
|
@ -34,9 +32,10 @@ import ReactPlayer from "react-player";
|
|||
|
||||
Flows can be exported and imported as JSON files.
|
||||
|
||||
:::caution
|
||||
<Admonition type="caution">
|
||||
Watch out for API keys being stored in local files.
|
||||
:::
|
||||
|
||||
</Admonition>
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
|
|
@ -7,80 +7,62 @@ import ReactPlayer from "react-player";
|
|||
|
||||
The prompt template allows users to create prompts and define variables that provide control over instructing the model.
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/prompt_customization.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
</div>
|
||||
{" "}
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/prompt_customization.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
|
||||
Variables can be used to define instructions, questions, context, inputs, or examples for the model and can be created with any chosen name in curly brackets, e.g., `{variable_name}`. They act as placeholders for parts of the text that can be easily modified.
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/prompt_customization2.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
</div>
|
||||
{" "}
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/prompt_customization2.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
|
||||
Once inserted, these variables are immediately recognized as new fields in the prompt component. Here, you can define their values within the component itself or leave a field empty to be adjusted over the chat interface.
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/prompt_customization3.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
</div>
|
||||
{" "}
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/prompt_customization3.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
|
||||
You can also use documents or output parsers as prompt variables. By plugging them into prompt handles, they’ll disable and feed that input field.
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/prompt_customization4.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{" "}
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/prompt_customization4.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
|
||||
With this, users can interact with documents, webpages, or any other type of content directly from the prompt, which allows for seamless integration of external resources with the language model.
|
||||
|
||||
|
||||
|
||||
If working with an interactive (chat-like) flow, remember to keep one of the input variables empty to behave as the chat input.
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/prompt_customization5.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{" "}
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/prompt_customization5.png"),
|
||||
}}
|
||||
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
|
||||
/>
|
||||
|
|
|
|||
|
|
@ -39,8 +39,7 @@ In this guide, we will modify the "Basic Chat with Prompt and History" example,
|
|||
|
||||
5. Open the "Prompt" field on the SystemMessagePromptTemplate component.
|
||||
|
||||
6. Enter the text: `You are a {role} that {behavior}.`
|
||||
|
||||
6. Enter the text: _`You are a {role} that {behavior}.`_
|
||||
7. Save your changes by clicking on "Check & Save".
|
||||
|
||||
8. Define the 'role' variable by typing "obedient assistant".
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ This guide takes you through the process of augmenting the "Basic Chat with Prom
|
|||
|
||||
8. Connect this loader to the `{context}` variable that we just added.
|
||||
|
||||
9. In the "Web Page" field, enter "https://langflow.org/how-upload-examples".
|
||||
9. In the "Web Page" field, enter "https://docs.langflow.org/how-upload-examples".
|
||||
|
||||
10. Now, click on "ConversationBufferMemory".
|
||||
|
||||
|
|
|
|||
|
|
@ -6,13 +6,11 @@ import ThemedImage from "@theme/ThemedImage";
|
|||
import useBaseUrl from "@docusaurus/useBaseUrl";
|
||||
import ZoomableImage from "/src/theme/ZoomableImage.js";
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/new_langflow2.gif",
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
{" "}
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/new_langflow2.gif",
|
||||
}}
|
||||
style={{ width: "100%" }}
|
||||
/>
|
||||
|
|
|
|||
|
|
@ -1,127 +1,145 @@
|
|||
const lightCodeTheme = require("prism-react-renderer/themes/github");
|
||||
|
||||
const { remarkCodeHike } = require("@code-hike/mdx");
|
||||
// With JSDoc @type annotations, IDEs can provide config autocompletion
|
||||
/** @type {import('@docusaurus/types').DocusaurusConfig} */
|
||||
(
|
||||
module.exports = {
|
||||
title: "Langflow Documentation",
|
||||
tagline: "Langflow is a GUI for LangChain, designed with react-flow",
|
||||
favicon: "img/favicon.ico",
|
||||
url: "https://logspace-ai.github.io",
|
||||
baseUrl: "/",
|
||||
onBrokenLinks: "throw",
|
||||
onBrokenMarkdownLinks: "warn",
|
||||
organizationName: "logspace-ai",
|
||||
projectName: "langflow",
|
||||
trailingSlash: false,
|
||||
customFields: {
|
||||
mendableAnonKey: process.env.MENDABLE_ANON_KEY,
|
||||
},
|
||||
i18n: {
|
||||
defaultLocale: "en",
|
||||
locales: ["en"],
|
||||
},
|
||||
presets: [
|
||||
[
|
||||
"@docusaurus/preset-classic",
|
||||
/** @type {import('@docusaurus/preset-classic').Options} */
|
||||
({
|
||||
docs: {
|
||||
routeBasePath: "/",
|
||||
sidebarPath: require.resolve("./sidebars.js"),
|
||||
path: "docs",
|
||||
// sidebarPath: 'sidebars.js',
|
||||
},
|
||||
theme: {
|
||||
customCss: require.resolve("./src/css/custom.css"),
|
||||
},
|
||||
}),
|
||||
],
|
||||
],
|
||||
plugins: [
|
||||
["docusaurus-node-polyfills", { excludeAliases: ["console"] }],
|
||||
"docusaurus-plugin-image-zoom",
|
||||
// ....
|
||||
async function myPlugin(context, options) {
|
||||
return {
|
||||
name: "docusaurus-tailwindcss",
|
||||
configurePostCss(postcssOptions) {
|
||||
// Appends TailwindCSS and AutoPrefixer.
|
||||
postcssOptions.plugins.push(require("tailwindcss"));
|
||||
postcssOptions.plugins.push(require("autoprefixer"));
|
||||
return postcssOptions;
|
||||
},
|
||||
};
|
||||
},
|
||||
],
|
||||
themeConfig:
|
||||
/** @type {import('@docusaurus/preset-classic').ThemeConfig} */
|
||||
module.exports = {
|
||||
title: "Langflow Documentation",
|
||||
tagline: "Langflow is a GUI for LangChain, designed with react-flow",
|
||||
favicon: "img/favicon.ico",
|
||||
url: "https://logspace-ai.github.io",
|
||||
baseUrl: "/",
|
||||
onBrokenLinks: "throw",
|
||||
onBrokenMarkdownLinks: "warn",
|
||||
organizationName: "logspace-ai",
|
||||
projectName: "langflow",
|
||||
trailingSlash: false,
|
||||
customFields: {
|
||||
mendableAnonKey: process.env.MENDABLE_ANON_KEY,
|
||||
},
|
||||
i18n: {
|
||||
defaultLocale: "en",
|
||||
locales: ["en"],
|
||||
},
|
||||
presets: [
|
||||
[
|
||||
"@docusaurus/preset-classic",
|
||||
/** @type {import('@docusaurus/preset-classic').Options} */
|
||||
({
|
||||
navbar: {
|
||||
hideOnScroll: true,
|
||||
title: "Langflow",
|
||||
logo: {
|
||||
alt: "Langflow",
|
||||
src: "img/chain.png",
|
||||
},
|
||||
items: [
|
||||
// right
|
||||
{
|
||||
position: "right",
|
||||
href: "https://github.com/logspace-ai/langflow",
|
||||
position: "right",
|
||||
className: "header-github-link",
|
||||
target: "_blank",
|
||||
rel: null,
|
||||
},
|
||||
{
|
||||
position: "right",
|
||||
href: "https://twitter.com/logspace_ai",
|
||||
position: "right",
|
||||
className: "header-twitter-link",
|
||||
target: "_blank",
|
||||
rel: null,
|
||||
},
|
||||
{
|
||||
position: "right",
|
||||
href: "https://discord.gg/EqksyE2EX9",
|
||||
position: "right",
|
||||
className: "header-discord-link",
|
||||
target: "_blank",
|
||||
rel: null,
|
||||
},
|
||||
docs: {
|
||||
beforeDefaultRemarkPlugins: [
|
||||
[
|
||||
remarkCodeHike,
|
||||
{
|
||||
theme: "github-light",
|
||||
showCopyButton: true,
|
||||
lineNumbers: true,
|
||||
},
|
||||
],
|
||||
],
|
||||
routeBasePath: "/",
|
||||
sidebarPath: require.resolve("./sidebars.js"),
|
||||
path: "docs",
|
||||
// sidebarPath: 'sidebars.js',
|
||||
},
|
||||
theme: {
|
||||
customCss: [
|
||||
require.resolve("@code-hike/mdx/styles.css"),
|
||||
require.resolve("./src/css/custom.css"),
|
||||
],
|
||||
},
|
||||
tableOfContents: {
|
||||
minHeadingLevel: 2,
|
||||
maxHeadingLevel: 5,
|
||||
},
|
||||
colorMode: {
|
||||
defaultMode: "light",
|
||||
disableSwitch: true,
|
||||
respectPrefersColorScheme: false,
|
||||
},
|
||||
announcementBar: {
|
||||
content:
|
||||
'⭐️ If you like ⛓️Langflow, star it on <a target="_blank" rel="noopener noreferrer" href="https://github.com/logspace-ai/langflow">GitHub</a>! ⭐️',
|
||||
backgroundColor: "#B53D38", //Mustard Yellow #D19900 #D4B20B - Salmon #E9967A
|
||||
textColor: "#fff",
|
||||
isCloseable: false,
|
||||
},
|
||||
footer: {
|
||||
links: [],
|
||||
copyright: `Copyright © ${new Date().getFullYear()} Logspace.`,
|
||||
},
|
||||
zoom: {
|
||||
selector: ".markdown :not(a) > img:not(.no-zoom)",
|
||||
background: {
|
||||
light: "rgba(240, 240, 240, 0.9)",
|
||||
},
|
||||
config: {},
|
||||
},
|
||||
prism: {
|
||||
theme: lightCodeTheme,
|
||||
},
|
||||
}),
|
||||
}
|
||||
);
|
||||
],
|
||||
],
|
||||
plugins: [
|
||||
["docusaurus-node-polyfills", { excludeAliases: ["console"] }],
|
||||
"docusaurus-plugin-image-zoom",
|
||||
// ....
|
||||
async function myPlugin(context, options) {
|
||||
return {
|
||||
name: "docusaurus-tailwindcss",
|
||||
configurePostCss(postcssOptions) {
|
||||
// Appends TailwindCSS and AutoPrefixer.
|
||||
postcssOptions.plugins.push(require("tailwindcss"));
|
||||
postcssOptions.plugins.push(require("autoprefixer"));
|
||||
return postcssOptions;
|
||||
},
|
||||
};
|
||||
},
|
||||
],
|
||||
themes: ["mdx-v2"],
|
||||
themeConfig:
|
||||
/** @type {import('@docusaurus/preset-classic').ThemeConfig} */
|
||||
({
|
||||
navbar: {
|
||||
hideOnScroll: true,
|
||||
title: "Langflow",
|
||||
logo: {
|
||||
alt: "Langflow",
|
||||
src: "img/chain.png",
|
||||
},
|
||||
items: [
|
||||
// right
|
||||
{
|
||||
position: "right",
|
||||
href: "https://github.com/logspace-ai/langflow",
|
||||
position: "right",
|
||||
className: "header-github-link",
|
||||
target: "_blank",
|
||||
rel: null,
|
||||
},
|
||||
{
|
||||
position: "right",
|
||||
href: "https://twitter.com/logspace_ai",
|
||||
position: "right",
|
||||
className: "header-twitter-link",
|
||||
target: "_blank",
|
||||
rel: null,
|
||||
},
|
||||
{
|
||||
position: "right",
|
||||
href: "https://discord.gg/EqksyE2EX9",
|
||||
position: "right",
|
||||
className: "header-discord-link",
|
||||
target: "_blank",
|
||||
rel: null,
|
||||
},
|
||||
],
|
||||
},
|
||||
tableOfContents: {
|
||||
minHeadingLevel: 2,
|
||||
maxHeadingLevel: 5,
|
||||
},
|
||||
colorMode: {
|
||||
defaultMode: "light",
|
||||
disableSwitch: true,
|
||||
respectPrefersColorScheme: false,
|
||||
},
|
||||
announcementBar: {
|
||||
content:
|
||||
'⭐️ If you like ⛓️Langflow, star it on <a target="_blank" rel="noopener noreferrer" href="https://github.com/logspace-ai/langflow">GitHub</a>! ⭐️',
|
||||
backgroundColor: "#E8EBF1", //Mustard Yellow #D19900 #D4B20B - Salmon #E9967A
|
||||
textColor: "#1C1E21",
|
||||
isCloseable: false,
|
||||
},
|
||||
footer: {
|
||||
links: [],
|
||||
copyright: `Copyright © ${new Date().getFullYear()} Logspace.`,
|
||||
},
|
||||
zoom: {
|
||||
selector: ".markdown :not(a) > img:not(.no-zoom)",
|
||||
background: {
|
||||
light: "rgba(240, 240, 240, 0.9)",
|
||||
},
|
||||
config: {},
|
||||
},
|
||||
// prism: {
|
||||
// theme: require("prism-react-renderer/themes/dracula"),
|
||||
// },
|
||||
docs: {
|
||||
sidebar: {
|
||||
hideable: true,
|
||||
},
|
||||
},
|
||||
}),
|
||||
};
|
||||
|
|
|
|||
2033
docs/package-lock.json
generated
2033
docs/package-lock.json
generated
File diff suppressed because it is too large
Load diff
|
|
@ -15,12 +15,13 @@
|
|||
},
|
||||
"dependencies": {
|
||||
"@babel/preset-react": "^7.22.3",
|
||||
"@code-hike/mdx": "^0.9.0",
|
||||
"@docusaurus/core": "2.4.1",
|
||||
"@docusaurus/plugin-ideal-image": "^2.4.1",
|
||||
"@docusaurus/preset-classic": "2.4.1",
|
||||
"@docusaurus/theme-classic": "^2.4.1",
|
||||
"@docusaurus/theme-search-algolia": "^2.4.1",
|
||||
"@mdx-js/react": "^1.6.22",
|
||||
"@mdx-js/react": "^2.3.0",
|
||||
"@mendable/search": "^0.0.114",
|
||||
"@pbe/react-yandex-maps": "^1.2.4",
|
||||
"@prismicio/client": "^7.0.1",
|
||||
|
|
@ -28,6 +29,7 @@
|
|||
"autoprefixer": "^10.4.14",
|
||||
"clsx": "^1.2.1",
|
||||
"docusaurus-plugin-image-zoom": "^0.1.4",
|
||||
"docusaurus-theme-mdx-v2": "^0.1.2",
|
||||
"jquery": "^3.7.0",
|
||||
"medium-zoom": "^1.0.8",
|
||||
"node-fetch": "^3.3.1",
|
||||
|
|
@ -67,4 +69,4 @@
|
|||
"engines": {
|
||||
"node": ">=16.14"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,6 +21,8 @@ module.exports = {
|
|||
"guidelines/collection",
|
||||
"guidelines/prompt-customization",
|
||||
"guidelines/chat-interface",
|
||||
"guidelines/chat-widget",
|
||||
"guidelines/custom-component",
|
||||
],
|
||||
},
|
||||
{
|
||||
|
|
@ -30,11 +32,13 @@ module.exports = {
|
|||
items: [
|
||||
"components/agents",
|
||||
"components/chains",
|
||||
"components/custom",
|
||||
"components/embeddings",
|
||||
"components/llms",
|
||||
"components/loaders",
|
||||
"components/memories",
|
||||
"components/prompts",
|
||||
"components/retrievers",
|
||||
"components/text-splitters",
|
||||
"components/toolkits",
|
||||
"components/tools",
|
||||
|
|
@ -63,6 +67,7 @@ module.exports = {
|
|||
label: "Examples",
|
||||
collapsed: false,
|
||||
items: [
|
||||
"examples/flow-runner",
|
||||
"examples/conversation-chain",
|
||||
"examples/buffer-memory",
|
||||
"examples/midjourney-prompt-chain",
|
||||
|
|
|
|||
|
|
@ -3,17 +3,19 @@
|
|||
* bundles Infima by default. Infima is a CSS framework designed to
|
||||
* work well for content-centric websites.
|
||||
*/
|
||||
:root {
|
||||
:root {
|
||||
--ifm-background-color: var(--token-primary-bg-c);
|
||||
--ifm-navbar-link-hover-color: initial;
|
||||
--ifm-navbar-padding-vertical: 0;
|
||||
--ifm-navbar-item-padding-vertical: 0;
|
||||
--ifm-font-family-base: -apple-system, BlinkMacSystemFont, Inter, Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI emoji';
|
||||
--ifm-font-family-monospace: 'SFMono-Regular', 'Roboto Mono', Consolas, 'Liberation Mono', Menlo, Courier, monospace;
|
||||
--ifm-font-family-base: -apple-system, BlinkMacSystemFont, Inter, Helvetica,
|
||||
Arial, sans-serif, "Apple Color Emoji", "Segoe UI emoji";
|
||||
--ifm-font-family-monospace: "SFMono-Regular", "Roboto Mono", Consolas,
|
||||
"Liberation Mono", Menlo, Courier, monospace;
|
||||
}
|
||||
|
||||
.theme-doc-sidebar-item-category.menu__list-item:not(:first-child) {
|
||||
margin-top: 1.5rem!important;
|
||||
margin-top: 1.5rem !important;
|
||||
}
|
||||
|
||||
.docusaurus-highlight-code-line {
|
||||
|
|
@ -31,7 +33,7 @@
|
|||
transform: skewY(6deg);
|
||||
}
|
||||
|
||||
[class^='announcementBar'] {
|
||||
[class^="announcementBar"] {
|
||||
z-index: 10;
|
||||
}
|
||||
|
||||
|
|
@ -112,7 +114,7 @@ body {
|
|||
}
|
||||
|
||||
.header-github-link:before {
|
||||
content: '';
|
||||
content: "";
|
||||
width: 24px;
|
||||
height: 24px;
|
||||
display: flex;
|
||||
|
|
@ -126,7 +128,7 @@ body {
|
|||
}
|
||||
|
||||
.header-twitter-link::before {
|
||||
content: '';
|
||||
content: "";
|
||||
width: 24px;
|
||||
height: 24px;
|
||||
display: flex;
|
||||
|
|
@ -140,7 +142,7 @@ body {
|
|||
}
|
||||
|
||||
.header-discord-link::before {
|
||||
content: '';
|
||||
content: "";
|
||||
width: 24px;
|
||||
height: 24px;
|
||||
display: flex;
|
||||
|
|
@ -148,7 +150,6 @@ body {
|
|||
background-size: contain;
|
||||
}
|
||||
|
||||
|
||||
/* Images */
|
||||
.image-rendering-crisp {
|
||||
image-rendering: crisp-edges;
|
||||
|
|
@ -164,7 +165,7 @@ body {
|
|||
.img-center {
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
width: 100%,
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.resized-image {
|
||||
|
|
@ -188,4 +189,22 @@ body {
|
|||
.mendable-search {
|
||||
width: 140px;
|
||||
}
|
||||
}
|
||||
}
|
||||
/*
|
||||
.ch-scrollycoding {
|
||||
gap: 10rem !important;
|
||||
} */
|
||||
|
||||
.ch-scrollycoding-content {
|
||||
max-width: 55% !important;
|
||||
min-width: 40% !important;
|
||||
}
|
||||
|
||||
.ch-scrollycoding-sticker {
|
||||
max-width: 60% !important;
|
||||
min-width: 45% !important;
|
||||
}
|
||||
|
||||
.ch-scrollycoding-step-content {
|
||||
min-height: 70px;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
import React, { useState, useEffect } from 'react';
|
||||
import ThemedImage from '@theme/ThemedImage';
|
||||
import useBaseUrl from '@docusaurus/useBaseUrl';
|
||||
import React, { useState, useEffect } from "react";
|
||||
import ThemedImage from "@theme/ThemedImage";
|
||||
import useBaseUrl from "@docusaurus/useBaseUrl";
|
||||
|
||||
const ZoomableImage = ({ alt, sources }) => {
|
||||
const ZoomableImage = ({ alt, sources, style }) => {
|
||||
// add style here
|
||||
const [isFullscreen, setIsFullscreen] = useState(false);
|
||||
|
||||
const toggleFullscreen = () => {
|
||||
|
|
@ -10,27 +11,36 @@ const ZoomableImage = ({ alt, sources }) => {
|
|||
};
|
||||
|
||||
const handleKeyPress = (event) => {
|
||||
if (event.key === 'Escape') {
|
||||
if (event.key === "Escape") {
|
||||
setIsFullscreen(false);
|
||||
}
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
if (isFullscreen) {
|
||||
document.addEventListener('keydown', handleKeyPress);
|
||||
document.addEventListener("keydown", handleKeyPress);
|
||||
} else {
|
||||
document.removeEventListener('keydown', handleKeyPress);
|
||||
document.removeEventListener("keydown", handleKeyPress);
|
||||
}
|
||||
|
||||
return () => {
|
||||
document.removeEventListener('keydown', handleKeyPress);
|
||||
document.removeEventListener("keydown", handleKeyPress);
|
||||
};
|
||||
}, [isFullscreen]);
|
||||
|
||||
// Default style
|
||||
const defaultStyle = {
|
||||
width: "50%",
|
||||
margin: "0 auto",
|
||||
display: "flex",
|
||||
justifyContent: "center",
|
||||
};
|
||||
|
||||
return (
|
||||
<div
|
||||
className={`zoomable-image ${isFullscreen ? 'fullscreen' : ''}`}
|
||||
className={`zoomable-image ${isFullscreen ? "fullscreen" : ""}`}
|
||||
onClick={toggleFullscreen}
|
||||
style={{ ...defaultStyle, ...style }}
|
||||
>
|
||||
<ThemedImage
|
||||
className="zoomable-image-inner"
|
||||
|
|
|
|||
2
docs/static/CNAME
vendored
2
docs/static/CNAME
vendored
|
|
@ -1 +1 @@
|
|||
langflow.org
|
||||
docs.langflow.org
|
||||
BIN
docs/static/img/document_processor.png
vendored
Normal file
BIN
docs/static/img/document_processor.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 197 KiB |
BIN
docs/static/img/document_processor_code.png
vendored
Normal file
BIN
docs/static/img/document_processor_code.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 407 KiB |
BIN
docs/static/img/flow_runner.png
vendored
Normal file
BIN
docs/static/img/flow_runner.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 129 KiB |
BIN
docs/static/img/flow_runner_code.png
vendored
Normal file
BIN
docs/static/img/flow_runner_code.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 379 KiB |
BIN
docs/static/img/widget-code.png
vendored
Normal file
BIN
docs/static/img/widget-code.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 787 KiB |
BIN
docs/static/img/widget-sidebar.png
vendored
Normal file
BIN
docs/static/img/widget-sidebar.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 129 KiB |
BIN
docs/static/videos/langflow_widget.mp4
vendored
Normal file
BIN
docs/static/videos/langflow_widget.mp4
vendored
Normal file
Binary file not shown.
1901
poetry.lock
generated
1901
poetry.lock
generated
File diff suppressed because it is too large
Load diff
|
|
@ -1,6 +1,6 @@
|
|||
[tool.poetry]
|
||||
name = "langflow"
|
||||
version = "0.3.3"
|
||||
version = "0.4.7"
|
||||
description = "A Python package with a built-in web application"
|
||||
authors = ["Logspace <contact@logspace.ai>"]
|
||||
maintainers = [
|
||||
|
|
@ -19,7 +19,7 @@ readme = "README.md"
|
|||
keywords = ["nlp", "langchain", "openai", "gpt", "gui"]
|
||||
packages = [{ include = "langflow", from = "src/backend" }]
|
||||
include = ["src/backend/langflow/*", "src/backend/langflow/**/*"]
|
||||
|
||||
documentation = "https://docs.langflow.org"
|
||||
|
||||
[tool.poetry.scripts]
|
||||
langflow = "langflow.__main__:main"
|
||||
|
|
@ -33,19 +33,19 @@ google-search-results = "^2.4.1"
|
|||
google-api-python-client = "^2.79.0"
|
||||
typer = "^0.9.0"
|
||||
gunicorn = "^21.1.0"
|
||||
langchain = "^0.0.240"
|
||||
langchain = "^0.0.256"
|
||||
openai = "^0.27.8"
|
||||
pandas = "^2.0.0"
|
||||
chromadb = "^0.3.21"
|
||||
huggingface-hub = "^0.15.0"
|
||||
huggingface-hub = { version = "^0.16.0", extras = ["inference"] }
|
||||
rich = "^13.4.2"
|
||||
llama-cpp-python = "~0.1.0"
|
||||
llama-cpp-python = { version = "~0.1.0", optional = true }
|
||||
networkx = "^3.1"
|
||||
unstructured = "^0.7.0"
|
||||
pypdf = "^3.11.0"
|
||||
lxml = "^4.9.2"
|
||||
pysrt = "^1.1.2"
|
||||
fake-useragent = "^1.1.3"
|
||||
fake-useragent = "^1.2.1"
|
||||
docstring-parser = "^0.15"
|
||||
psycopg2-binary = "^2.9.6"
|
||||
pyarrow = "^12.0.0"
|
||||
|
|
@ -56,14 +56,14 @@ qdrant-client = "^1.3.0"
|
|||
websockets = "^10.3"
|
||||
weaviate-client = "^3.21.0"
|
||||
jina = "3.15.2"
|
||||
sentence-transformers = "^2.2.2"
|
||||
ctransformers = "^0.2.10"
|
||||
sentence-transformers = { version = "^2.2.2", optional = true }
|
||||
ctransformers = { version = "^0.2.10", optional = true }
|
||||
cohere = "^4.11.0"
|
||||
python-multipart = "^0.0.6"
|
||||
sqlmodel = "^0.0.8"
|
||||
faiss-cpu = "^1.7.4"
|
||||
anthropic = "^0.3.0"
|
||||
orjson = "^3.9.1"
|
||||
orjson = "3.9.3"
|
||||
multiprocess = "^0.70.14"
|
||||
cachetools = "^5.3.1"
|
||||
types-cachetools = "^5.3.0.5"
|
||||
|
|
@ -75,8 +75,12 @@ certifi = "^2023.5.7"
|
|||
google-cloud-aiplatform = "^1.26.1"
|
||||
psycopg = "^3.1.9"
|
||||
psycopg-binary = "^3.1.9"
|
||||
fastavro = "^1.8.0"
|
||||
langchain-experimental = "^0.0.8"
|
||||
alembic = "^1.11.2"
|
||||
metaphor-python = "^0.1.11"
|
||||
|
||||
[tool.poetry.dev-dependencies]
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
black = "^23.1.0"
|
||||
ipykernel = "^6.21.2"
|
||||
mypy = "^1.1.1"
|
||||
|
|
@ -94,6 +98,9 @@ types-pyyaml = "^6.0.12.8"
|
|||
|
||||
[tool.poetry.extras]
|
||||
deploy = ["langchain-serve"]
|
||||
local = ["llama-cpp-python", "sentence-transformers", "ctransformers"]
|
||||
all = ["deploy", "local"]
|
||||
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
minversion = "6.0"
|
||||
|
|
|
|||
|
|
@ -11,4 +11,4 @@ RUN rm *.whl
|
|||
|
||||
EXPOSE 80
|
||||
|
||||
CMD [ "uvicorn", "--host", "0.0.0.0", "--port", "80", "langflow.backend.app:app" ]
|
||||
CMD [ "uvicorn", "--host", "0.0.0.0", "--port", "7860", "--factory", "langflow.main:create_app" ]
|
||||
|
|
|
|||
|
|
@ -1,6 +1,9 @@
|
|||
from importlib import metadata
|
||||
from langflow.cache import cache_manager # noqa: E402
|
||||
from langflow.processing.process import load_flow_from_json # noqa: E402
|
||||
|
||||
# Deactivate cache manager for now
|
||||
# from langflow.services.cache import cache_manager
|
||||
from langflow.processing.process import load_flow_from_json
|
||||
from langflow.interface.custom.custom_component import CustomComponent
|
||||
|
||||
try:
|
||||
__version__ = metadata.version(__package__)
|
||||
|
|
@ -9,5 +12,4 @@ except metadata.PackageNotFoundError:
|
|||
__version__ = ""
|
||||
del metadata # optional, avoids polluting the results of dir(__package__)
|
||||
|
||||
|
||||
__all__ = ["load_flow_from_json", "cache_manager"]
|
||||
__all__ = ["load_flow_from_json", "cache_manager", "CustomComponent"]
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
import os
|
||||
import sys
|
||||
import time
|
||||
import httpx
|
||||
from multiprocess import Process, cpu_count # type: ignore
|
||||
from langflow.services.utils import get_settings_manager
|
||||
from langflow.utils.util import get_number_of_workers
|
||||
from multiprocess import Process # type: ignore
|
||||
import platform
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
|
@ -12,7 +13,6 @@ from rich import box
|
|||
from rich import print as rprint
|
||||
import typer
|
||||
from langflow.main import setup_app
|
||||
from langflow.settings import settings
|
||||
from langflow.utils.logger import configure, logger
|
||||
import webbrowser
|
||||
from dotenv import load_dotenv
|
||||
|
|
@ -20,52 +20,29 @@ from dotenv import load_dotenv
|
|||
app = typer.Typer()
|
||||
|
||||
|
||||
def get_number_of_workers(workers=None):
|
||||
if workers == -1:
|
||||
workers = (cpu_count() * 2) + 1
|
||||
return workers
|
||||
|
||||
|
||||
def update_settings(
|
||||
config: str,
|
||||
cache: str,
|
||||
dev: bool = False,
|
||||
database_url: Optional[str] = None,
|
||||
remove_api_keys: bool = False,
|
||||
components_path: Optional[Path] = None,
|
||||
):
|
||||
"""Update the settings from a config file."""
|
||||
|
||||
# Check for database_url in the environment variables
|
||||
database_url = database_url or os.getenv("langflow_database_url")
|
||||
|
||||
settings_manager = get_settings_manager()
|
||||
if config:
|
||||
settings.update_from_yaml(config, dev=dev)
|
||||
if database_url:
|
||||
settings.update_settings(database_url=database_url)
|
||||
logger.debug(f"Loading settings from {config}")
|
||||
settings_manager.settings.update_from_yaml(config, dev=dev)
|
||||
if remove_api_keys:
|
||||
settings.update_settings(remove_api_keys=remove_api_keys)
|
||||
logger.debug(f"Setting remove_api_keys to {remove_api_keys}")
|
||||
settings_manager.settings.update_settings(REMOVE_API_KEYS=remove_api_keys)
|
||||
if cache:
|
||||
settings.update_settings(cache=cache)
|
||||
|
||||
|
||||
def load_params():
|
||||
"""
|
||||
Load the parameters from the environment variables.
|
||||
"""
|
||||
global_vars = globals()
|
||||
|
||||
for key, value in global_vars.items():
|
||||
env_key = f"LANGFLOW_{key.upper()}"
|
||||
if env_key in os.environ:
|
||||
if isinstance(value, bool):
|
||||
# Handle booleans
|
||||
global_vars[key] = os.getenv(env_key, str(value)).lower() == "true"
|
||||
elif isinstance(value, int):
|
||||
# Handle integers
|
||||
global_vars[key] = int(os.getenv(env_key, str(value)))
|
||||
elif isinstance(value, str) or value is None:
|
||||
# Handle strings and None values
|
||||
global_vars[key] = os.getenv(env_key, str(value))
|
||||
logger.debug(f"Setting cache to {cache}")
|
||||
settings_manager.settings.update_settings(CACHE=cache)
|
||||
if components_path:
|
||||
logger.debug(f"Adding component path {components_path}")
|
||||
settings_manager.settings.update_settings(COMPONENTS_PATH=components_path)
|
||||
|
||||
|
||||
def serve_on_jcloud():
|
||||
|
|
@ -120,14 +97,21 @@ def serve(
|
|||
"127.0.0.1", help="Host to bind the server to.", envvar="LANGFLOW_HOST"
|
||||
),
|
||||
workers: int = typer.Option(
|
||||
1, help="Number of worker processes.", envvar="LANGFLOW_WORKERS"
|
||||
2, help="Number of worker processes.", envvar="LANGFLOW_WORKERS"
|
||||
),
|
||||
timeout: int = typer.Option(60, help="Worker timeout in seconds."),
|
||||
timeout: int = typer.Option(300, help="Worker timeout in seconds."),
|
||||
port: int = typer.Option(7860, help="Port to listen on.", envvar="LANGFLOW_PORT"),
|
||||
config: str = typer.Option("config.yaml", help="Path to the configuration file."),
|
||||
components_path: Optional[Path] = typer.Option(
|
||||
Path(__file__).parent / "components",
|
||||
help="Path to the directory containing custom components.",
|
||||
envvar="LANGFLOW_COMPONENTS_PATH",
|
||||
),
|
||||
config: str = typer.Option(
|
||||
Path(__file__).parent / "config.yaml", help="Path to the configuration file."
|
||||
),
|
||||
# .env file param
|
||||
env_file: Path = typer.Option(
|
||||
".env", help="Path to the .env file containing environment variables."
|
||||
None, help="Path to the .env file containing environment variables."
|
||||
),
|
||||
log_level: str = typer.Option(
|
||||
"critical", help="Logging level.", envvar="LANGFLOW_LOG_LEVEL"
|
||||
|
|
@ -142,11 +126,13 @@ def serve(
|
|||
),
|
||||
jcloud: bool = typer.Option(False, help="Deploy on Jina AI Cloud"),
|
||||
dev: bool = typer.Option(False, help="Run in development mode (may contain bugs)"),
|
||||
database_url: str = typer.Option(
|
||||
None,
|
||||
help="Database URL to connect to. If not provided, a local SQLite database will be used.",
|
||||
envvar="LANGFLOW_DATABASE_URL",
|
||||
),
|
||||
# This variable does not work but is set by the .env file
|
||||
# and works with Pydantic
|
||||
# database_url: str = typer.Option(
|
||||
# None,
|
||||
# help="Database URL to connect to. If not provided, a local SQLite database will be used.",
|
||||
# envvar="LANGFLOW_DATABASE_URL",
|
||||
# ),
|
||||
path: str = typer.Option(
|
||||
None,
|
||||
help="Path to the frontend directory containing build files. This is for development purposes only.",
|
||||
|
|
@ -162,6 +148,11 @@ def serve(
|
|||
help="Remove API keys from the projects saved in the database.",
|
||||
envvar="LANGFLOW_REMOVE_API_KEYS",
|
||||
),
|
||||
backend_only: bool = typer.Option(
|
||||
False,
|
||||
help="Run only the backend server without the frontend.",
|
||||
envvar="LANGFLOW_BACKEND_ONLY",
|
||||
),
|
||||
):
|
||||
"""
|
||||
Run the Langflow server.
|
||||
|
|
@ -169,7 +160,6 @@ def serve(
|
|||
# override env variables with .env file
|
||||
if env_file:
|
||||
load_dotenv(env_file, override=True)
|
||||
load_params()
|
||||
|
||||
if jcloud:
|
||||
return serve_on_jcloud()
|
||||
|
|
@ -178,13 +168,13 @@ def serve(
|
|||
update_settings(
|
||||
config,
|
||||
dev=dev,
|
||||
database_url=database_url,
|
||||
remove_api_keys=remove_api_keys,
|
||||
cache=cache,
|
||||
components_path=components_path,
|
||||
)
|
||||
# create path object if path is provided
|
||||
static_files_dir: Optional[Path] = Path(path) if path else None
|
||||
app = setup_app(static_files_dir=static_files_dir)
|
||||
app = setup_app(static_files_dir=static_files_dir, backend_only=backend_only)
|
||||
# check if port is being used
|
||||
if is_port_in_use(port, host):
|
||||
port = get_free_port(port)
|
||||
|
|
@ -196,6 +186,10 @@ def serve(
|
|||
"timeout": timeout,
|
||||
}
|
||||
|
||||
# Define an env variable to know if we are just testing the server
|
||||
if "pytest" in sys.modules:
|
||||
return
|
||||
|
||||
if platform.system() in ["Windows"]:
|
||||
# Run using uvicorn on MacOS and Windows
|
||||
# Windows doesn't support gunicorn
|
||||
|
|
@ -298,7 +292,7 @@ def run_langflow(host, port, log_level, options, app):
|
|||
Run Langflow server on localhost
|
||||
"""
|
||||
try:
|
||||
if platform.system() in ["Darwin", "Windows"]:
|
||||
if platform.system() in ["Windows"]:
|
||||
# Run using uvicorn on MacOS and Windows
|
||||
# Windows doesn't support gunicorn
|
||||
# MacOS requires an env variable to be set to use gunicorn
|
||||
|
|
|
|||
113
src/backend/langflow/alembic.ini
Normal file
113
src/backend/langflow/alembic.ini
Normal file
|
|
@ -0,0 +1,113 @@
|
|||
# A generic, single database configuration.
|
||||
|
||||
[alembic]
|
||||
# path to migration scripts
|
||||
script_location = alembic
|
||||
|
||||
# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
|
||||
# Uncomment the line below if you want the files to be prepended with date and time
|
||||
# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
|
||||
# for all available tokens
|
||||
# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
|
||||
|
||||
# sys.path path, will be prepended to sys.path if present.
|
||||
# defaults to the current working directory.
|
||||
prepend_sys_path = .
|
||||
|
||||
# timezone to use when rendering the date within the migration file
|
||||
# as well as the filename.
|
||||
# If specified, requires the python-dateutil library that can be
|
||||
# installed by adding `alembic[tz]` to the pip requirements
|
||||
# string value is passed to dateutil.tz.gettz()
|
||||
# leave blank for localtime
|
||||
# timezone =
|
||||
|
||||
# max length of characters to apply to the
|
||||
# "slug" field
|
||||
# truncate_slug_length = 40
|
||||
|
||||
# set to 'true' to run the environment during
|
||||
# the 'revision' command, regardless of autogenerate
|
||||
# revision_environment = false
|
||||
|
||||
# set to 'true' to allow .pyc and .pyo files without
|
||||
# a source .py file to be detected as revisions in the
|
||||
# versions/ directory
|
||||
# sourceless = false
|
||||
|
||||
# version location specification; This defaults
|
||||
# to alembic/versions. When using multiple version
|
||||
# directories, initial revisions must be specified with --version-path.
|
||||
# The path separator used here should be the separator specified by "version_path_separator" below.
|
||||
# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
|
||||
|
||||
# version path separator; As mentioned above, this is the character used to split
|
||||
# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
|
||||
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
|
||||
# Valid values for version_path_separator are:
|
||||
#
|
||||
# version_path_separator = :
|
||||
# version_path_separator = ;
|
||||
# version_path_separator = space
|
||||
version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
|
||||
|
||||
# set to 'true' to search source files recursively
|
||||
# in each "version_locations" directory
|
||||
# new in Alembic version 1.10
|
||||
# recursive_version_locations = false
|
||||
|
||||
# the output encoding used when revision files
|
||||
# are written from script.py.mako
|
||||
# output_encoding = utf-8
|
||||
|
||||
# This is the path to the db in the root of the project.
|
||||
# When the user runs the Langflow the database url will
|
||||
# be set dinamically.
|
||||
sqlalchemy.url = sqlite:///../../../langflow.db
|
||||
|
||||
|
||||
[post_write_hooks]
|
||||
# post_write_hooks defines scripts or Python functions that are run
|
||||
# on newly generated revision scripts. See the documentation for further
|
||||
# detail and examples
|
||||
|
||||
# format using "black" - use the console_scripts runner, against the "black" entrypoint
|
||||
# hooks = black
|
||||
# black.type = console_scripts
|
||||
# black.entrypoint = black
|
||||
# black.options = -l 79 REVISION_SCRIPT_FILENAME
|
||||
|
||||
# Logging configuration
|
||||
[loggers]
|
||||
keys = root,sqlalchemy,alembic
|
||||
|
||||
[handlers]
|
||||
keys = console
|
||||
|
||||
[formatters]
|
||||
keys = generic
|
||||
|
||||
[logger_root]
|
||||
level = WARN
|
||||
handlers = console
|
||||
qualname =
|
||||
|
||||
[logger_sqlalchemy]
|
||||
level = WARN
|
||||
handlers =
|
||||
qualname = sqlalchemy.engine
|
||||
|
||||
[logger_alembic]
|
||||
level = INFO
|
||||
handlers =
|
||||
qualname = alembic
|
||||
|
||||
[handler_console]
|
||||
class = StreamHandler
|
||||
args = (sys.stderr,)
|
||||
level = NOTSET
|
||||
formatter = generic
|
||||
|
||||
[formatter_generic]
|
||||
format = %(levelname)-5.5s [%(name)s] %(message)s
|
||||
datefmt = %H:%M:%S
|
||||
1
src/backend/langflow/alembic/README
Normal file
1
src/backend/langflow/alembic/README
Normal file
|
|
@ -0,0 +1 @@
|
|||
Generic single-database configuration.
|
||||
78
src/backend/langflow/alembic/env.py
Normal file
78
src/backend/langflow/alembic/env.py
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
from logging.config import fileConfig
|
||||
|
||||
from sqlalchemy import engine_from_config
|
||||
from sqlalchemy import pool
|
||||
|
||||
from alembic import context
|
||||
|
||||
from langflow.services.database.manager import SQLModel
|
||||
|
||||
# this is the Alembic Config object, which provides
|
||||
# access to the values within the .ini file in use.
|
||||
config = context.config
|
||||
|
||||
# Interpret the config file for Python logging.
|
||||
# This line sets up loggers basically.
|
||||
if config.config_file_name is not None:
|
||||
fileConfig(config.config_file_name)
|
||||
|
||||
# add your model's MetaData object here
|
||||
# for 'autogenerate' support
|
||||
# from myapp import mymodel
|
||||
# target_metadata = mymodel.Base.metadata
|
||||
target_metadata = SQLModel.metadata
|
||||
|
||||
# other values from the config, defined by the needs of env.py,
|
||||
# can be acquired:
|
||||
# my_important_option = config.get_main_option("my_important_option")
|
||||
# ... etc.
|
||||
|
||||
|
||||
def run_migrations_offline() -> None:
|
||||
"""Run migrations in 'offline' mode.
|
||||
|
||||
This configures the context with just a URL
|
||||
and not an Engine, though an Engine is acceptable
|
||||
here as well. By skipping the Engine creation
|
||||
we don't even need a DBAPI to be available.
|
||||
|
||||
Calls to context.execute() here emit the given string to the
|
||||
script output.
|
||||
|
||||
"""
|
||||
url = config.get_main_option("sqlalchemy.url")
|
||||
context.configure(
|
||||
url=url,
|
||||
target_metadata=target_metadata,
|
||||
literal_binds=True,
|
||||
dialect_opts={"paramstyle": "named"},
|
||||
)
|
||||
|
||||
with context.begin_transaction():
|
||||
context.run_migrations()
|
||||
|
||||
|
||||
def run_migrations_online() -> None:
|
||||
"""Run migrations in 'online' mode.
|
||||
|
||||
In this scenario we need to create an Engine
|
||||
and associate a connection with the context.
|
||||
|
||||
"""
|
||||
connectable = engine_from_config(
|
||||
config.get_section(config.config_ini_section, {}),
|
||||
prefix="sqlalchemy.",
|
||||
poolclass=pool.NullPool,
|
||||
)
|
||||
|
||||
with connectable.connect() as connection:
|
||||
context.configure(connection=connection, target_metadata=target_metadata)
|
||||
|
||||
with context.begin_transaction():
|
||||
context.run_migrations()
|
||||
|
||||
|
||||
if context.is_offline_mode():
|
||||
run_migrations_offline()
|
||||
else:
|
||||
run_migrations_online()
|
||||
27
src/backend/langflow/alembic/script.py.mako
Normal file
27
src/backend/langflow/alembic/script.py.mako
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
"""${message}
|
||||
|
||||
Revision ID: ${up_revision}
|
||||
Revises: ${down_revision | comma,n}
|
||||
Create Date: ${create_date}
|
||||
|
||||
"""
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
import sqlmodel
|
||||
${imports if imports else ""}
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = ${repr(up_revision)}
|
||||
down_revision: Union[str, None] = ${repr(down_revision)}
|
||||
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
|
||||
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
${upgrades if upgrades else "pass"}
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
${downgrades if downgrades else "pass"}
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
"""Remove FlowStyles table
|
||||
|
||||
Revision ID: 0a534bdfd84b
|
||||
Revises: 4814b6f4abfd
|
||||
Create Date: 2023-08-07 14:09:06.844104
|
||||
|
||||
"""
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "0a534bdfd84b"
|
||||
down_revision: Union[str, None] = "4814b6f4abfd"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
op.drop_table("flowstyle")
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
op.create_table(
|
||||
"flowstyle",
|
||||
sa.Column("color", sa.VARCHAR(), nullable=False),
|
||||
sa.Column("emoji", sa.VARCHAR(), nullable=False),
|
||||
sa.Column("flow_id", sa.CHAR(length=32), nullable=True),
|
||||
sa.Column("id", sa.CHAR(length=32), nullable=False),
|
||||
sa.ForeignKeyConstraint(
|
||||
["flow_id"],
|
||||
["flow.id"],
|
||||
),
|
||||
sa.PrimaryKeyConstraint("id"),
|
||||
sa.UniqueConstraint("id"),
|
||||
)
|
||||
# ### end Alembic commands ###
|
||||
|
|
@ -0,0 +1,65 @@
|
|||
"""Add Flow table
|
||||
|
||||
Revision ID: 4814b6f4abfd
|
||||
Revises:
|
||||
Create Date: 2023-08-05 17:47:42.879824
|
||||
|
||||
"""
|
||||
|
||||
import contextlib
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
import sqlmodel
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "4814b6f4abfd"
|
||||
down_revision: Union[str, None] = None
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
|
||||
# This suppress is used to not break the migration if the table already exists.
|
||||
with contextlib.suppress(sa.exc.OperationalError):
|
||||
op.create_table(
|
||||
"flow",
|
||||
sa.Column("data", sa.JSON(), nullable=True),
|
||||
sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
||||
sa.Column("description", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
||||
sa.Column("id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
|
||||
sa.PrimaryKeyConstraint("id"),
|
||||
sa.UniqueConstraint("id"),
|
||||
)
|
||||
op.create_index(
|
||||
op.f("ix_flow_description"), "flow", ["description"], unique=False
|
||||
)
|
||||
op.create_index(op.f("ix_flow_name"), "flow", ["name"], unique=False)
|
||||
with contextlib.suppress(sa.exc.OperationalError):
|
||||
op.create_table(
|
||||
"flowstyle",
|
||||
sa.Column("color", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
||||
sa.Column("emoji", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
||||
sa.Column("flow_id", sqlmodel.sql.sqltypes.GUID(), nullable=True),
|
||||
sa.Column("id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
|
||||
sa.ForeignKeyConstraint(
|
||||
["flow_id"],
|
||||
["flow.id"],
|
||||
),
|
||||
sa.PrimaryKeyConstraint("id"),
|
||||
sa.UniqueConstraint("id"),
|
||||
)
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
op.drop_table("flowstyle")
|
||||
op.drop_index(op.f("ix_flow_name"), table_name="flow")
|
||||
op.drop_index(op.f("ix_flow_description"), table_name="flow")
|
||||
op.drop_table("flow")
|
||||
# ### end Alembic commands ###
|
||||
|
|
@ -5,7 +5,7 @@ from langflow.api.v1 import (
|
|||
endpoints_router,
|
||||
validate_router,
|
||||
flows_router,
|
||||
flow_styles_router,
|
||||
component_router,
|
||||
)
|
||||
|
||||
router = APIRouter(
|
||||
|
|
@ -14,5 +14,5 @@ router = APIRouter(
|
|||
router.include_router(chat_router)
|
||||
router.include_router(endpoints_router)
|
||||
router.include_router(validate_router)
|
||||
router.include_router(component_router)
|
||||
router.include_router(flows_router)
|
||||
router.include_router(flow_styles_router)
|
||||
|
|
|
|||
|
|
@ -57,3 +57,39 @@ def build_input_keys_response(langchain_object, artifacts):
|
|||
input_keys_response["template"] = langchain_object.prompt.template
|
||||
|
||||
return input_keys_response
|
||||
|
||||
|
||||
def merge_nested_dicts(dict1, dict2):
|
||||
for key, value in dict2.items():
|
||||
if isinstance(value, dict) and isinstance(dict1.get(key), dict):
|
||||
dict1[key] = merge_nested_dicts(dict1[key], value)
|
||||
else:
|
||||
dict1[key] = value
|
||||
return dict1
|
||||
|
||||
|
||||
def merge_nested_dicts_with_renaming(dict1, dict2):
|
||||
for key, value in dict2.items():
|
||||
if (
|
||||
key in dict1
|
||||
and isinstance(value, dict)
|
||||
and isinstance(dict1.get(key), dict)
|
||||
):
|
||||
for sub_key, sub_value in value.items():
|
||||
if sub_key in dict1[key]:
|
||||
new_key = get_new_key(dict1[key], sub_key)
|
||||
dict1[key][new_key] = sub_value
|
||||
else:
|
||||
dict1[key][sub_key] = sub_value
|
||||
else:
|
||||
dict1[key] = value
|
||||
return dict1
|
||||
|
||||
|
||||
def get_new_key(dictionary, original_key):
|
||||
counter = 1
|
||||
new_key = original_key + " (" + str(counter) + ")"
|
||||
while new_key in dictionary:
|
||||
counter += 1
|
||||
new_key = original_key + " (" + str(counter) + ")"
|
||||
return new_key
|
||||
|
|
|
|||
|
|
@ -2,12 +2,12 @@ from langflow.api.v1.endpoints import router as endpoints_router
|
|||
from langflow.api.v1.validate import router as validate_router
|
||||
from langflow.api.v1.chat import router as chat_router
|
||||
from langflow.api.v1.flows import router as flows_router
|
||||
from langflow.api.v1.flow_styles import router as flow_styles_router
|
||||
from langflow.api.v1.components import router as component_router
|
||||
|
||||
__all__ = [
|
||||
"chat_router",
|
||||
"endpoints_router",
|
||||
"component_router",
|
||||
"validate_router",
|
||||
"flows_router",
|
||||
"flow_styles_router",
|
||||
]
|
||||
|
|
|
|||
|
|
@ -91,8 +91,8 @@ class AsyncStreamingLLMCallbackHandler(AsyncCallbackHandler):
|
|||
# This is to emulate the stream of tokens
|
||||
for resp in resps:
|
||||
await self.websocket.send_json(resp.dict())
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
except Exception as exc:
|
||||
logger.error(f"Error sending response: {exc}")
|
||||
|
||||
async def on_tool_error(
|
||||
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
|
||||
|
|
|
|||
|
|
@ -3,13 +3,13 @@ from fastapi.responses import StreamingResponse
|
|||
from langflow.api.utils import build_input_keys_response
|
||||
from langflow.api.v1.schemas import BuildStatus, BuiltResponse, InitResponse, StreamData
|
||||
|
||||
from langflow.chat.manager import ChatManager
|
||||
from langflow.services import service_manager, ServiceType
|
||||
from langflow.graph.graph.base import Graph
|
||||
from langflow.utils.logger import logger
|
||||
from cachetools import LRUCache
|
||||
|
||||
router = APIRouter(tags=["Chat"])
|
||||
chat_manager = ChatManager()
|
||||
|
||||
flow_data_store: LRUCache = LRUCache(maxsize=10)
|
||||
|
||||
|
||||
|
|
@ -17,6 +17,7 @@ flow_data_store: LRUCache = LRUCache(maxsize=10)
|
|||
async def chat(client_id: str, websocket: WebSocket):
|
||||
"""Websocket endpoint for chat."""
|
||||
try:
|
||||
chat_manager = service_manager.get(ServiceType.CHAT_MANAGER)
|
||||
if client_id in chat_manager.in_memory_cache:
|
||||
await chat_manager.handle_websocket(client_id, websocket)
|
||||
else:
|
||||
|
|
@ -26,7 +27,7 @@ async def chat(client_id: str, websocket: WebSocket):
|
|||
message = "Please, build the flow before sending messages"
|
||||
await websocket.close(code=status.WS_1011_INTERNAL_ERROR, reason=message)
|
||||
except WebSocketException as exc:
|
||||
logger.error(exc)
|
||||
logger.error(f"Websocket error: {exc}")
|
||||
await websocket.close(code=status.WS_1011_INTERNAL_ERROR, reason=str(exc))
|
||||
|
||||
|
||||
|
|
@ -45,6 +46,7 @@ async def init_build(graph_data: dict, flow_id: str):
|
|||
return InitResponse(flowId=flow_id)
|
||||
|
||||
# Delete from cache if already exists
|
||||
chat_manager = service_manager.get(ServiceType.CHAT_MANAGER)
|
||||
if flow_id in chat_manager.in_memory_cache:
|
||||
with chat_manager.in_memory_cache._lock:
|
||||
chat_manager.in_memory_cache.delete(flow_id)
|
||||
|
|
@ -56,7 +58,7 @@ async def init_build(graph_data: dict, flow_id: str):
|
|||
|
||||
return InitResponse(flowId=flow_id)
|
||||
except Exception as exc:
|
||||
logger.error(exc)
|
||||
logger.error(f"Error initializing build: {exc}")
|
||||
return HTTPException(status_code=500, detail=str(exc))
|
||||
|
||||
|
||||
|
|
@ -74,7 +76,7 @@ async def build_status(flow_id: str):
|
|||
)
|
||||
|
||||
except Exception as exc:
|
||||
logger.error(exc)
|
||||
logger.error(f"Error checking build status: {exc}")
|
||||
return HTTPException(status_code=500, detail=str(exc))
|
||||
|
||||
|
||||
|
|
@ -125,9 +127,8 @@ async def stream_build(flow_id: str):
|
|||
vertex.build()
|
||||
params = vertex._built_object_repr()
|
||||
valid = True
|
||||
logger.debug(
|
||||
f"Building node {str(params)[:50]}{'...' if len(str(params)) > 50 else ''}"
|
||||
)
|
||||
logger.debug(f"Building node {str(vertex.vertex_type)}")
|
||||
logger.debug(f"Output: {params}")
|
||||
if vertex.artifacts:
|
||||
# The artifacts will be prompt variables
|
||||
# passed to build_input_keys_response
|
||||
|
|
@ -156,12 +157,12 @@ async def stream_build(flow_id: str):
|
|||
)
|
||||
else:
|
||||
input_keys_response = {
|
||||
"input_keys": {},
|
||||
"input_keys": None,
|
||||
"memory_keys": [],
|
||||
"handle_keys": [],
|
||||
}
|
||||
yield str(StreamData(event="message", data=input_keys_response))
|
||||
|
||||
chat_manager = service_manager.get(ServiceType.CHAT_MANAGER)
|
||||
chat_manager.set_cache(flow_id, langchain_object)
|
||||
# We need to reset the chat history
|
||||
chat_manager.chat_history.empty_history(flow_id)
|
||||
|
|
@ -177,5 +178,5 @@ async def stream_build(flow_id: str):
|
|||
try:
|
||||
return StreamingResponse(event_stream(flow_id), media_type="text/event-stream")
|
||||
except Exception as exc:
|
||||
logger.error(exc)
|
||||
logger.error(f"Error streaming build: {exc}")
|
||||
raise HTTPException(status_code=500, detail=str(exc))
|
||||
|
|
|
|||
77
src/backend/langflow/api/v1/components.py
Normal file
77
src/backend/langflow/api/v1/components.py
Normal file
|
|
@ -0,0 +1,77 @@
|
|||
from datetime import timezone
|
||||
from typing import List
|
||||
from uuid import UUID
|
||||
from langflow.services.database.models.component import Component, ComponentModel
|
||||
from langflow.services.utils import get_session
|
||||
from sqlmodel import Session, select
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from sqlalchemy.exc import IntegrityError
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
COMPONENT_NOT_FOUND = "Component not found"
|
||||
COMPONENT_ALREADY_EXISTS = "A component with the same id already exists."
|
||||
COMPONENT_DELETED = "Component deleted"
|
||||
|
||||
|
||||
router = APIRouter(prefix="/components", tags=["Components"])
|
||||
|
||||
|
||||
@router.post("/", response_model=Component)
|
||||
def create_component(component: ComponentModel, db: Session = Depends(get_session)):
|
||||
db_component = Component(**component.dict())
|
||||
try:
|
||||
db.add(db_component)
|
||||
db.commit()
|
||||
db.refresh(db_component)
|
||||
except IntegrityError as e:
|
||||
db.rollback()
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=COMPONENT_ALREADY_EXISTS,
|
||||
) from e
|
||||
return db_component
|
||||
|
||||
|
||||
@router.get("/{component_id}", response_model=Component)
|
||||
def read_component(component_id: UUID, db: Session = Depends(get_session)):
|
||||
if component := db.get(Component, component_id):
|
||||
return component
|
||||
else:
|
||||
raise HTTPException(status_code=404, detail=COMPONENT_NOT_FOUND)
|
||||
|
||||
|
||||
@router.get("/", response_model=List[Component])
|
||||
def read_components(skip: int = 0, limit: int = 50, db: Session = Depends(get_session)):
|
||||
query = select(Component)
|
||||
query = query.offset(skip).limit(limit)
|
||||
|
||||
return db.execute(query).fetchall()
|
||||
|
||||
|
||||
@router.patch("/{component_id}", response_model=Component)
|
||||
def update_component(
|
||||
component_id: UUID, component: ComponentModel, db: Session = Depends(get_session)
|
||||
):
|
||||
db_component = db.get(Component, component_id)
|
||||
if not db_component:
|
||||
raise HTTPException(status_code=404, detail=COMPONENT_NOT_FOUND)
|
||||
component_data = component.dict(exclude_unset=True)
|
||||
|
||||
for key, value in component_data.items():
|
||||
setattr(db_component, key, value)
|
||||
|
||||
db_component.update_at = datetime.now(timezone.utc)
|
||||
db.commit()
|
||||
db.refresh(db_component)
|
||||
return db_component
|
||||
|
||||
|
||||
@router.delete("/{component_id}")
|
||||
def delete_component(component_id: UUID, db: Session = Depends(get_session)):
|
||||
component = db.get(Component, component_id)
|
||||
if not component:
|
||||
raise HTTPException(status_code=404, detail=COMPONENT_NOT_FOUND)
|
||||
db.delete(component)
|
||||
db.commit()
|
||||
return {"detail": COMPONENT_DELETED}
|
||||
|
|
@ -1,18 +1,31 @@
|
|||
from typing import Optional
|
||||
from langflow.cache.utils import save_uploaded_file
|
||||
from langflow.database.models.flow import Flow
|
||||
from langflow.processing.process import process_graph_cached, process_tweaks
|
||||
from langflow.utils.logger import logger
|
||||
from http import HTTPStatus
|
||||
from typing import Annotated, Optional
|
||||
|
||||
from langflow.services.cache.utils import save_uploaded_file
|
||||
from langflow.services.database.models.flow import Flow
|
||||
from langflow.processing.process import process_graph_cached, process_tweaks
|
||||
from langflow.services.utils import get_settings_manager
|
||||
from langflow.utils.logger import logger
|
||||
from fastapi import APIRouter, Depends, HTTPException, UploadFile, Body
|
||||
|
||||
from langflow.interface.custom.custom_component import CustomComponent
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, UploadFile
|
||||
|
||||
from langflow.api.v1.schemas import (
|
||||
ProcessResponse,
|
||||
UploadFileResponse,
|
||||
CustomComponentCode,
|
||||
)
|
||||
|
||||
from langflow.interface.types import langchain_types_dict
|
||||
from langflow.database.base import get_session
|
||||
from langflow.api.utils import merge_nested_dicts_with_renaming
|
||||
|
||||
from langflow.interface.types import (
|
||||
build_langchain_types_dict,
|
||||
build_langchain_template_custom_component,
|
||||
build_langchain_custom_component_list_from_path,
|
||||
)
|
||||
|
||||
from langflow.services.utils import get_session
|
||||
from sqlmodel import Session
|
||||
|
||||
# build router
|
||||
|
|
@ -21,7 +34,37 @@ router = APIRouter(tags=["Base"])
|
|||
|
||||
@router.get("/all")
|
||||
def get_all():
|
||||
return langchain_types_dict
|
||||
logger.debug("Building langchain types dict")
|
||||
native_components = build_langchain_types_dict()
|
||||
# custom_components is a list of dicts
|
||||
# need to merge all the keys into one dict
|
||||
custom_components_from_file = {}
|
||||
settings_manager = get_settings_manager()
|
||||
if settings_manager.settings.COMPONENTS_PATH:
|
||||
logger.info(
|
||||
f"Building custom components from {settings_manager.settings.COMPONENTS_PATH}"
|
||||
)
|
||||
custom_component_dicts = [
|
||||
build_langchain_custom_component_list_from_path(str(path))
|
||||
for path in settings_manager.settings.COMPONENTS_PATH
|
||||
]
|
||||
logger.info(f"Loading {len(custom_component_dicts)} category(ies)")
|
||||
for custom_component_dict in custom_component_dicts:
|
||||
# custom_component_dict is a dict of dicts
|
||||
if not custom_component_dict:
|
||||
continue
|
||||
category = list(custom_component_dict.keys())[0]
|
||||
logger.info(
|
||||
f"Loading {len(custom_component_dict[category])} component(s) from category {category}"
|
||||
)
|
||||
logger.debug(custom_component_dict)
|
||||
custom_components_from_file = merge_nested_dicts_with_renaming(
|
||||
custom_components_from_file, custom_component_dict
|
||||
)
|
||||
|
||||
return merge_nested_dicts_with_renaming(
|
||||
native_components, custom_components_from_file
|
||||
)
|
||||
|
||||
|
||||
# For backwards compatibility we will keep the old endpoint
|
||||
|
|
@ -31,6 +74,7 @@ async def process_flow(
|
|||
flow_id: str,
|
||||
inputs: Optional[dict] = None,
|
||||
tweaks: Optional[dict] = None,
|
||||
clear_cache: Annotated[bool, Body(embed=True)] = False, # noqa: F821
|
||||
session: Session = Depends(get_session),
|
||||
):
|
||||
"""
|
||||
|
|
@ -50,7 +94,7 @@ async def process_flow(
|
|||
graph_data = process_tweaks(graph_data, tweaks)
|
||||
except Exception as exc:
|
||||
logger.error(f"Error processing tweaks: {exc}")
|
||||
response = process_graph_cached(graph_data, inputs)
|
||||
response = process_graph_cached(graph_data, inputs, clear_cache)
|
||||
return ProcessResponse(
|
||||
result=response,
|
||||
)
|
||||
|
|
@ -60,7 +104,11 @@ async def process_flow(
|
|||
raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
@router.post("/upload/{flow_id}", response_model=UploadFileResponse, status_code=201)
|
||||
@router.post(
|
||||
"/upload/{flow_id}",
|
||||
response_model=UploadFileResponse,
|
||||
status_code=HTTPStatus.CREATED,
|
||||
)
|
||||
async def create_upload_file(file: UploadFile, flow_id: str):
|
||||
# Cache file
|
||||
try:
|
||||
|
|
@ -81,3 +129,13 @@ def get_version():
|
|||
from langflow import __version__
|
||||
|
||||
return {"version": __version__}
|
||||
|
||||
|
||||
@router.post("/custom_component", status_code=HTTPStatus.OK)
|
||||
async def custom_component(
|
||||
raw_code: CustomComponentCode,
|
||||
):
|
||||
extractor = CustomComponent(code=raw_code.code)
|
||||
extractor.is_check_valid()
|
||||
|
||||
return build_langchain_template_custom_component(extractor)
|
||||
|
|
|
|||
|
|
@ -1,83 +0,0 @@
|
|||
from uuid import UUID
|
||||
from langflow.database.models.flow_style import (
|
||||
FlowStyle,
|
||||
FlowStyleCreate,
|
||||
FlowStyleRead,
|
||||
FlowStyleUpdate,
|
||||
)
|
||||
from langflow.database.base import get_session
|
||||
from sqlmodel import Session, select
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
|
||||
|
||||
# build router
|
||||
router = APIRouter(prefix="/flow_styles", tags=["FlowStyles"])
|
||||
|
||||
# FlowStyleCreate:
|
||||
# class FlowStyleBase(SQLModel):
|
||||
# color: str = Field(index=True)
|
||||
# emoji: str = Field(index=False)
|
||||
# flow_id: UUID = Field(default=None, foreign_key="flow.id")
|
||||
|
||||
|
||||
@router.post("/", response_model=FlowStyleRead)
|
||||
def create_flow_style(
|
||||
*, session: Session = Depends(get_session), flow_style: FlowStyleCreate
|
||||
):
|
||||
"""Create a new flow_style."""
|
||||
db_flow_style = FlowStyle.from_orm(flow_style)
|
||||
session.add(db_flow_style)
|
||||
session.commit()
|
||||
session.refresh(db_flow_style)
|
||||
return db_flow_style
|
||||
|
||||
|
||||
@router.get("/", response_model=list[FlowStyleRead])
|
||||
def read_flow_styles(*, session: Session = Depends(get_session)):
|
||||
"""Read all flows."""
|
||||
try:
|
||||
flows = session.exec(select(FlowStyle)).all()
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
return flows
|
||||
|
||||
|
||||
@router.get("/{flow_styles_id}", response_model=FlowStyleRead)
|
||||
def read_flow_style(*, session: Session = Depends(get_session), flow_styles_id: UUID):
|
||||
"""Read a flow_style."""
|
||||
if flow_style := session.get(FlowStyle, flow_styles_id):
|
||||
return flow_style
|
||||
else:
|
||||
raise HTTPException(status_code=404, detail="FlowStyle not found")
|
||||
|
||||
|
||||
@router.patch("/{flow_style_id}", response_model=FlowStyleRead)
|
||||
def update_flow_style(
|
||||
*,
|
||||
session: Session = Depends(get_session),
|
||||
flow_style_id: UUID,
|
||||
flow_style: FlowStyleUpdate,
|
||||
):
|
||||
"""Update a flow_style."""
|
||||
db_flow_style = session.get(FlowStyle, flow_style_id)
|
||||
if not db_flow_style:
|
||||
raise HTTPException(status_code=404, detail="FlowStyle not found")
|
||||
flow_data = flow_style.dict(exclude_unset=True)
|
||||
for key, value in flow_data.items():
|
||||
if hasattr(db_flow_style, key) and value is not None:
|
||||
setattr(db_flow_style, key, value)
|
||||
session.add(db_flow_style)
|
||||
session.commit()
|
||||
session.refresh(db_flow_style)
|
||||
return db_flow_style
|
||||
|
||||
|
||||
@router.delete("/{flow_id}")
|
||||
def delete_flow_style(*, session: Session = Depends(get_session), flow_id: UUID):
|
||||
"""Delete a flow_style."""
|
||||
flow_style = session.get(FlowStyle, flow_id)
|
||||
if not flow_style:
|
||||
raise HTTPException(status_code=404, detail="FlowStyle not found")
|
||||
session.delete(flow_style)
|
||||
session.commit()
|
||||
return {"message": "FlowStyle deleted successfully"}
|
||||
|
|
@ -1,16 +1,15 @@
|
|||
from typing import List
|
||||
from uuid import UUID
|
||||
from langflow.settings import settings
|
||||
from langflow.api.utils import remove_api_keys
|
||||
from langflow.api.v1.schemas import FlowListCreate, FlowListRead
|
||||
from langflow.database.models.flow import (
|
||||
from langflow.services.database.models.flow import (
|
||||
Flow,
|
||||
FlowCreate,
|
||||
FlowRead,
|
||||
FlowReadWithStyle,
|
||||
FlowUpdate,
|
||||
)
|
||||
from langflow.database.base import get_session
|
||||
from langflow.services.utils import get_session
|
||||
from langflow.services.utils import get_settings_manager
|
||||
from sqlmodel import Session, select
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from fastapi.encoders import jsonable_encoder
|
||||
|
|
@ -32,7 +31,7 @@ def create_flow(*, session: Session = Depends(get_session), flow: FlowCreate):
|
|||
return db_flow
|
||||
|
||||
|
||||
@router.get("/", response_model=list[FlowReadWithStyle], status_code=200)
|
||||
@router.get("/", response_model=list[FlowRead], status_code=200)
|
||||
def read_flows(*, session: Session = Depends(get_session)):
|
||||
"""Read all flows."""
|
||||
try:
|
||||
|
|
@ -42,7 +41,7 @@ def read_flows(*, session: Session = Depends(get_session)):
|
|||
return [jsonable_encoder(flow) for flow in flows]
|
||||
|
||||
|
||||
@router.get("/{flow_id}", response_model=FlowReadWithStyle, status_code=200)
|
||||
@router.get("/{flow_id}", response_model=FlowRead, status_code=200)
|
||||
def read_flow(*, session: Session = Depends(get_session), flow_id: UUID):
|
||||
"""Read a flow."""
|
||||
if flow := session.get(Flow, flow_id):
|
||||
|
|
@ -61,7 +60,8 @@ def update_flow(
|
|||
if not db_flow:
|
||||
raise HTTPException(status_code=404, detail="Flow not found")
|
||||
flow_data = flow.dict(exclude_unset=True)
|
||||
if settings.remove_api_keys:
|
||||
settings_manager = get_settings_manager()
|
||||
if settings_manager.settings.REMOVE_API_KEYS:
|
||||
flow_data = remove_api_keys(flow_data)
|
||||
for key, value in flow_data.items():
|
||||
setattr(db_flow, key, value)
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
from langflow.database.models.flow import FlowCreate, FlowRead
|
||||
from langflow.services.database.models.flow import FlowCreate, FlowRead
|
||||
from pydantic import BaseModel, Field, validator
|
||||
import json
|
||||
|
||||
|
|
@ -116,3 +116,20 @@ class StreamData(BaseModel):
|
|||
|
||||
def __str__(self) -> str:
|
||||
return f"event: {self.event}\ndata: {json.dumps(self.data)}\n\n"
|
||||
|
||||
|
||||
class CustomComponentCode(BaseModel):
|
||||
code: str
|
||||
|
||||
|
||||
class CustomComponentResponseError(BaseModel):
|
||||
detail: str
|
||||
traceback: str
|
||||
|
||||
|
||||
class ComponentListCreate(BaseModel):
|
||||
flows: List[FlowCreate]
|
||||
|
||||
|
||||
class ComponentListRead(BaseModel):
|
||||
flows: List[FlowRead]
|
||||
|
|
|
|||
7
src/backend/langflow/cache/__init__.py
vendored
7
src/backend/langflow/cache/__init__.py
vendored
|
|
@ -1,7 +0,0 @@
|
|||
from langflow.cache.manager import cache_manager
|
||||
from langflow.cache.flow import InMemoryCache
|
||||
|
||||
__all__ = [
|
||||
"cache_manager",
|
||||
"InMemoryCache",
|
||||
]
|
||||
4
src/backend/langflow/components/__init__.py
Normal file
4
src/backend/langflow/components/__init__.py
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
from langflow.interface.custom.custom_component import CustomComponent
|
||||
|
||||
|
||||
__all__ = ["CustomComponent"]
|
||||
33
src/backend/langflow/components/chains/PromptRunner.py
Normal file
33
src/backend/langflow/components/chains/PromptRunner.py
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
from langflow import CustomComponent
|
||||
|
||||
from langchain.llms.base import BaseLLM
|
||||
from langchain import PromptTemplate
|
||||
from langchain.schema import Document
|
||||
|
||||
|
||||
class PromptRunner(CustomComponent):
|
||||
display_name: str = "Prompt Runner"
|
||||
description: str = "Run a Chain with the given PromptTemplate"
|
||||
beta = True
|
||||
field_config = {
|
||||
"llm": {"display_name": "LLM"},
|
||||
"prompt": {
|
||||
"display_name": "Prompt Template",
|
||||
"info": "Make sure the prompt has all variables filled.",
|
||||
},
|
||||
"code": {"show": False},
|
||||
"inputs": {"field_type": "code"},
|
||||
}
|
||||
|
||||
def build(
|
||||
self,
|
||||
llm: BaseLLM,
|
||||
prompt: PromptTemplate,
|
||||
) -> Document:
|
||||
chain = prompt | llm
|
||||
# The input is an empty dict because the prompt is already filled
|
||||
result = chain.invoke({})
|
||||
if hasattr(result, "content"):
|
||||
result = result.content
|
||||
self.repr_value = result
|
||||
return Document(page_content=str(result))
|
||||
56
src/backend/langflow/components/toolkits/Metaphor.py
Normal file
56
src/backend/langflow/components/toolkits/Metaphor.py
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
from typing import List, Union
|
||||
from langflow import CustomComponent
|
||||
|
||||
from metaphor_python import Metaphor # type: ignore
|
||||
from langchain.tools import Tool
|
||||
from langchain.agents import tool
|
||||
from langchain.agents.agent_toolkits.base import BaseToolkit
|
||||
|
||||
|
||||
class MetaphorToolkit(CustomComponent):
|
||||
display_name: str = "Metaphor"
|
||||
description: str = "Metaphor Toolkit"
|
||||
documentation = (
|
||||
"https://python.langchain.com/docs/integrations/tools/metaphor_search"
|
||||
)
|
||||
beta = True
|
||||
# api key should be password = True
|
||||
field_config = {
|
||||
"metaphor_api_key": {"display_name": "Metaphor API Key", "password": True},
|
||||
"code": {"advanced": True},
|
||||
}
|
||||
|
||||
def build(
|
||||
self,
|
||||
metaphor_api_key: str,
|
||||
use_autoprompt: bool = True,
|
||||
search_num_results: int = 5,
|
||||
similar_num_results: int = 5,
|
||||
) -> Union[Tool, BaseToolkit]:
|
||||
# If documents, then we need to create a Vectara instance using .from_documents
|
||||
client = Metaphor(api_key=metaphor_api_key)
|
||||
|
||||
@tool
|
||||
def search(query: str):
|
||||
"""Call search engine with a query."""
|
||||
return client.search(
|
||||
query, use_autoprompt=use_autoprompt, num_results=search_num_results
|
||||
)
|
||||
|
||||
@tool
|
||||
def get_contents(ids: List[str]):
|
||||
"""Get contents of a webpage.
|
||||
|
||||
The ids passed in should be a list of ids as fetched from `search`.
|
||||
"""
|
||||
return client.get_contents(ids)
|
||||
|
||||
@tool
|
||||
def find_similar(url: str):
|
||||
"""Get search results similar to a given URL.
|
||||
|
||||
The url passed in should be a URL returned from `search`
|
||||
"""
|
||||
return client.find_similar(url, num_results=similar_num_results)
|
||||
|
||||
return [search, get_contents, find_similar] # type: ignore
|
||||
50
src/backend/langflow/components/vectorstores/Vectara.py
Normal file
50
src/backend/langflow/components/vectorstores/Vectara.py
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
from typing import Optional, Union
|
||||
from langflow import CustomComponent
|
||||
|
||||
from langchain.vectorstores import Vectara
|
||||
from langchain.schema import Document
|
||||
from langchain.vectorstores.base import VectorStore
|
||||
from langchain.schema import BaseRetriever
|
||||
from langchain.embeddings.base import Embeddings
|
||||
|
||||
|
||||
class VectaraComponent(CustomComponent):
|
||||
display_name: str = "Vectara"
|
||||
description: str = "Implementation of Vector Store using Vectara"
|
||||
documentation = (
|
||||
"https://python.langchain.com/docs/integrations/vectorstores/vectara"
|
||||
)
|
||||
beta = True
|
||||
# api key should be password = True
|
||||
field_config = {
|
||||
"vectara_customer_id": {"display_name": "Vectara Customer ID"},
|
||||
"vectara_corpus_id": {"display_name": "Vectara Corpus ID"},
|
||||
"vectara_api_key": {"display_name": "Vectara API Key", "password": True},
|
||||
"code": {"show": False},
|
||||
"documents": {"display_name": "Documents"},
|
||||
"embedding": {"display_name": "Embedding"},
|
||||
}
|
||||
|
||||
def build(
|
||||
self,
|
||||
vectara_customer_id: str,
|
||||
vectara_corpus_id: str,
|
||||
vectara_api_key: str,
|
||||
embedding: Optional[Embeddings] = None,
|
||||
documents: Optional[Document] = None,
|
||||
) -> Union[VectorStore, BaseRetriever]:
|
||||
# If documents, then we need to create a Vectara instance using .from_documents
|
||||
if documents is not None and embedding is not None:
|
||||
return Vectara.from_documents(
|
||||
documents=documents, # type: ignore
|
||||
vectara_customer_id=vectara_customer_id,
|
||||
vectara_corpus_id=vectara_corpus_id,
|
||||
vectara_api_key=vectara_api_key,
|
||||
embedding=embedding,
|
||||
)
|
||||
|
||||
return Vectara(
|
||||
vectara_customer_id=vectara_customer_id,
|
||||
vectara_corpus_id=vectara_corpus_id,
|
||||
vectara_api_key=vectara_api_key,
|
||||
)
|
||||
|
|
@ -104,6 +104,8 @@ embeddings:
|
|||
documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/sentence_transformers"
|
||||
CohereEmbeddings:
|
||||
documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/cohere"
|
||||
VertexAIEmbeddings:
|
||||
documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/google_vertex_ai_palm"
|
||||
llms:
|
||||
OpenAI:
|
||||
documentation: "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai"
|
||||
|
|
@ -127,8 +129,8 @@ llms:
|
|||
# There's a bug in this component deactivating until we get it sorted: _language_models.py", line 804, in send_message
|
||||
# is_blocked=safety_attributes.get("blocked", False),
|
||||
# AttributeError: 'list' object has no attribute 'get'
|
||||
# ChatVertexAI:
|
||||
# documentation: "https://python.langchain.com/docs/modules/model_io/models/chat/integrations/google_vertex_ai_palm"
|
||||
ChatVertexAI:
|
||||
documentation: "https://python.langchain.com/docs/modules/model_io/models/chat/integrations/google_vertex_ai_palm"
|
||||
###
|
||||
memories:
|
||||
# https://github.com/supabase-community/supabase-py/issues/482
|
||||
|
|
@ -153,6 +155,8 @@ memories:
|
|||
documentation: "https://python.langchain.com/docs/modules/memory/how_to/vectorstore_retriever_memory"
|
||||
MongoDBChatMessageHistory:
|
||||
documentation: "https://python.langchain.com/docs/modules/memory/integrations/mongodb_chat_message_history"
|
||||
MotorheadMemory:
|
||||
documentation: "https://python.langchain.com/docs/integrations/memory/motorhead_memory"
|
||||
prompts:
|
||||
ChatMessagePromptTemplate:
|
||||
documentation: "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/msg_prompt_templates"
|
||||
|
|
@ -290,3 +294,6 @@ output_parsers:
|
|||
documentation: "https://python.langchain.com/docs/modules/model_io/output_parsers/structured"
|
||||
ResponseSchema:
|
||||
documentation: "https://python.langchain.com/docs/modules/model_io/output_parsers/structured"
|
||||
custom_components:
|
||||
CustomComponent:
|
||||
documentation: ""
|
||||
|
|
|
|||
|
|
@ -31,6 +31,9 @@ CUSTOM_NODES = {
|
|||
"MidJourneyPromptChain": frontend_node.chains.MidJourneyPromptChainNode(),
|
||||
"load_qa_chain": frontend_node.chains.CombineDocsChainNode(),
|
||||
},
|
||||
"custom_components": {
|
||||
"CustomComponent": frontend_node.custom_components.CustomComponentFrontendNode(),
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,37 +0,0 @@
|
|||
from langflow.settings import settings
|
||||
from sqlmodel import SQLModel, Session, create_engine
|
||||
from langflow.utils.logger import logger
|
||||
|
||||
if settings.database_url and settings.database_url.startswith("sqlite"):
|
||||
connect_args = {"check_same_thread": False}
|
||||
else:
|
||||
connect_args = {}
|
||||
if not settings.database_url:
|
||||
raise RuntimeError("No database_url provided")
|
||||
engine = create_engine(settings.database_url, connect_args=connect_args)
|
||||
|
||||
|
||||
def create_db_and_tables():
|
||||
logger.debug("Creating database and tables")
|
||||
try:
|
||||
SQLModel.metadata.create_all(engine)
|
||||
except Exception as exc:
|
||||
logger.error(f"Error creating database and tables: {exc}")
|
||||
raise RuntimeError("Error creating database and tables") from exc
|
||||
# Now check if the table Flow exists, if not, something went wrong
|
||||
# and we need to create the tables again.
|
||||
from sqlalchemy import inspect
|
||||
|
||||
inspector = inspect(engine)
|
||||
if "flow" not in inspector.get_table_names():
|
||||
logger.error("Something went wrong creating the database and tables.")
|
||||
logger.error("Please check your database settings.")
|
||||
|
||||
raise RuntimeError("Something went wrong creating the database and tables.")
|
||||
else:
|
||||
logger.debug("Database and tables created successfully")
|
||||
|
||||
|
||||
def get_session():
|
||||
with Session(engine) as session:
|
||||
yield session
|
||||
|
|
@ -1,33 +0,0 @@
|
|||
# Path: src/backend/langflow/database/models/flowstyle.py
|
||||
|
||||
from langflow.database.models.base import SQLModelSerializable
|
||||
from sqlmodel import Field, Relationship
|
||||
from uuid import UUID, uuid4
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from langflow.database.models.flow import Flow
|
||||
|
||||
|
||||
class FlowStyleBase(SQLModelSerializable):
|
||||
color: str
|
||||
emoji: str
|
||||
flow_id: UUID = Field(default=None, foreign_key="flow.id")
|
||||
|
||||
|
||||
class FlowStyle(FlowStyleBase, table=True):
|
||||
id: UUID = Field(default_factory=uuid4, primary_key=True, unique=True)
|
||||
flow: "Flow" = Relationship(back_populates="style")
|
||||
|
||||
|
||||
class FlowStyleUpdate(SQLModelSerializable):
|
||||
color: Optional[str] = None
|
||||
emoji: Optional[str] = None
|
||||
|
||||
|
||||
class FlowStyleCreate(FlowStyleBase):
|
||||
pass
|
||||
|
||||
|
||||
class FlowStyleRead(FlowStyleBase):
|
||||
id: UUID
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
from typing import Dict, Generator, List, Type, Union
|
||||
|
||||
from langflow.graph.edge.base import Edge
|
||||
from langflow.graph.graph.constants import VERTEX_TYPE_MAP
|
||||
from langflow.graph.graph.constants import lazy_load_vertex_dict
|
||||
from langflow.graph.vertex.base import Vertex
|
||||
from langflow.graph.vertex.types import (
|
||||
FileToolVertex,
|
||||
|
|
@ -77,6 +77,8 @@ class Graph:
|
|||
|
||||
def _validate_nodes(self) -> None:
|
||||
"""Check that all nodes have edges"""
|
||||
if len(self.nodes) == 1:
|
||||
return
|
||||
for node in self.nodes:
|
||||
if not self._validate_node(node):
|
||||
raise ValueError(
|
||||
|
|
@ -185,10 +187,12 @@ class Graph:
|
|||
"""Returns the node class based on the node type."""
|
||||
if node_type in FILE_TOOLS:
|
||||
return FileToolVertex
|
||||
if node_type in VERTEX_TYPE_MAP:
|
||||
return VERTEX_TYPE_MAP[node_type]
|
||||
if node_type in lazy_load_vertex_dict.VERTEX_TYPE_MAP:
|
||||
return lazy_load_vertex_dict.VERTEX_TYPE_MAP[node_type]
|
||||
return (
|
||||
VERTEX_TYPE_MAP[node_lc_type] if node_lc_type in VERTEX_TYPE_MAP else Vertex
|
||||
lazy_load_vertex_dict.VERTEX_TYPE_MAP[node_lc_type]
|
||||
if node_lc_type in lazy_load_vertex_dict.VERTEX_TYPE_MAP
|
||||
else Vertex
|
||||
)
|
||||
|
||||
def _build_vertices(self) -> List[Vertex]:
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
from langflow.graph.vertex.base import Vertex
|
||||
from langflow.graph.vertex import types
|
||||
from langflow.interface.agents.base import agent_creator
|
||||
from langflow.interface.chains.base import chain_creator
|
||||
|
|
@ -14,23 +13,46 @@ from langflow.interface.vector_store.base import vectorstore_creator
|
|||
from langflow.interface.wrappers.base import wrapper_creator
|
||||
from langflow.interface.output_parsers.base import output_parser_creator
|
||||
from langflow.interface.retrievers.base import retriever_creator
|
||||
|
||||
from typing import Dict, Type
|
||||
from langflow.interface.custom.base import custom_component_creator
|
||||
from langflow.utils.lazy_load import LazyLoadDictBase
|
||||
|
||||
|
||||
VERTEX_TYPE_MAP: Dict[str, Type[Vertex]] = {
|
||||
**{t: types.PromptVertex for t in prompt_creator.to_list()},
|
||||
**{t: types.AgentVertex for t in agent_creator.to_list()},
|
||||
**{t: types.ChainVertex for t in chain_creator.to_list()},
|
||||
**{t: types.ToolVertex for t in tool_creator.to_list()},
|
||||
**{t: types.ToolkitVertex for t in toolkits_creator.to_list()},
|
||||
**{t: types.WrapperVertex for t in wrapper_creator.to_list()},
|
||||
**{t: types.LLMVertex for t in llm_creator.to_list()},
|
||||
**{t: types.MemoryVertex for t in memory_creator.to_list()},
|
||||
**{t: types.EmbeddingVertex for t in embedding_creator.to_list()},
|
||||
**{t: types.VectorStoreVertex for t in vectorstore_creator.to_list()},
|
||||
**{t: types.DocumentLoaderVertex for t in documentloader_creator.to_list()},
|
||||
**{t: types.TextSplitterVertex for t in textsplitter_creator.to_list()},
|
||||
**{t: types.OutputParserVertex for t in output_parser_creator.to_list()},
|
||||
**{t: types.RetrieverVertex for t in retriever_creator.to_list()},
|
||||
}
|
||||
class VertexTypesDict(LazyLoadDictBase):
|
||||
def __init__(self):
|
||||
self._all_types_dict = None
|
||||
|
||||
@property
|
||||
def VERTEX_TYPE_MAP(self):
|
||||
return self.all_types_dict
|
||||
|
||||
def _build_dict(self):
|
||||
langchain_types_dict = self.get_type_dict()
|
||||
return {
|
||||
**langchain_types_dict,
|
||||
"Custom": ["Custom Tool", "Python Function"],
|
||||
}
|
||||
|
||||
def get_type_dict(self):
|
||||
return {
|
||||
**{t: types.PromptVertex for t in prompt_creator.to_list()},
|
||||
**{t: types.AgentVertex for t in agent_creator.to_list()},
|
||||
**{t: types.ChainVertex for t in chain_creator.to_list()},
|
||||
**{t: types.ToolVertex for t in tool_creator.to_list()},
|
||||
**{t: types.ToolkitVertex for t in toolkits_creator.to_list()},
|
||||
**{t: types.WrapperVertex for t in wrapper_creator.to_list()},
|
||||
**{t: types.LLMVertex for t in llm_creator.to_list()},
|
||||
**{t: types.MemoryVertex for t in memory_creator.to_list()},
|
||||
**{t: types.EmbeddingVertex for t in embedding_creator.to_list()},
|
||||
**{t: types.VectorStoreVertex for t in vectorstore_creator.to_list()},
|
||||
**{t: types.DocumentLoaderVertex for t in documentloader_creator.to_list()},
|
||||
**{t: types.TextSplitterVertex for t in textsplitter_creator.to_list()},
|
||||
**{t: types.OutputParserVertex for t in output_parser_creator.to_list()},
|
||||
**{
|
||||
t: types.CustomComponentVertex
|
||||
for t in custom_component_creator.to_list()
|
||||
},
|
||||
**{t: types.RetrieverVertex for t in retriever_creator.to_list()},
|
||||
}
|
||||
|
||||
|
||||
lazy_load_vertex_dict = VertexTypesDict()
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
import ast
|
||||
from langflow.interface.initialize import loading
|
||||
from langflow.interface.listing import ALL_TYPES_DICT
|
||||
from langflow.interface.listing import lazy_load_dict
|
||||
from langflow.utils.constants import DIRECT_TYPES
|
||||
from langflow.utils.logger import logger
|
||||
from langflow.utils.util import sync_to_async
|
||||
|
|
@ -61,7 +62,7 @@ class Vertex:
|
|||
)
|
||||
|
||||
if self.base_type is None:
|
||||
for base_type, value in ALL_TYPES_DICT.items():
|
||||
for base_type, value in lazy_load_dict.ALL_TYPES_DICT.items():
|
||||
if self.vertex_type in value:
|
||||
self.base_type = base_type
|
||||
break
|
||||
|
|
@ -100,7 +101,9 @@ class Vertex:
|
|||
params[param_key] = edge.source
|
||||
|
||||
for key, value in template_dict.items():
|
||||
if key == "_type" or not value.get("show"):
|
||||
# Skip _type and any value that has show == False and is not code
|
||||
# If we don't want to show code but we want to use it
|
||||
if key == "_type" or (not value.get("show") and key != "code"):
|
||||
continue
|
||||
# If the type is not transformable to a python base class
|
||||
# then we need to get the edge that connects to this node
|
||||
|
|
@ -112,7 +115,14 @@ class Vertex:
|
|||
|
||||
params[key] = file_path
|
||||
elif value.get("type") in DIRECT_TYPES and params.get(key) is None:
|
||||
params[key] = value.get("value")
|
||||
if value.get("type") == "code":
|
||||
try:
|
||||
params[key] = ast.literal_eval(value.get("value"))
|
||||
except Exception as exc:
|
||||
logger.debug(f"Error parsing code: {exc}")
|
||||
params[key] = value.get("value")
|
||||
else:
|
||||
params[key] = value.get("value")
|
||||
|
||||
if not value.get("required") and params.get(key) is None:
|
||||
if value.get("default"):
|
||||
|
|
@ -259,4 +269,8 @@ class Vertex:
|
|||
|
||||
def _built_object_repr(self):
|
||||
# Add a message with an emoji, stars for sucess,
|
||||
return "Built sucessfully ✨" if self._built_object else "Failed to build 😵💫"
|
||||
return (
|
||||
"Built sucessfully ✨"
|
||||
if self._built_object is not None
|
||||
else "Failed to build 😵💫"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -226,7 +226,11 @@ class PromptVertex(Vertex):
|
|||
# so the prompt format doesn't break
|
||||
artifacts.pop("handle_keys", None)
|
||||
try:
|
||||
template = self._built_object.format(**artifacts)
|
||||
template = self._built_object.template
|
||||
for key, value in artifacts.items():
|
||||
if value:
|
||||
replace_key = "{" + key + "}"
|
||||
template = template.replace(replace_key, value)
|
||||
return (
|
||||
template
|
||||
if isinstance(template, str)
|
||||
|
|
@ -239,3 +243,12 @@ class PromptVertex(Vertex):
|
|||
class OutputParserVertex(Vertex):
|
||||
def __init__(self, data: Dict):
|
||||
super().__init__(data, base_type="output_parsers")
|
||||
|
||||
|
||||
class CustomComponentVertex(Vertex):
|
||||
def __init__(self, data: Dict):
|
||||
super().__init__(data, base_type="custom_components")
|
||||
|
||||
def _built_object_repr(self):
|
||||
if self.artifacts and "repr" in self.artifacts:
|
||||
return self.artifacts["repr"] or super()._built_object_repr()
|
||||
|
|
|
|||
|
|
@ -5,7 +5,8 @@ from langchain.agents import types
|
|||
from langflow.custom.customs import get_custom_nodes
|
||||
from langflow.interface.agents.custom import CUSTOM_AGENTS
|
||||
from langflow.interface.base import LangChainTypeCreator
|
||||
from langflow.settings import settings
|
||||
from langflow.services.utils import get_settings_manager
|
||||
|
||||
from langflow.template.frontend_node.agents import AgentFrontendNode
|
||||
from langflow.utils.logger import logger
|
||||
from langflow.utils.util import build_template_from_class, build_template_from_method
|
||||
|
|
@ -53,13 +54,17 @@ class AgentCreator(LangChainTypeCreator):
|
|||
# Now this is a generator
|
||||
def to_list(self) -> List[str]:
|
||||
names = []
|
||||
settings_manager = get_settings_manager()
|
||||
for _, agent in self.type_to_loader_dict.items():
|
||||
agent_name = (
|
||||
agent.function_name()
|
||||
if hasattr(agent, "function_name")
|
||||
else agent.__name__
|
||||
)
|
||||
if agent_name in settings.agents or settings.dev:
|
||||
if (
|
||||
agent_name in settings_manager.settings.AGENTS
|
||||
or settings_manager.settings.DEV
|
||||
):
|
||||
names.append(agent_name)
|
||||
return names
|
||||
|
||||
|
|
|
|||
|
|
@ -2,13 +2,14 @@ from abc import ABC, abstractmethod
|
|||
from typing import Any, Dict, List, Optional, Type, Union
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.agents import AgentExecutor
|
||||
from langflow.services.utils import get_settings_manager
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langflow.template.field.base import TemplateField
|
||||
from langflow.template.frontend_node.base import FrontendNode
|
||||
from langflow.template.template.base import Template
|
||||
from langflow.utils.logger import logger
|
||||
from langflow.settings import settings
|
||||
|
||||
|
||||
# Assuming necessary imports for Field, Template, and FrontendNode classes
|
||||
|
||||
|
|
@ -26,15 +27,18 @@ class LangChainTypeCreator(BaseModel, ABC):
|
|||
@property
|
||||
def docs_map(self) -> Dict[str, str]:
|
||||
"""A dict with the name of the component as key and the documentation link as value."""
|
||||
settings_manager = get_settings_manager()
|
||||
if self.name_docs_dict is None:
|
||||
try:
|
||||
type_settings = getattr(settings, self.type_name)
|
||||
type_settings = getattr(
|
||||
settings_manager.settings, self.type_name.upper()
|
||||
)
|
||||
self.name_docs_dict = {
|
||||
name: value_dict["documentation"]
|
||||
for name, value_dict in type_settings.items()
|
||||
}
|
||||
except AttributeError as exc:
|
||||
logger.error(exc)
|
||||
logger.error(f"Error getting settings for {self.type_name}: {exc}")
|
||||
|
||||
self.name_docs_dict = {}
|
||||
return self.name_docs_dict
|
||||
|
|
|
|||
|
|
@ -3,11 +3,13 @@ from typing import Any, Dict, List, Optional, Type
|
|||
from langflow.custom.customs import get_custom_nodes
|
||||
from langflow.interface.base import LangChainTypeCreator
|
||||
from langflow.interface.importing.utils import import_class
|
||||
from langflow.settings import settings
|
||||
from langflow.services.utils import get_settings_manager
|
||||
|
||||
from langflow.template.frontend_node.chains import ChainFrontendNode
|
||||
from langflow.utils.logger import logger
|
||||
from langflow.utils.util import build_template_from_class, build_template_from_method
|
||||
from langchain import chains
|
||||
from langchain_experimental.sql import SQLDatabaseChain # type: ignore
|
||||
|
||||
# Assuming necessary imports for Field, Template, and FrontendNode classes
|
||||
|
||||
|
|
@ -29,18 +31,22 @@ class ChainCreator(LangChainTypeCreator):
|
|||
@property
|
||||
def type_to_loader_dict(self) -> Dict:
|
||||
if self.type_dict is None:
|
||||
settings_manager = get_settings_manager()
|
||||
self.type_dict: dict[str, Any] = {
|
||||
chain_name: import_class(f"langchain.chains.{chain_name}")
|
||||
for chain_name in chains.__all__
|
||||
}
|
||||
from langflow.interface.chains.custom import CUSTOM_CHAINS
|
||||
|
||||
self.type_dict["SQLDatabaseChain"] = SQLDatabaseChain
|
||||
|
||||
self.type_dict.update(CUSTOM_CHAINS)
|
||||
# Filter according to settings.chains
|
||||
self.type_dict = {
|
||||
name: chain
|
||||
for name, chain in self.type_dict.items()
|
||||
if name in settings.chains or settings.dev
|
||||
if name in settings_manager.settings.CHAINS
|
||||
or settings_manager.settings.DEV
|
||||
}
|
||||
return self.type_dict
|
||||
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue