Merge branch 'zustand/io/migration' into globalVariables

This commit is contained in:
Lucas Oliveira 2024-03-18 23:40:15 +01:00
commit 6c9a87bed7
529 changed files with 26323 additions and 10993 deletions

View file

@ -56,6 +56,13 @@ LANGFLOW_REMOVE_API_KEYS=
# LANGFLOW_REDIS_CACHE_EXPIRE (default: 3600)
LANGFLOW_CACHE_TYPE=
# Set AUTO_LOGIN to false if you want to disable auto login
# and use the login form to login. LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD
# must be set if AUTO_LOGIN is set to false
# Values: true, false
LANGFLOW_AUTO_LOGIN=
# Superuser username
# Example: LANGFLOW_SUPERUSER=admin
LANGFLOW_SUPERUSER=

View file

@ -1,6 +0,0 @@
#!/bin/sh
added_files=$(git diff --name-only --cached --diff-filter=d)
make format
git add ${added_files}

11
.github/dependabot.yml vendored Normal file
View file

@ -0,0 +1,11 @@
# Set update schedule for GitHub Actions
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
# Check for updates to GitHub Actions every month
interval: "monthly"

View file

@ -16,10 +16,10 @@ jobs:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Cache Docker layers
uses: actions/cache@v2
uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}

View file

@ -30,11 +30,11 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@ -48,7 +48,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
uses: github/codeql-action/autobuild@v3
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@ -61,6 +61,6 @@ jobs:
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

View file

@ -12,8 +12,8 @@ jobs:
name: Deploy to GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 18
cache: npm

View file

@ -3,7 +3,15 @@ name: lint
on:
push:
branches: [main]
paths:
- "poetry.lock"
- "pyproject.toml"
- "src/backend/**"
pull_request:
paths:
- "poetry.lock"
- "pyproject.toml"
- "src/backend/**"
env:
POETRY_VERSION: "1.7.0"
@ -16,13 +24,14 @@ jobs:
python-version:
- "3.9"
- "3.10"
- "3.11"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Install poetry
run: |
pipx install poetry==$POETRY_VERSION
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: poetry

View file

@ -18,11 +18,11 @@ jobs:
if: ${{ (github.event.pull_request.merged == true) && contains(github.event.pull_request.labels.*.name, 'pre-release') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry==$POETRY_VERSION
- name: Set up Python 3.10
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: "3.10"
cache: "poetry"

View file

@ -17,11 +17,11 @@ jobs:
if: ${{ (github.event.pull_request.merged == true) && contains(github.event.pull_request.labels.*.name, 'Release') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry==$POETRY_VERSION
- name: Set up Python 3.10
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: "3.10"
cache: "poetry"

View file

@ -3,8 +3,16 @@ name: test
on:
push:
branches: [main]
paths:
- "poetry.lock"
- "pyproject.toml"
- "src/backend/**"
pull_request:
branches: [dev]
paths:
- "poetry.lock"
- "pyproject.toml"
- "src/backend/**"
env:
POETRY_VERSION: "1.5.0"
@ -16,14 +24,15 @@ jobs:
matrix:
python-version:
- "3.10"
- "3.11"
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry==$POETRY_VERSION
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: "poetry"

1
.gitignore vendored
View file

@ -260,3 +260,4 @@ langflow.db
src/backend/langflow/frontend/
.docker
scratchpad*
chroma*/*

3
.vscode/launch.json vendored
View file

@ -17,6 +17,9 @@
],
"jinja": true,
"justMyCode": true,
"env": {
"LANGFLOW_LOG_LEVEL": "debug"
},
"envFile": "${workspaceFolder}/.env"
},
{

View file

@ -3,10 +3,6 @@
all: help
init:
@echo 'Installing pre-commit hooks'
git config core.hooksPath .githooks
@echo 'Making pre-commit hook executable'
chmod +x .githooks/pre-commit
@echo 'Installing backend dependencies'
make install_backend
@echo 'Installing frontend dependencies'

View file

@ -1,46 +1,27 @@
<!-- Title -->
<!-- markdownlint-disable MD030 -->
# ⛓️ Langflow
~ An effortless way to experiment and prototype [LangChain](https://github.com/hwchase17/langchain) pipelines ~
<h3>Discover a simpler & smarter way to build around Foundation Models</h3>
<p>
<img alt="GitHub Contributors" src="https://img.shields.io/github/contributors/logspace-ai/langflow" />
<img alt="GitHub Last Commit" src="https://img.shields.io/github/last-commit/logspace-ai/langflow" />
<img alt="" src="https://img.shields.io/github/repo-size/logspace-ai/langflow" />
<img alt="GitHub Issues" src="https://img.shields.io/github/issues/logspace-ai/langflow" />
<img alt="GitHub Pull Requests" src="https://img.shields.io/github/issues-pr/logspace-ai/langflow" />
<img alt="Github License" src="https://img.shields.io/github/license/logspace-ai/langflow" />
</p>
[![Release Notes](https://img.shields.io/github/release/logspace-ai/langflow)](https://github.com/logspace-ai/langflow/releases)
[![Contributors](https://img.shields.io/github/contributors/logspace-ai/langflow)](https://github.com/logspace-ai/langflow/contributors)
[![Last Commit](https://img.shields.io/github/last-commit/logspace-ai/langflow)](https://github.com/logspace-ai/langflow/last-commit)
[![Open Issues](https://img.shields.io/github/issues-raw/logspace-ai/langflow)](https://github.com/logspace-ai/langflow/issues)
[![LRepo-size](https://img.shields.io/github/repo-size/logspace-ai/langflow)](https://github.com/logspace-ai/langflow/repo-size)
[![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/logspace-ai/langflow)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![GitHub star chart](https://img.shields.io/github/stars/logspace-ai/langflow?style=social)](https://star-history.com/#logspace-ai/langflow)
[![GitHub fork](https://img.shields.io/github/forks/logspace-ai/langflow?style=social)](https://github.com/logspace-ai/langflow/fork)
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langflow_ai.svg?style=social&label=Follow%20%40langflow_ai)](https://twitter.com/langflow_ai)
[![](https://dcbadge.vercel.app/api/server/EqksyE2EX9?compact=true&style=flat)](https://discord.com/invite/EqksyE2EX9)
[![HuggingFace Spaces](https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg)](https://huggingface.co/spaces/Logspace/Langflow)
[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/logspace-ai/langflow)
<p>
<a href="https://discord.gg/EqksyE2EX9"><img alt="Discord Server" src="https://dcbadge.vercel.app/api/server/EqksyE2EX9?compact=true&style=flat"/></a>
<a href="https://huggingface.co/spaces/Logspace/Langflow"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
</p>
The easiest way to create and customize your flow
<a href="https://github.com/logspace-ai/langflow">
<img width="100%" src="https://github.com/logspace-ai/langflow/blob/dev/img/langflow-demo.gif?raw=true"></a>
<p>
</p>
# Table of Contents
- [⛓️ Langflow](#-langflow)
- [Table of Contents](#table-of-contents)
- [📦 Installation](#-installation)
- [Locally](#locally)
- [HuggingFace Spaces](#huggingface-spaces)
- [🖥️ Command Line Interface (CLI)](#-command-line-interface-cli)
- [Usage](#usage)
- [Environment Variables](#environment-variables)
- [Deployment](#deployment)
- [Deploy Langflow on Google Cloud Platform](#deploy-langflow-on-google-cloud-platform)
- [Deploy on Railway](#deploy-on-railway)
- [Deploy on Render](#deploy-on-render)
- [🎨 Creating Flows](#-creating-flows)
- [👋 Contributing](#-contributing)
- [📄 License](#-license)
<img width="100%" src="https://github.com/logspace-ai/langflow/blob/dev/docs/static/img/new_langflow_demo.gif"></a>
# 📦 Installation
@ -65,7 +46,7 @@ This will install the following dependencies:
- [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)
- [sentence-transformers](https://github.com/UKPLab/sentence-transformers)
You can still use models from projects like LocalAI
You can still use models from projects like LocalAI, Ollama, LM Studio, Jan and others.
Next, run:
@ -117,7 +98,7 @@ Each option is detailed below:
- `--backend-only`: This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable.
- `--store`: This parameter, with a default value of `True`, enables the store features, use `--no-store` to deactivate it. It can be configured using the `LANGFLOW_STORE` environment variable.
These parameters are important for users who need to customize the behavior of Langflow, especially in development or specialized deployment scenarios. You may want to update the documentation to include these parameters for completeness and clarity.
These parameters are important for users who need to customize the behavior of Langflow, especially in development or specialized deployment scenarios.
### Environment Variables
@ -147,19 +128,19 @@ Alternatively, click the **"Open in Cloud Shell"** button below to launch Google
# 🎨 Creating Flows
Creating flows with Langflow is easy. Simply drag sidebar components onto the canvas and connect them together to create your pipeline. Langflow provides a range of [LangChain components](https://python.langchain.com/docs/integrations/components) to choose from, including LLMs, prompt serializers, agents, and chains.
Creating flows with Langflow is easy. Simply drag components from the sidebar onto the canvas and connect them to start building your application.
Explore by editing prompt parameters, link chains and agents, track an agent's thought process, and export your flow.
Explore by editing prompt parameters, grouping components into a single high-level component, and building your own Custom Components.
Once you're done, you can export your flow as a JSON file to use with LangChain.
To do so, click the "Export" button in the top right corner of the canvas, then
in Python, you can load the flow with:
Once you're done, you can export your flow as a JSON file.
Load the flow with:
```python
from langflow import load_flow_from_json
flow = load_flow_from_json("path/to/flow.json")
# Now you can use it like any chain
# Now you can use it
flow("Hey, have you heard of Langflow?")
```
@ -167,15 +148,16 @@ flow("Hey, have you heard of Langflow?")
We welcome contributions from developers of all levels to our open-source project on GitHub. If you'd like to contribute, please check our [contributing guidelines](./CONTRIBUTING.md) and help make Langflow more accessible.
Join our [Discord](https://discord.com/invite/EqksyE2EX9) server to ask questions, make suggestions, and showcase your projects! 🦾
---
Join our [Discord](https://discord.com/invite/EqksyE2EX9) server to ask questions, make suggestions and showcase your projects! 🦾
<p>
</p>
[![Star History Chart](https://api.star-history.com/svg?repos=logspace-ai/langflow&type=Timeline)](https://star-history.com/#logspace-ai/langflow&Date)
# 🌟 Contributors
[![langflow contributors](https://contrib.rocks/image?repo=logspace-ai/langflow)](https://github.com/logspace-ai/langflow/graphs/contributors)
# 📄 License
Langflow is released under the MIT License. See the LICENSE file for details.

View file

@ -22,6 +22,8 @@ services:
dockerfile: ./cdk.Dockerfile
args:
- BACKEND_URL=http://backend:7860
depends_on:
- backend
environment:
- VITE_PROXY_TARGET=http://backend:7860
ports:

View file

@ -81,7 +81,17 @@ The CustomComponent class serves as the foundation for creating custom component
| _`required: bool`_ | Makes the field required. |
| _`info: str`_ | Adds a tooltip to the field. |
| _`file_types: List[str]`_ | This is a requirement if the _`field_type`_ is _file_. Defines which file types will be accepted. For example, _json_, _yaml_ or _yml_. |
| _`range_spec: langflow.field_typing.RangeSpec`_ | This is a requirement if the _`field_type`_ is _`float`_. Defines the range of values accepted and the step size. If none is defined, the default is _`[-1, 1, 0.1]`_. |
| _`range_spec: langflow.field_typing.RangeSpec`_ | This is a requirement if the _`field_type`_ is _`float`_. Defines the range of values accepted and the step size. If none is defined, the default is _`[-1, 1, 0.1]`_. |
| _`title_case: bool`_ | Formats the name of the field when _`display_name`_ is not defined. Set it to False to keep the name as you set it in the _`build`_ method. |
| _`refresh_button: bool`_ | If set to True a button will appear to the right of the field, and when clicked, it will call the _`update_build_config`_ method which takes in the _`build_config`_, the name of the field (_`field_name`_) and the latest value of the field (_`field_value`_). This is useful when you want to update the _`build_config`_ based on the value of the field. |
| _`real_time_refresh: bool`_ | If set to True, the _`update_build_config`_ method will be called every time the field value changes. |
<Admonition type="info" label="Tip">
By using the _`update_build_config`_ method, you can update the _`build_config`_ in whatever way you want based on the value of the field or not.
</Admonition>
- The CustomComponent class also provides helpful methods for specific tasks (e.g., to load and use other flows from the Langflow platform):
| Method Name | Description |
@ -94,7 +104,9 @@ The CustomComponent class serves as the foundation for creating custom component
| Attribute Name | Description |
| -------------- | ----------------------------------------------------------------------------- |
| _`repr_value`_ | Displays the value it receives in the _`build`_ method. Useful for debugging. |
| _`status`_ | Displays the value it receives in the _`build`_ method. Useful for debugging. |
| _`field_order`_ | Defines the order the fields will be displayed in the canvas. |
| _`icon`_ | Defines the emoji (for example, _`:rocket:`_) that will be displayed in the canvas. |
<Admonition type="info" label="Tip">

View file

@ -98,9 +98,9 @@ Used to load [OpenAI's](https://openai.com/) embedding models.
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embeddings API](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings).
:::info
<Admonition type="info">
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
:::
</Admonition>
- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
- **location:** The default location to use when making API calls defaults to `us-central1`.

View file

View file

@ -0,0 +1,39 @@
import Admonition from '@theme/Admonition';
# Inputs
### ChatInput
This component is designed to get user input from the chat.
**Params**
- **Sender Type:** specifies the sender type. Defaults to _`"User"`_. Options are _`"Machine"`_ and _`"User"`_.
- **Sender Name:** specifies the name of the sender. Defaults to _`"User"`_.
- **Message:** specifies the message text. It is a multiline text input.
- **Session ID:** specifies the session ID of the chat history. If provided, the message will be saved in the Message History.
<Admonition type="note" title="Note">
<p>
If _`As Record`_ is _`true`_ and the _`Message`_ is a _`Record`_, the data of the _`Record`_ will be updated with the _`Sender`_, _`Sender Name`_, and _`Session ID`_.
</p>
</Admonition>
### TextInput
This component is designed for simple text input, allowing users to pass textual data to subsequent components in the workflow. It's particularly useful for scenarios where a brief user input is required to initiate or influence the flow.
**Params**
- **Value:** Specifies the text input value. This is where the user can input the text data that will be passed to the next component in the sequence. If no value is provided, it defaults to an empty string.
<Admonition type="note" title="Note">
<p>
The `TextInput` component serves as a straightforward means for setting Text input values in the chat window. It ensures that textual data can be seamlessly passed to subsequent components in the flow.
</p>
</Admonition>

View file

@ -12,6 +12,26 @@ Memory is a concept in chat-based applications that allows the system to remembe
---
### MessageHistory
This component is designed to retrieve stored messages based on various filters such as sender type, sender name, session ID, and a specific file path where messages are stored. It allows for a flexible retrieval of chat history, providing insights into past interactions.
**Params**
- **Sender Type:** (Optional) Specifies the type of the sender. Options are _`"Machine"`_, _`"User"`_, or _`"Machine and User"`_. Filters the messages by the type of the sender.
- **Sender Name:** (Optional) Specifies the name of the sender. Filters the messages by the name of the sender.
- **Session ID:** (Optional) Specifies the session ID of the chat history. Filters the messages belonging to a specific session.
- **Number of Messages:** Specifies the number of messages to retrieve. Defaults to _`5`_. Determines how many recent messages from the chat history to fetch.
<Admonition type="note" title="Note">
<p>
The component retrieves messages based on the provided criteria, including the specific file path for stored messages. If no specific criteria are provided, it will return the most recent messages up to the specified limit. This component can be used to review past interactions and analyze the flow of conversations.
</p>
</Admonition>
### ConversationBufferMemory
The `ConversationBufferMemory` component is a type of memory system that plainly stores the last few inputs and outputs of a conversation.
@ -27,7 +47,7 @@ The `ConversationBufferMemory` component is a type of memory system that plainly
### ConversationBufferWindowMemory
`ConversationBufferWindowMemory` is a variation of the `ConversationBufferMemory` that maintains a list of the recent interactions in a conversation. It only keeps the last K interactions in memory, which can be useful for maintaining a sliding window of the most recent interactions without letting the buffer get too large.
`ConversationBufferWindowMemory` is a variation of the `ConversationBufferMemory` that maintains a list of the recent interactions in a conversation. It only keeps the last K interactions in memory, which can be useful for maintaining a sliding window of the most recent interactions without letting the buffer get too large.
**Params**
@ -72,7 +92,7 @@ The `ConversationEntityMemory` component incorporates intricate memory structure
### ConversationSummaryMemory
The `ConversationSummaryMemory` is a memory component that creates a summary of the conversation over time. It condenses information from the conversation and stores the current summary in memory. It is particularly useful for longer conversations where keeping the entire message history in the prompt would take up too many tokens.
The `ConversationSummaryMemory` is a memory component that creates a summary of the conversation over time. It condenses information from the conversation and stores the current summary in memory. It is particularly useful for longer conversations where keeping the entire message history in the prompt would take up too many tokens.
**Params**

View file

@ -40,9 +40,10 @@ Wrapper around Anthropic's large language model used for chat-based interactions
The `CTransformers` component provides access to the Transformer models implemented in C/C++ using the [GGML](https://github.com/ggerganov/ggml) library.
:::info
<Admonition type="info">
Make sure to have the `ctransformers` python package installed. Learn more about installation, supported models, and usage [here](https://github.com/marella/ctransformers).
:::
</Admonition>
**config:** Configuration for the Transformer models. Check out [config](https://github.com/marella/ctransformers#config). Defaults to:
@ -115,9 +116,9 @@ Wrapper around [Cohere's](https://cohere.com) large language models.
Wrapper around [HuggingFace](https://www.huggingface.co/models) models.
:::info
<Admonition type="info">
The HuggingFace Hub is an online platform that hosts over 120k models, 20k datasets, and 50k demo apps, all of which are open-source and publicly available. Discover more at [HuggingFace](http://www.huggingface.co).
:::
</Admonition>
- **huggingfacehub_api_token:** Token needed to authenticate the API.
- **model_kwargs:** Keyword arguments to pass to the model.
@ -130,9 +131,9 @@ The HuggingFace Hub is an online platform that hosts over 120k models, 20k datas
The `LlamaCpp` component provides access to the `llama.cpp` models.
:::info
<Admonition type="info">
Make sure to have the `llama.cpp` python package installed. Learn more about installation, supported models, and usage [here](https://github.com/ggerganov/llama.cpp).
:::
</Admonition>
- **echo:** Whether to echo the prompt defaults to `False`.
- **f16_kv:** Use half-precision for key/value cache defaults to `True`.
@ -181,9 +182,9 @@ Wrapper around [OpenAI's](https://openai.com) large language models.
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) large language models.
:::info
<Admonition type="info">
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
:::
</Admonition>
- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
- **location:** The default location to use when making API calls defaults to `us-central1`.
@ -203,9 +204,9 @@ Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP).
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) large language models.
:::info
<Admonition type="info">
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
:::
</Admonition>
- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
- **location:** The default location to use when making API calls defaults to `us-central1`.

View file

View file

@ -0,0 +1,37 @@
import Admonition from '@theme/Admonition';
# Outputs
### ChatOutput
This component is designed to send a message to the chat.
**Params**
- **Sender Type:** specifies the sender type. Defaults to _`"Machine"`_. Options are _`"Machine"`_ and _`"User"`_.
- **Sender Name:** specifies the name of the sender. Defaults to _`"AI"`_.
- **Session ID:** specifies the session ID of the chat history. If provided, the message will be saved in the Message History.
- **Message:** specifies the message text.
<Admonition type="note" title="Note">
<p>
If _`As Record`_ is _`true`_ and the _`Message`_ is a _`Record`_, the data of the _`Record`_ will be updated with the _`Sender`_, _`Sender Name`_, and _`Session ID`_.
</p>
</Admonition>
### TextOutput
This component is designed to display text data to the user. It's particularly useful for scenarios where you don't want to send the text data to the chat, but still want to display it.
**Params**
- **Value:** Specifies the text data to be displayed. This is where the text data to be displayed is provided. If no value is provided, it defaults to an empty string.
<Admonition type="note" title="Note">
<p>
The `TextOutput` component serves as a straightforward means for displaying text data. It ensures that textual data can be seamlessly observed in the chat window throughout your flow.
</p>
</Admonition>

View file

@ -21,7 +21,7 @@ The `PromptTemplate` component allows users to create prompts and define variabl
<Admonition type="info">
Once a variable is defined in the prompt template, it becomes a component
input of its own. Check out [Prompt
Customization](../guidelines/prompt-customization.mdx) to learn more.
Customization](../docs/guidelines/prompt-customization.mdx) to learn more.
</Admonition>
- **template:** Template used to format an individual request.

View file

@ -9,6 +9,21 @@ import Admonition from '@theme/Admonition';
</Admonition>
### SearchApi
Real-time search engine results API. Returns structured JSON data that includes answer box, knowledge graph, organic results, and more.
**Parameters**
- **Api Key:** A unique identifier for the SearchApi, necessary for authenticating requests to real-time search engines. This key can be retrieved from the [SearchApi dashboard](https://www.searchapi.io/).
- **Engine:** Specifies the search engine. For instance: google, google_scholar, bing, youtube, and youtube_transcripts. A full list of supported engines is available in the [documentation](https://www.searchapi.io/docs/google).
- **Parameters:** Allows the selection of any parameters recognized by SearchApi, with some being required and others optional.
**Output**
- **Document:** The JSON response from the request as a Document.
### BingSearchRun
Bing Search is a web search engine owned and operated by Microsoft. It provides search results for various types of content, including web pages, images, videos, and news articles. It uses a combination of algorithms and human editors to deliver search results to users.
@ -60,4 +75,4 @@ Tool for getting metadata about a SQL database. The input to this tool is a comm
**Params**
- **Db:** SQLDatabase to query.
- **Db:** SQLDatabase to query.

View file

@ -74,3 +74,23 @@ Build a Document containing a JSON object using a key and another Document page
**Output**
- **List of Documents:** A list containing the Document with the JSON object.
## Unique ID Generator
Generates a unique identifier (UUID) for each instance it is invoked, providing a distinct and reliable identifier suitable for a variety of applications.
**Params**
- **Value:** This field displays the generated unique identifier (UUID). The UUID is generated dynamically for each instance of the component, ensuring uniqueness across different uses.
**Output**
- Returns a unique identifier (UUID) as a string. This UUID is generated using Python's `uuid` module, ensuring that each identifier is unique and can be used as a reliable reference in your application.
<Admonition type="note" title="Note">
<p>
The Unique ID Generator is crucial for scenarios requiring distinct identifiers, such as session management, transaction tracking, or any context where different instances or entities must be uniquely identified. The generated UUID is provided as a hexadecimal string, offering a high level of uniqueness and security for identification purposes.
</p>
</Admonition>
For additional information and examples, please consult the [Langflow Components Custom Documentation](http://docs.langflow.org/components/custom).

View file

@ -12,7 +12,7 @@
## 🐦 Stay tuned for **Langflow** on Twitter
Follow [@logspace_ai](https://twitter.com/logspace_ai) on **Twitter** to get the latest news about **Langflow**.
Follow [@langflow_ai](https://twitter.com/langflow_ai) on **Twitter** to get the latest news about **Langflow**.
---
## ⭐️ Star **Langflow** on GitHub

View file

@ -4,9 +4,8 @@
This guide will help you set up a Langflow development VM in a Google Cloud Platform project using Google Cloud Shell.
:::note
When Cloud Shell opens, be sure to select **Trust repo**. Some `gcloud` commands might not run in an ephemeral Cloud Shell environment.
:::
> Note: When Cloud Shell opens, be sure to select **Trust repo**. Some `gcloud` commands might not run in an ephemeral Cloud Shell environment.
## Standard VM

View file

@ -14,6 +14,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
alt="Docusaurus themed image"
sources={{
light: "img/buffer-memory.png",
dark: "img/buffer-memory.png",
}}
/>

View file

@ -20,6 +20,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
alt="Docusaurus themed image"
sources={{
light: "img/basic-chat.png",
dark: "img/basic-chat.png",
}}
/>

View file

@ -32,6 +32,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
alt="Docusaurus themed image"
sources={{
light: "img/csv-loader.png",
dark: "img/csv-loader.png",
}}
/>
@ -39,12 +40,12 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`CSVLoader`](https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/csv)
- [`CSVLoader`](https://python.langchain.com/docs/integrations/document_loaders/csv)
- [`CharacterTextSplitter`](https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/character_text_splitter)
- [`OpenAIEmbedding`](https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/openai)
- [`Chroma`](https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/chroma)
- [`OpenAIEmbedding`](https://python.langchain.com/docs/integrations/text_embedding/openai)
- [`Chroma`](https://python.langchain.com/docs/integrations/vectorstores/chroma)
- [`VectorStoreInfo`](https://python.langchain.com/docs/modules/data_connection/vectorstores/)
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`VectorStoreAgent`](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore)
- [`VectorStoreAgent`](https://js.langchain.com/docs/modules/agents/tools/how_to/agents_with_vectorstores)
</Admonition>

View file

@ -14,6 +14,7 @@ The CustomComponent class allows us to create components that interact with Lang
alt="Document Processor Component"
sources={{
light: "img/flow_runner.png",
dark: "img/flow_runner.png",
}}
style={{
width: "30%",
@ -339,6 +340,7 @@ Done! This is what our script and custom component looks like:
alt="Document Processor Code"
sources={{
light: "img/flow_runner_code.png",
dark: "img/flow_runner_code.png",
}}
style={{
maxWidth: "100%",
@ -353,6 +355,7 @@ Done! This is what our script and custom component looks like:
alt="Document Processor Component"
sources={{
light: "img/flow_runner.png",
dark: "img/flow_runner.png",
}}
style={{
width: "40%",

View file

@ -12,6 +12,7 @@ Langflow Examples is a repository on [GitHub](https://github.com/logspace-ai/lan
alt="Docusaurus themed image"
sources={{
light: "img/community-examples.png",
dark: "img/community-examples.png",
}}
style={{ width: "100%" }}
/>

View file

@ -32,6 +32,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
alt="Docusaurus themed image"
sources={{
light: "img/midjourney-prompt-chain.png",
dark: "img/midjourney-prompt-chain.png",
}}
/>
@ -40,6 +41,6 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`ConversationSummaryMemory`](https://python.langchain.com/docs/modules/memory/how_to/summary)
- [`ConversationSummaryMemory`](https://python.langchain.com/docs/modules/memory/types/summary)
</Admonition>

View file

@ -24,7 +24,7 @@ https://pt.wikipedia.org/wiki/Harry_Potter
<Admonition type="info">
Learn more about Multiple Vector Stores
[here](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore?highlight=Multiple%20Vector%20Stores#multiple-vectorstores).
[here](https://python.langchain.com/docs/modules/data_connection/vectorstores/).
</Admonition>
## ⛓️ Langflow Example
@ -37,6 +37,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
alt="Docusaurus themed image"
sources={{
light: "img/multiple-vectorstores.png",
dark: "img/multiple-vectorstores.png",
}}
/>
@ -44,14 +45,14 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`WebBaseLoader`](https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/web_base)
- [`TextLoader`](https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/unstructured_file)
- [`WebBaseLoader`](https://python.langchain.com/docs/integrations/document_loaders/web_base)
- [`TextLoader`](https://python.langchain.com/docs/modules/data_connection/document_loaders/)
- [`CharacterTextSplitter`](https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/character_text_splitter)
- [`OpenAIEmbedding`](https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/openai)
- [`Chroma`](https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/chroma)
- [`OpenAIEmbedding`](https://python.langchain.com/docs/integrations/text_embedding/openai)
- [`Chroma`](https://python.langchain.com/docs/integrations/vectorstores/chroma)
- [`VectorStoreInfo`](https://python.langchain.com/docs/modules/data_connection/vectorstores/)
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`VectorStoreRouterToolkit`](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore)
- [`VectorStoreRouterAgent`](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore)
- [`VectorStoreRouterToolkit`](https://js.langchain.com/docs/modules/agents/tools/how_to/agents_with_vectorstores)
- [`VectorStoreRouterAgent`](https://js.langchain.com/docs/modules/agents/tools/how_to/agents_with_vectorstores)
</Admonition>

View file

@ -28,7 +28,7 @@ The `AgentInitializer` component is a quick way to construct an agent from the m
<Admonition type="info">
The `PythonFunction` is a custom component that uses the LangChain 🦜🔗 tool
decorator. Learn more about it
[here](https://python.langchain.com/docs/modules/agents/tools/how_to/custom_tools).
[here](https://python.langchain.com/docs/modules/agents/tools/custom_tools).
</Admonition>
## ⛓️ Langflow Example
@ -41,6 +41,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
alt="Docusaurus themed image"
sources={{
light: "img/python-function.png",
dark: "img/python-function.png",
}}
/>
@ -48,7 +49,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`PythonFunctionTool`](https://python.langchain.com/docs/modules/agents/tools/how_to/custom_tools)
- [`PythonFunctionTool`](https://python.langchain.com/docs/modules/agents/tools/custom_tools)
- [`ChatOpenAI`](https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai)
- [`AgentInitializer`](https://python.langchain.com/docs/modules/agents/)

View file

@ -0,0 +1,52 @@
import Admonition from "@theme/Admonition";
# SearchApi Tool
The [SearchApi](https://www.searchapi.io/) allows developers to retrieve results from search engines such as Google, Google Scholar, YouTube, YouTube transcripts, and more, and can be used as in Langflow through the `SearchApi` tool.
<Admonition type="info">
To use the SearchApi, you must first obtain an API key by registering at [SearchApi's website](https://www.searchapi.io/).
</Admonition>
In the given example, we specify `engine` as `youtube_transcripts` and provide a `video_id`.
<Admonition type="info">
All engines and parameters can be found in [SearchApi documentation](https://www.searchapi.io/docs/google).
</Admonition>
The `RetrievalQA` chain processes a `Document` along with a user's question to return an answer.
<Admonition type="tip">
In this example, we used [`ChatOpenAI`](https://platform.openai.com/) as the
LLM, but feel free to experiment with other Language Models!
</Admonition>
The `RetrievalQA` takes `CombineDocsChain` and `SearchApi` tool as inputs, using the tool as a `Document` to answer questions.
<Admonition type="info">
Learn more about the SearchApi
[here](https://python.langchain.com/docs/integrations/tools/searchapi).
</Admonition>
## ⛓️ Langflow Example
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/searchapi-tool.png",
}}
/>
#### <a target="\_blank" href="json_files/SearchApi_Tool.json" download>Download Flow</a>
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`SearchApiAPIWrapper`](https://python.langchain.com/docs/integrations/providers/searchapi#wrappers)
- [`ZeroShotAgent`](https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent)
</Admonition>

View file

@ -22,7 +22,7 @@ The `ZeroShotAgent` takes the `LLMChain` and the `Search` tool as inputs, using
<Admonition type="info">
Learn more about the Serp API
[here](https://python.langchain.com/docs/modules/agents/tools/integrations/serpapi).
[here](https://python.langchain.com/docs/integrations/providers/serpapi ).
</Admonition>
## ⛓️ Langflow Example
@ -35,6 +35,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
alt="Docusaurus themed image"
sources={{
light: "img/serp-api-tool.png",
dark: "img/serp-api-tool.png",
}}
/>
@ -45,7 +46,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
- [`ZeroShotPrompt`](https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/)
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`LLMChain`](https://python.langchain.com/docs/modules/chains/foundational/llm_chain)
- [`Search`](https://python.langchain.com/docs/modules/agents/tools/integrations/serpapi)
- [`Search`](https://python.langchain.com/docs/integrations/providers/serpapi)
- [`ZeroShotAgent`](https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent)
</Admonition>

View file

@ -7,18 +7,20 @@ import ReactPlayer from "react-player";
## Compose
Creating flows with Langflow is easy. Drag sidebar components onto the canvas and connect them together to create your pipeline. Langflow provides a range of [LangChain components](https://python.langchain.com/docs/modules/) to choose from, including LLMs, prompt serializers, agents, and chains.
Creating flows with Langflow is easy. Drag sidebar components onto the canvas and connect them together to create your pipeline.
Langflow provides a range of Components to choose from, including **Chat Input**, **Chat Output**, **API Request** and **Prompt**.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/langflow_canvas.png",
dark: "img/langflow_canvas.png"
}}
/>
## Fork
## Starter Flows
The easiest way to start with Langflow is by forking a **community example**. Forking an example stores a copy in your project collection, allowing you to edit and save the modified version as a new flow.
Langflow provides a range of starter flows to help you get started. These flows are pre-built and can be used as a starting point for your own flows.
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
@ -26,9 +28,21 @@ The easiest way to start with Langflow is by forking a **community example**. Fo
<ReactPlayer playing controls url="/videos/langflow_fork.mp4" />
</div>
## Build
## Defining Inputs and Outputs
Each flow can have multiple inputs and outputs. These can be defined by placing **Inputs** and **Outputs** components on the canvas.
The **Inputs** components define the inputs to the flow.
Whenever you place an Input component on the canvas, it will allow you to interactively define change its value
from the Interactive Panel.
The **Text Input** component allows you to define a text input, and the **Chat Input** component allows you to use the chat input from the Interactive Panel.
The **Outputs** components define the outputs of the flow and work similarly to the Inputs components.
Both Inputs and Outputs components can be connected to other components on the canvas and are used to define how the API works too.
Building a flow means validating if the components have prerequisites fulfilled and are properly instantiated. When a chat message is sent, the flow will run for the first time, executing the pipeline.
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}

View file

@ -12,6 +12,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
alt="Docusaurus themed image"
sources={{
light: "img/hugging-face.png",
dark: "img/hugging-face.png",
}}
style={{ width: "100%" }}
/>

View file

@ -17,6 +17,7 @@ Langflow offers an API Key functionality that allows users to access their indiv
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/api-key.png"),
dark: useBaseUrl("img/api-key.png"),
}}
style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
/>

View file

@ -13,6 +13,7 @@ Langflows chat interface provides a user-friendly experience and functionalit
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/chat_interface.png"),
dark: useBaseUrl("img/chat_interface.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
@ -25,6 +26,7 @@ Notice that editing variables in the chat interface take place temporarily and w
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/chat_interface2.png"),
dark: useBaseUrl("img/chat_interface2.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
@ -36,6 +38,7 @@ To view the complete prompt in its original, structured format, click the "Displ
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/chat_interface3.png"),
dark: useBaseUrl("img/chat_interface3.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
@ -47,6 +50,7 @@ In the chat interface, you can redefine which variable should be interpreted as
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/chat_interface4.png"),
dark: useBaseUrl("img/chat_interface4.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

View file

@ -38,6 +38,7 @@ import Admonition from "@theme/Admonition";
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/widget-sidebar.png"),
dark: useBaseUrl("img/widget-sidebar.png"),
}}
style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
/>
@ -53,6 +54,7 @@ import Admonition from "@theme/Admonition";
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/widget-code.png"),
dark: useBaseUrl("img/widget-code.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

View file

@ -30,6 +30,7 @@ Components are the building blocks of the flows. They are made of inputs, output
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/single-compenent.png"),
dark: useBaseUrl("img/single-compenent.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

View file

@ -63,6 +63,7 @@ class DocumentProcessor(CustomComponent):
alt="Document Processor Component"
sources={{
light: "img/document_processor.png",
dark: "img/document_processor.png",
}}
style={{
margin: "0 auto",
@ -330,6 +331,7 @@ All done! This is what our script and brand-new custom component look like:
alt="Document Processor Code"
sources={{
light: "img/document_processor_code.png",
dark: "img/document_processor_code.png",
}}
style={{
maxWidth: "100%",
@ -344,6 +346,7 @@ All done! This is what our script and brand-new custom component look like:
alt="Document Processor Component"
sources={{
light: "img/document_processor.png",
dark: "img/document_processor.png",
}}
style={{
width: "40%",

View file

@ -18,6 +18,7 @@ import Admonition from "@theme/Admonition";
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/features.png"),
dark: useBaseUrl("img/features.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

View file

@ -86,6 +86,7 @@ With _`LANGFLOW_AUTO_LOGIN`_ set to _`False`_, Langflow requires users to sign u
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/sign-up.png"),
dark: useBaseUrl("img/sign-up.png"),
}}
style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
/>
@ -102,6 +103,7 @@ Users can change their profile settings by clicking on the profile icon in the t
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/my-account.png"),
dark: useBaseUrl("img/my-account.png"),
}}
style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
/>
@ -112,6 +114,7 @@ By clicking on **Profile Settings**, the user is taken to the profile settings p
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/profile-settings.png"),
dark: useBaseUrl("img/profile-settings.png"),
}}
style={{ maxWidth: "600px", margin: "0 auto" }}
/>
@ -122,6 +125,7 @@ By clicking on **Admin Page**, the superuser is taken to the admin page, where t
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/admin-page.png"),
dark: useBaseUrl("img/admin-page.png"),
}}
style={{ maxWidth: "600px", margin: "0 auto" }}

View file

@ -13,6 +13,7 @@ The prompt template allows users to create prompts and define variables that pro
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/prompt_customization.png"),
dark: useBaseUrl("img/prompt_customization.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
@ -25,6 +26,7 @@ Variables can be used to define instructions, questions, context, inputs, or exa
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/prompt_customization2.png"),
dark: useBaseUrl("img/prompt_customization2.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
@ -37,6 +39,7 @@ Once inserted, these variables are immediately recognized as new fields in the p
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/prompt_customization3.png"),
dark: useBaseUrl("img/prompt_customization3.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
@ -49,6 +52,7 @@ You can also use documents or output parsers as prompt variables. By plugging th
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/prompt_customization4.png"),
dark: useBaseUrl("img/prompt_customization4.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
@ -63,6 +67,7 @@ If working with an interactive (chat-like) flow, remember to keep one of the inp
alt="Docusaurus themed image"
sources={{
light: useBaseUrl("img/prompt_customization5.png"),
dark: useBaseUrl("img/prompt_customization5.png"),
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

View file

@ -1,6 +1,6 @@
# 👋 Welcome to Langflow
Langflow is an easy way to prototype [LangChain](https://github.com/hwchase17/langchain) flows. The drag-and-drop feature allows quick and effortless experimentation, while the built-in chat interface facilitates real-time interaction. It provides options to edit prompt parameters, create chains and agents, track thought processes, and export flows.
Langflow is an easy way to create flows. The drag-and-drop feature allows quick and effortless experimentation, while the built-in chat interface facilitates real-time interaction. It provides options to edit prompt parameters, create chains and agents, track thought processes, and export flows.
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
@ -11,7 +11,8 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/new_langflow.gif",
light: "img/new_langflow_demo.gif",
dark: "img/new_langflow_demo.gif",
}}
style={{ width: "100%" }}
/>

View file

@ -90,7 +90,7 @@ module.exports = {
},
{
position: "right",
href: "https://twitter.com/logspace_ai",
href: "https://twitter.com/langflow_ai",
position: "right",
className: "header-twitter-link",
target: "_blank",

View file

@ -33,11 +33,18 @@ module.exports = {
label: "Component Reference",
collapsed: false,
items: [
"components/inputs",
"components/outputs",
"components/data",
"components/prompts",
"components/models",
"components/helpers",
"components/experimental",
"components/agents",
"components/chains",
"components/custom",
"components/embeddings",
"components/llms",
"components/model_specs",
"components/loaders",
"components/memories",
"components/prompts",
@ -81,6 +88,7 @@ module.exports = {
"examples/buffer-memory",
"examples/midjourney-prompt-chain",
"examples/csv-loader",
"examples/searchapi-tool",
"examples/serp-api-tool",
"examples/multiple-vectorstores",
"examples/python-function",

BIN
docs/static/img/new_langflow_demo.gif vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 20 MiB

BIN
docs/static/img/searchapi-tool.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 654 KiB

File diff suppressed because one or more lines are too long

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 550 KiB

5959
poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.6.5a13"
version = "0.7.0a0"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [
@ -25,59 +25,57 @@ documentation = "https://docs.langflow.org"
langflow = "langflow.__main__:main"
[tool.poetry.dependencies]
python = ">=3.9,<3.11"
fastapi = "^0.108.0"
uvicorn = "^0.25.0"
python = ">=3.9,<3.12"
duckdb = "^0.9.2"
fastapi = "^0.109.0"
uvicorn = "^0.27.0"
beautifulsoup4 = "^4.12.2"
google-search-results = "^2.4.1"
google-api-python-client = "^2.79.0"
google-api-python-client = "^2.118.0"
typer = "^0.9.0"
gunicorn = "^21.2.0"
langchain = "~0.1.0"
openai = "^1.10.0"
pandas = "2.0.3"
chromadb = "^0.4.0"
huggingface-hub = { version = "^0.19.0", extras = ["inference"] }
openai = "^1.12.0"
pandas = "2.2.0"
chromadb = "^0.4.23"
huggingface-hub = { version = "^0.20.0", extras = ["inference"] }
rich = "^13.7.0"
llama-cpp-python = { version = "~0.2.0", optional = true }
networkx = "^3.1"
unstructured = "^0.11.0"
pypdf = "^3.17.0"
lxml = "^4.9.2"
pypdf = "^4.0.0"
pysrt = "^1.1.2"
fake-useragent = "^1.4.0"
docstring-parser = "^0.15"
psycopg2-binary = "^2.9.6"
pyarrow = "^14.0.0"
tiktoken = "~0.5.0"
tiktoken = "~0.6.0"
wikipedia = "^1.4.0"
qdrant-client = "^1.7.0"
websockets = "^10.3"
weaviate-client = "*"
jina = "*"
sentence-transformers = { version = "^2.2.2", optional = true }
sentence-transformers = { version = "^2.3.1", optional = true }
ctransformers = { version = "^0.2.10", optional = true }
cohere = "^4.39.0"
python-multipart = "^0.0.6"
cohere = "^4.47.0"
python-multipart = "^0.0.7"
sqlmodel = "^0.0.14"
faiss-cpu = "^1.7.4"
anthropic = "^0.13.0"
orjson = "3.9.3"
anthropic = "^0.15.0"
orjson = "3.9.15"
multiprocess = "^0.70.14"
cachetools = "^5.3.1"
types-cachetools = "^5.3.0.5"
platformdirs = "^4.1.0"
pinecone-client = "^2.2.2"
platformdirs = "^4.2.0"
pinecone-client = "^3.0.3"
pymongo = "^4.6.0"
supabase = "^2.3.0"
certifi = "^2023.11.17"
google-cloud-aiplatform = "^1.36.0"
psycopg = "^3.1.9"
psycopg-binary = "^3.1.9"
fastavro = "^1.8.0"
langchain-experimental = "*"
celery = { extras = ["redis"], version = "^5.3.6", optional = true }
redis = { version = "^4.6.0", optional = true }
redis = { version = "^5.0.1", optional = true }
flower = { version = "^2.0.0", optional = true }
alembic = "^1.13.0"
passlib = "^1.7.4"
@ -86,50 +84,54 @@ python-jose = "^3.3.0"
metaphor-python = "^0.1.11"
pydantic = "^2.5.0"
pydantic-settings = "^2.1.0"
zep-python = "*"
zep-python = "1.5.0"
pywin32 = { version = "^306", markers = "sys_platform == 'win32'" }
loguru = "^0.7.1"
langfuse = "^1.1.11"
pillow = "^10.0.0"
metal-sdk = "^2.4.0"
langfuse = "^2.9.0"
pillow = "^10.2.0"
metal-sdk = "^2.5.0"
markupsafe = "^2.1.3"
extract-msg = "^0.45.0"
extract-msg = "^0.47.0"
# jq is not available for windows
jq = { version = "^1.6.0", markers = "sys_platform != 'win32'" }
boto3 = "^1.34.0"
numexpr = "^2.8.6"
qianfan = "0.2.0"
qianfan = "0.3.0"
pgvector = "^0.2.3"
pyautogen = "^0.2.0"
langchain-google-genai = "^0.0.2"
elasticsearch = "^8.11.1"
langchain-google-genai = "^0.0.6"
elasticsearch = "^8.12.0"
pytube = "^15.0.0"
llama-index = "^0.9.24"
langchain-openai = "^0.0.2"
python-socketio = "^5.11.0"
llama-index = "^0.10.13"
langchain-openai = "^0.0.6"
unstructured = { extras = ["md"], version = "^0.12.4" }
dspy-ai = "^2.4.0"
[tool.poetry.group.dev.dependencies]
pytest-asyncio = "^0.23.1"
types-redis = "^4.6.0.5"
ipykernel = "^6.27.0"
ipykernel = "^6.29.0"
mypy = "^1.8.0"
ruff = "^0.1.5"
ruff = "^0.2.1"
httpx = "*"
pytest = "^7.4.2"
pytest = "^8.0.0"
types-requests = "^2.31.0"
requests = "^2.31.0"
pytest-cov = "^4.1.0"
pandas-stubs = "^2.0.0.230412"
types-pillow = "^9.5.0.2"
pandas-stubs = "^2.1.4.231227"
types-pillow = "^10.2.0.20240213"
types-pyyaml = "^6.0.12.8"
types-python-jose = "^3.3.4.8"
types-passlib = "^1.7.7.13"
locust = "^2.19.1"
locust = "^2.23.1"
pytest-mock = "^3.12.0"
pytest-xdist = "^3.5.0"
types-pywin32 = "^306.0.0.4"
types-google-cloud-ndb = "^2.2.0.0"
pytest-sugar = "^0.9.7"
pytest-sugar = "^1.0.0"
pytest-instafail = "^0.5.0"
respx = "^0.20.2"
[tool.poetry.extras]

View file

@ -109,7 +109,11 @@ def version_callback(value: bool):
@app.callback()
def main_entry_point(
version: bool = typer.Option(
None, "--version", callback=version_callback, is_eager=True, help="Show the version and exit."
None,
"--version",
callback=version_callback,
is_eager=True,
help="Show the version and exit.",
),
):
"""

View file

@ -63,7 +63,7 @@ version_path_separator = os # Use os.pathsep. Default configuration used for ne
# This is the path to the db in the root of the project.
# When the user runs the Langflow the database url will
# be set dinamically.
sqlalchemy.url = sqlite:///../../../langflow.db
sqlalchemy.url = sqlite:///./langflow.db
[post_write_hooks]
@ -98,7 +98,7 @@ handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
level = DEBUG
handlers =
qualname = alembic

View file

@ -1,10 +1,11 @@
import os
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
from loguru import logger
from sqlalchemy import engine_from_config, pool
from langflow.services.database.models import * # noqa
from langflow.services.database.service import SQLModel
# this is the Alembic Config object, which provides
@ -40,7 +41,8 @@ def run_migrations_offline() -> None:
script output.
"""
url = config.get_main_option("sqlalchemy.url")
url = os.getenv("LANGFLOW_DATABASE_URL")
url = url or config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
@ -60,12 +62,32 @@ def run_migrations_online() -> None:
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section, {}),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
try:
from langflow.services.database.factory import DatabaseServiceFactory
from langflow.services.deps import get_db_service
from langflow.services.manager import (
initialize_settings_service,
service_manager,
)
from langflow.services.schema import ServiceType
initialize_settings_service()
service_manager.register_factory(
DatabaseServiceFactory(), [ServiceType.SETTINGS_SERVICE]
)
connectable = get_db_service().engine
except Exception as e:
logger.error(f"Error getting database engine: {e}")
url = os.getenv("LANGFLOW_DATABASE_URL")
url = url or config.get_main_option("sqlalchemy.url")
if url:
config.set_main_option("sqlalchemy.url", url)
connectable = engine_from_config(
config.get_section(config.config_ini_section, {}),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata, render_as_batch=True

View file

@ -10,6 +10,7 @@ from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
import sqlmodel
from sqlalchemy.engine.reflection import Inspector
${imports if imports else ""}
# revision identifiers, used by Alembic.
@ -20,8 +21,14 @@ depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
def upgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
table_names = inspector.get_table_names()
${upgrades if upgrades else "pass"}
def downgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
table_names = inspector.get_table_names()
${downgrades if downgrades else "pass"}

View file

@ -5,28 +5,43 @@ Revises: 1ef9c4f3765d
Create Date: 2023-12-13 18:55:52.587360
"""
from typing import Sequence, Union
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = '006b3990db50'
down_revision: Union[str, None] = '1ef9c4f3765d'
revision: str = "006b3990db50"
down_revision: Union[str, None] = "1ef9c4f3765d"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
api_key_constraints = inspector.get_unique_constraints("apikey")
flow_constraints = inspector.get_unique_constraints("flow")
user_constraints = inspector.get_unique_constraints("user")
try:
with op.batch_alter_table('apikey', schema=None) as batch_op:
batch_op.create_unique_constraint('uq_apikey_id', ['id'])
if not any(
constraint["name"] == "uq_apikey_id" for constraint in api_key_constraints
):
with op.batch_alter_table("apikey", schema=None) as batch_op:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.create_unique_constraint('uq_flow_id', ['id'])
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.create_unique_constraint('uq_user_id', ['id'])
batch_op.create_unique_constraint("uq_apikey_id", ["id"])
if not any(
constraint["name"] == "uq_flow_id" for constraint in flow_constraints
):
with op.batch_alter_table("flow", schema=None) as batch_op:
batch_op.create_unique_constraint("uq_flow_id", ["id"])
if not any(
constraint["name"] == "uq_user_id" for constraint in user_constraints
):
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.create_unique_constraint("uq_user_id", ["id"])
except Exception as e:
print(e)
pass
@ -36,15 +51,24 @@ def upgrade() -> None:
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
api_key_constraints = inspector.get_unique_constraints("apikey")
flow_constraints = inspector.get_unique_constraints("flow")
user_constraints = inspector.get_unique_constraints("user")
try:
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.drop_constraint('uq_user_id', type_='unique')
if any(
constraint["name"] == "uq_apikey_id" for constraint in api_key_constraints
):
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.drop_constraint("uq_user_id", type_="unique")
if any(constraint["name"] == "uq_flow_id" for constraint in flow_constraints):
with op.batch_alter_table("flow", schema=None) as batch_op:
batch_op.drop_constraint("uq_flow_id", type_="unique")
if any(constraint["name"] == "uq_user_id" for constraint in user_constraints):
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.drop_constraint('uq_flow_id', type_='unique')
with op.batch_alter_table('apikey', schema=None) as batch_op:
batch_op.drop_constraint('uq_apikey_id', type_='unique')
with op.batch_alter_table("apikey", schema=None) as batch_op:
batch_op.drop_constraint("uq_apikey_id", type_="unique")
except Exception as e:
print(e)
pass

View file

@ -5,67 +5,25 @@ Revises: 006b3990db50
Create Date: 2024-01-17 10:32:56.686287
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '0b8757876a7c'
down_revision: Union[str, None] = '006b3990db50'
revision: str = "0b8757876a7c"
down_revision: Union[str, None] = "006b3990db50"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table('apikey', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_apikey_api_key'), ['api_key'], unique=True)
batch_op.create_index(batch_op.f('ix_apikey_name'), ['name'], unique=False)
batch_op.create_index(batch_op.f('ix_apikey_user_id'), ['user_id'], unique=False)
except Exception as e:
print(e)
pass
try:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_flow_description'), ['description'], unique=False)
batch_op.create_index(batch_op.f('ix_flow_name'), ['name'], unique=False)
batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
except Exception as e:
print(e)
pass
pass
try:
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_user_username'), ['username'], unique=True)
except Exception as e:
print(e)
pass
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_user_username'))
except Exception as e:
print(e)
pass
try:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_flow_user_id'))
batch_op.drop_index(batch_op.f('ix_flow_name'))
batch_op.drop_index(batch_op.f('ix_flow_description'))
except Exception as e:
print(e)
pass
try:
with op.batch_alter_table('apikey', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_apikey_user_id'))
batch_op.drop_index(batch_op.f('ix_apikey_name'))
batch_op.drop_index(batch_op.f('ix_apikey_api_key'))
except Exception as e:
print(e)
pass
# ### end Alembic commands ###
pass
# ### end Alembic commands ###

View file

@ -6,6 +6,7 @@ Revises: fd531f8868b1
Create Date: 2023-12-04 15:00:27.968998
"""
from typing import Sequence, Union
import sqlalchemy as sa
@ -13,8 +14,8 @@ import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '1ef9c4f3765d'
down_revision: Union[str, None] = 'fd531f8868b1'
revision: str = "1ef9c4f3765d"
down_revision: Union[str, None] = "fd531f8868b1"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@ -22,10 +23,10 @@ depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table('apikey', schema=None) as batch_op:
batch_op.alter_column('name',
existing_type=sqlmodel.sql.sqltypes.AutoString(),
nullable=True)
with op.batch_alter_table("apikey", schema=None) as batch_op:
batch_op.alter_column(
"name", existing_type=sqlmodel.sql.sqltypes.AutoString(), nullable=True
)
except Exception as e:
pass
# ### end Alembic commands ###
@ -34,10 +35,8 @@ def upgrade() -> None:
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table('apikey', schema=None) as batch_op:
batch_op.alter_column('name',
existing_type=sa.VARCHAR(),
nullable=False)
with op.batch_alter_table("apikey", schema=None) as batch_op:
batch_op.alter_column("name", existing_type=sa.VARCHAR(), nullable=False)
except Exception as e:
pass
# ### end Alembic commands ###

View file

@ -5,6 +5,7 @@ Revises:
Create Date: 2023-08-27 19:49:02.681355
"""
from typing import Sequence, Union
import sqlalchemy as sa
@ -33,7 +34,9 @@ def upgrade() -> None:
if "ix_flowstyle_flow_id" in [
index["name"] for index in inspector.get_indexes("flowstyle")
]:
op.drop_index("ix_flowstyle_flow_id", table_name="flowstyle")
op.drop_index(
"ix_flowstyle_flow_id", table_name="flowstyle", if_exists=True
)
existing_indices_flow = []
existing_fks_flow = []
@ -80,8 +83,7 @@ def upgrade() -> None:
sa.Column("api_key", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
["user_id"], ["user.id"], name="fk_apikey_user_id_user"
),
sa.PrimaryKeyConstraint("id", name="pk_apikey"),
sa.UniqueConstraint("id", name="uq_apikey_id"),
@ -103,8 +105,7 @@ def upgrade() -> None:
sa.Column("id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
["user_id"], ["user.id"], name="fk_flow_user_id_user"
),
sa.PrimaryKeyConstraint("id", name="pk_flow"),
sa.UniqueConstraint("id", name="uq_flow_id"),
@ -151,21 +152,21 @@ def downgrade() -> None:
existing_tables = inspector.get_table_names()
if "flow" in existing_tables:
with op.batch_alter_table("flow", schema=None) as batch_op:
batch_op.drop_index(batch_op.f("ix_flow_user_id"))
batch_op.drop_index(batch_op.f("ix_flow_name"))
batch_op.drop_index(batch_op.f("ix_flow_description"))
batch_op.drop_index(batch_op.f("ix_flow_user_id"), if_exists=True)
batch_op.drop_index(batch_op.f("ix_flow_name"), if_exists=True)
batch_op.drop_index(batch_op.f("ix_flow_description"), if_exists=True)
op.drop_table("flow")
if "apikey" in existing_tables:
with op.batch_alter_table("apikey", schema=None) as batch_op:
batch_op.drop_index(batch_op.f("ix_apikey_user_id"))
batch_op.drop_index(batch_op.f("ix_apikey_name"))
batch_op.drop_index(batch_op.f("ix_apikey_api_key"))
batch_op.drop_index(batch_op.f("ix_apikey_user_id"), if_exists=True)
batch_op.drop_index(batch_op.f("ix_apikey_name"), if_exists=True)
batch_op.drop_index(batch_op.f("ix_apikey_api_key"), if_exists=True)
op.drop_table("apikey")
if "user" in existing_tables:
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.drop_index(batch_op.f("ix_user_username"))
batch_op.drop_index(batch_op.f("ix_user_username"), if_exists=True)
op.drop_table("user")

View file

@ -5,34 +5,44 @@ Revises: 7d2162acc8b2
Create Date: 2023-11-24 10:45:38.465302
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = '2ac71eb9c3ae'
down_revision: Union[str, None] = '7d2162acc8b2'
revision: str = "2ac71eb9c3ae"
down_revision: Union[str, None] = "7d2162acc8b2"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
tables = inspector.get_table_names()
try:
op.create_table('credential',
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('value', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('provider', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('user_id', sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column('id', sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
)
if "credential" not in tables:
op.create_table(
"credential",
sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("value", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column(
"provider", sqlmodel.sql.sqltypes.AutoString(), nullable=True
),
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
except Exception as e:
print(e)
pass
# ### end Alembic commands ###
@ -40,7 +50,7 @@ def upgrade() -> None:
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
op.drop_table('credential')
op.drop_table("credential")
except Exception as e:
print(e)
pass

View file

@ -0,0 +1,56 @@
"""Add icon and icon_bg_color to Flow
Revision ID: 63b9c451fd30
Revises: bc2f01c40e4a
Create Date: 2024-03-06 10:53:47.148658
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = "63b9c451fd30"
down_revision: Union[str, None] = "bc2f01c40e4a"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
table_names = inspector.get_table_names()
column_names = [column["name"] for column in inspector.get_columns("flow")]
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("flow", schema=None) as batch_op:
if "icon" not in column_names:
batch_op.add_column(
sa.Column("icon", sqlmodel.sql.sqltypes.AutoString(), nullable=True)
)
if "icon_bg_color" not in column_names:
batch_op.add_column(
sa.Column(
"icon_bg_color", sqlmodel.sql.sqltypes.AutoString(), nullable=True
)
)
# ### end Alembic commands ###
def downgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
table_names = inspector.get_table_names()
column_names = [column["name"] for column in inspector.get_columns("flow")]
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("flow", schema=None) as batch_op:
if "icon" in column_names:
batch_op.drop_column("icon")
if "icon_bg_color" in column_names:
batch_op.drop_column("icon_bg_color")
# ### end Alembic commands ###

View file

@ -5,6 +5,7 @@ Revises: 260dbcc8b680
Create Date: 2023-09-08 07:36:13.387318
"""
from typing import Sequence, Union
import sqlalchemy as sa
@ -21,29 +22,36 @@ depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
if "user" in inspector.get_table_names() and "profile_image" not in [
column["name"] for column in inspector.get_columns("user")
]:
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"profile_image", sqlmodel.sql.sqltypes.AutoString(), nullable=True
try:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
if "user" in inspector.get_table_names() and "profile_image" not in [
column["name"] for column in inspector.get_columns("user")
]:
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"profile_image",
sqlmodel.sql.sqltypes.AutoString(),
nullable=True,
)
)
)
except Exception as e:
print(e)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
if "user" in inspector.get_table_names() and "profile_image" in [
column["name"] for column in inspector.get_columns("user")
]:
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.drop_column("profile_image")
try:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
if "user" in inspector.get_table_names() and "profile_image" in [
column["name"] for column in inspector.get_columns("user")
]:
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.drop_column("profile_image")
except Exception as e:
print(e)
# ### end Alembic commands ###

View file

@ -5,12 +5,13 @@ Revises: eb5866d51fd2
Create Date: 2023-10-18 23:08:57.744906
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
from alembic import op
from loguru import logger
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = "7843803a87b5"
@ -21,19 +22,26 @@ depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
flow_columns = [column["name"] for column in inspector.get_columns("flow")]
user_columns = [column["name"] for column in inspector.get_columns("user")]
try:
with op.batch_alter_table("flow", schema=None) as batch_op:
batch_op.add_column(sa.Column("is_component", sa.Boolean(), nullable=True))
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"store_api_key", sqlmodel.AutoString(), nullable=True
if "is_component" not in flow_columns:
with op.batch_alter_table("flow", schema=None) as batch_op:
batch_op.add_column(
sa.Column("is_component", sa.Boolean(), nullable=True)
)
)
except Exception as e:
logger.exception(e)
pass
try:
if "store_api_key" not in user_columns:
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.add_column(
sa.Column("store_api_key", sqlmodel.AutoString(), nullable=True)
)
except Exception as e:
pass
# ### end Alembic commands ###

View file

@ -5,88 +5,74 @@ Revises: f5ee9749d1a6
Create Date: 2023-11-21 20:56:53.998781
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = '7d2162acc8b2'
down_revision: Union[str, None] = 'f5ee9749d1a6'
revision: str = "7d2162acc8b2"
down_revision: Union[str, None] = "f5ee9749d1a6"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
api_key_columns = [column["name"] for column in inspector.get_columns("apikey")]
flow_columns = [column["name"] for column in inspector.get_columns("flow")]
try:
with op.batch_alter_table('component', schema=None) as batch_op:
batch_op.drop_index('ix_component_frontend_node_id')
batch_op.drop_index('ix_component_name')
op.drop_table('component')
op.drop_table('flowstyle')
if "name" in api_key_columns:
with op.batch_alter_table("apikey", schema=None) as batch_op:
batch_op.alter_column(
"name", existing_type=sa.VARCHAR(), nullable=False
)
except Exception as e:
print(e)
pass
with op.batch_alter_table('apikey', schema=None) as batch_op:
batch_op.alter_column('name',
existing_type=sa.VARCHAR(),
nullable=False)
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
pass
try:
with op.batch_alter_table("flow", schema=None) as batch_op:
if "updated_at" not in flow_columns:
batch_op.add_column(
sa.Column("updated_at", sa.DateTime(), nullable=True)
)
if "folder" not in flow_columns:
batch_op.add_column(
sa.Column(
"folder", sqlmodel.sql.sqltypes.AutoString(), nullable=True
)
)
except Exception as e:
print(e)
pass
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.drop_column('folder')
batch_op.drop_column('updated_at')
with op.batch_alter_table("flow", schema=None) as batch_op:
batch_op.drop_column("folder")
batch_op.drop_column("updated_at")
except Exception as e:
print(e)
pass
try:
with op.batch_alter_table('apikey', schema=None) as batch_op:
batch_op.alter_column('name',
existing_type=sa.VARCHAR(),
nullable=True)
with op.batch_alter_table("apikey", schema=None) as batch_op:
batch_op.alter_column("name", existing_type=sa.VARCHAR(), nullable=True)
except Exception as e:
print(e)
pass
try:
op.create_table('flowstyle',
sa.Column('color', sa.VARCHAR(), nullable=False),
sa.Column('emoji', sa.VARCHAR(), nullable=False),
sa.Column('flow_id', sa.CHAR(length=32), nullable=True),
sa.Column('id', sa.CHAR(length=32), nullable=False),
sa.ForeignKeyConstraint(['flow_id'], ['flow.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id')
)
op.create_table('component',
sa.Column('id', sa.CHAR(length=32), nullable=False),
sa.Column('frontend_node_id', sa.CHAR(length=32), nullable=False),
sa.Column('name', sa.VARCHAR(), nullable=False),
sa.Column('description', sa.VARCHAR(), nullable=True),
sa.Column('python_code', sa.VARCHAR(), nullable=True),
sa.Column('return_type', sa.VARCHAR(), nullable=True),
sa.Column('is_disabled', sa.BOOLEAN(), nullable=False),
sa.Column('is_read_only', sa.BOOLEAN(), nullable=False),
sa.Column('create_at', sa.DATETIME(), nullable=False),
sa.Column('update_at', sa.DATETIME(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
with op.batch_alter_table('component', schema=None) as batch_op:
batch_op.create_index('ix_component_name', ['name'], unique=False)
batch_op.create_index('ix_component_frontend_node_id', ['frontend_node_id'], unique=False)
except Exception as e:
print(e)
pass
# ### end Alembic commands ###

View file

@ -5,55 +5,105 @@ Revises: 0b8757876a7c
Create Date: 2024-01-26 13:31:14.797548
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = 'b2fa308044b5'
down_revision: Union[str, None] = '0b8757876a7c'
revision: str = "b2fa308044b5"
down_revision: Union[str, None] = "0b8757876a7c"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
tables = inspector.get_table_names()
# ### commands auto generated by Alembic - please adjust! ###
try:
op.drop_table('flowstyle')
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.add_column(sa.Column('is_component', sa.Boolean(), nullable=True))
batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
batch_op.add_column(sa.Column('user_id', sqlmodel.sql.sqltypes.GUID(), nullable=True))
batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
batch_op.create_foreign_key('fk_flow_user_id_user', 'user', ['user_id'], ['id'])
if "flowstyle" in tables:
op.drop_table("flowstyle")
with op.batch_alter_table("flow", schema=None) as batch_op:
flow_columns = [column["name"] for column in inspector.get_columns("flow")]
if "is_component" not in flow_columns:
batch_op.add_column(
sa.Column("is_component", sa.Boolean(), nullable=True)
)
if "updated_at" not in flow_columns:
batch_op.add_column(
sa.Column("updated_at", sa.DateTime(), nullable=True)
)
if "folder" not in flow_columns:
batch_op.add_column(
sa.Column(
"folder", sqlmodel.sql.sqltypes.AutoString(), nullable=True
)
)
if "user_id" not in flow_columns:
batch_op.add_column(
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=True)
)
indices = inspector.get_indexes("flow")
indices_names = [index["name"] for index in indices]
if "ix_flow_user_id" not in indices_names:
batch_op.create_index(
batch_op.f("ix_flow_user_id"), ["user_id"], unique=False
)
if "fk_flow_user_id_user" not in indices_names:
batch_op.create_foreign_key(
"fk_flow_user_id_user", "user", ["user_id"], ["id"]
)
except Exception:
pass
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
try:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.drop_constraint('fk_flow_user_id_user', type_='foreignkey')
batch_op.drop_index(batch_op.f('ix_flow_user_id'))
batch_op.drop_column('user_id')
batch_op.drop_column('folder')
batch_op.drop_column('updated_at')
batch_op.drop_column('is_component')
# Re-create the dropped table 'flowstyle' if it was previously dropped in upgrade
if "flowstyle" not in inspector.get_table_names():
op.create_table(
"flowstyle",
sa.Column("color", sa.String(), nullable=False),
sa.Column("emoji", sa.String(), nullable=False),
sa.Column("flow_id", sqlmodel.sql.sqltypes.GUID(), nullable=True),
sa.Column("id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.ForeignKeyConstraint(["flow_id"], ["flow.id"]),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("id"),
)
op.create_table('flowstyle',
sa.Column('color', sa.VARCHAR(), nullable=False),
sa.Column('emoji', sa.VARCHAR(), nullable=False),
sa.Column('flow_id', sa.CHAR(length=32), nullable=True),
sa.Column('id', sa.CHAR(length=32), nullable=False),
sa.ForeignKeyConstraint(['flow_id'], ['flow.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id')
)
except Exception:
pass
# ### end Alembic commands ###
with op.batch_alter_table("flow", schema=None) as batch_op:
# Check and remove newly added columns and constraints in upgrade
flow_columns = [column["name"] for column in inspector.get_columns("flow")]
if "user_id" in flow_columns:
batch_op.drop_column("user_id")
if "folder" in flow_columns:
batch_op.drop_column("folder")
if "updated_at" in flow_columns:
batch_op.drop_column("updated_at")
if "is_component" in flow_columns:
batch_op.drop_column("is_component")
indices = inspector.get_indexes("flow")
indices_names = [index["name"] for index in indices]
if "ix_flow_user_id" in indices_names:
batch_op.drop_index("ix_flow_user_id")
# Assuming fk_flow_user_id_user is a foreign key constraint's name, not an index
constraints = inspector.get_foreign_keys("flow")
constraint_names = [constraint["name"] for constraint in constraints]
if "fk_flow_user_id_user" in constraint_names:
batch_op.drop_constraint("fk_flow_user_id_user", type_="foreignkey")
except Exception as e:
# It's generally a good idea to log the exception or handle it in a way other than a bare pass
print(f"Error during downgrade: {e}")

View file

@ -5,46 +5,68 @@ Revises: b2fa308044b5
Create Date: 2024-01-26 13:34:14.496769
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = 'bc2f01c40e4a'
down_revision: Union[str, None] = 'b2fa308044b5'
revision: str = "bc2f01c40e4a"
down_revision: Union[str, None] = "b2fa308044b5"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.add_column(sa.Column('is_component', sa.Boolean(), nullable=True))
batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
batch_op.add_column(sa.Column('user_id', sqlmodel.sql.sqltypes.GUID(), nullable=True))
batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
batch_op.create_foreign_key('flow_user_id_fkey'
, 'user', ['user_id'], ['id'])
except Exception:
pass
# ### end Alembic commands ###
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
flow_columns = {column["name"] for column in inspector.get_columns("flow")}
flow_indexes = {index["name"] for index in inspector.get_indexes("flow")}
flow_fks = {fk["name"] for fk in inspector.get_foreign_keys("flow")}
with op.batch_alter_table("flow", schema=None) as batch_op:
if "is_component" not in flow_columns:
batch_op.add_column(sa.Column("is_component", sa.Boolean(), nullable=True))
if "updated_at" not in flow_columns:
batch_op.add_column(sa.Column("updated_at", sa.DateTime(), nullable=True))
if "folder" not in flow_columns:
batch_op.add_column(
sa.Column("folder", sqlmodel.sql.sqltypes.AutoString(), nullable=True)
)
if "user_id" not in flow_columns:
batch_op.add_column(
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=True)
)
if "ix_flow_user_id" not in flow_indexes:
batch_op.create_index(
batch_op.f("ix_flow_user_id"), ["user_id"], unique=False
)
if "flow_user_id_fkey" not in flow_fks:
batch_op.create_foreign_key(
"flow_user_id_fkey", "user", ["user_id"], ["id"]
)
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.drop_constraint('flow_user_id_fkey', type_='foreignkey')
batch_op.drop_index(batch_op.f('ix_flow_user_id'))
batch_op.drop_column('user_id')
batch_op.drop_column('folder')
batch_op.drop_column('updated_at')
batch_op.drop_column('is_component')
except Exception:
pass
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
flow_columns = {column["name"] for column in inspector.get_columns("flow")}
flow_indexes = {index["name"] for index in inspector.get_indexes("flow")}
flow_fks = {fk["name"] for fk in inspector.get_foreign_keys("flow")}
# ### end Alembic commands ###
with op.batch_alter_table("flow", schema=None) as batch_op:
if "flow_user_id_fkey" in flow_fks:
batch_op.drop_constraint("flow_user_id_fkey", type_="foreignkey")
if "ix_flow_user_id" in flow_indexes:
batch_op.drop_index(batch_op.f("ix_flow_user_id"))
if "user_id" in flow_columns:
batch_op.drop_column("user_id")
if "folder" in flow_columns:
batch_op.drop_column("folder")
if "updated_at" in flow_columns:
batch_op.drop_column("updated_at")
if "is_component" in flow_columns:
batch_op.drop_column("is_component")

View file

@ -5,11 +5,10 @@ Revises: 67cc006d50bf
Create Date: 2023-10-04 10:18:25.640458
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from sqlalchemy import exc
# revision identifiers, used by Alembic.
revision: str = "eb5866d51fd2"
@ -21,70 +20,12 @@ depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
connection = op.get_bind()
try:
op.drop_table("flowstyle")
with op.batch_alter_table("component", schema=None) as batch_op:
batch_op.drop_index("ix_component_frontend_node_id")
batch_op.drop_index("ix_component_name")
except exc.SQLAlchemyError:
# connection.execute(text("ROLLBACK"))
pass
except Exception as e:
print(e)
pass
try:
op.drop_table("component")
except exc.SQLAlchemyError:
# connection.execute(text("ROLLBACK"))
pass
except Exception as e:
print(e)
pass
pass
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
op.create_table(
"component",
sa.Column("id", sa.CHAR(length=32), nullable=False),
sa.Column("frontend_node_id", sa.CHAR(length=32), nullable=False),
sa.Column("name", sa.VARCHAR(), nullable=False),
sa.Column("description", sa.VARCHAR(), nullable=True),
sa.Column("python_code", sa.VARCHAR(), nullable=True),
sa.Column("return_type", sa.VARCHAR(), nullable=True),
sa.Column("is_disabled", sa.BOOLEAN(), nullable=False),
sa.Column("is_read_only", sa.BOOLEAN(), nullable=False),
sa.Column("create_at", sa.DATETIME(), nullable=False),
sa.Column("update_at", sa.DATETIME(), nullable=False),
sa.PrimaryKeyConstraint("id", name="pk_component"),
)
with op.batch_alter_table("component", schema=None) as batch_op:
batch_op.create_index("ix_component_name", ["name"], unique=False)
batch_op.create_index(
"ix_component_frontend_node_id", ["frontend_node_id"], unique=False
)
except Exception as e:
print(e)
pass
try:
op.create_table(
"flowstyle",
sa.Column("color", sa.VARCHAR(), nullable=False),
sa.Column("emoji", sa.VARCHAR(), nullable=False),
sa.Column("flow_id", sa.CHAR(length=32), nullable=True),
sa.Column("id", sa.CHAR(length=32), nullable=False),
sa.ForeignKeyConstraint(
["flow_id"],
["flow.id"],
),
sa.PrimaryKeyConstraint("id", name="pk_flowstyle"),
sa.UniqueConstraint("id", name="uq_flowstyle_id"),
)
except Exception as e:
print(e)
pass
pass
# ### end Alembic commands ###

View file

@ -5,6 +5,7 @@ Revises: 7843803a87b5
Create Date: 2023-10-18 23:12:27.297016
"""
from typing import Sequence, Union
import sqlalchemy as sa

View file

@ -5,22 +5,35 @@ Revises: 2ac71eb9c3ae
Create Date: 2023-11-24 15:07:37.566516
"""
from typing import Sequence, Union
from typing import Optional, Sequence, Union
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = 'fd531f8868b1'
down_revision: Union[str, None] = '2ac71eb9c3ae'
revision: str = "fd531f8868b1"
down_revision: Union[str, None] = "2ac71eb9c3ae"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
tables = inspector.get_table_names()
foreign_keys_names = []
if "credential" in tables:
foreign_keys = inspector.get_foreign_keys("credential")
foreign_keys_names = [fk["name"] for fk in foreign_keys]
try:
with op.batch_alter_table('credential', schema=None) as batch_op:
batch_op.create_foreign_key("fk_credential_user_id", 'user', ['user_id'], ['id'])
if "credential" in tables and "fk_credential_user_id" not in foreign_keys_names:
with op.batch_alter_table("credential", schema=None) as batch_op:
batch_op.create_foreign_key(
"fk_credential_user_id", "user", ["user_id"], ["id"]
)
except Exception as e:
print(e)
pass
@ -30,9 +43,17 @@ def upgrade() -> None:
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
tables = inspector.get_table_names()
foreign_keys_names: list[Optional[str]] = []
if "credential" in tables:
foreign_keys = inspector.get_foreign_keys("credential")
foreign_keys_names = [fk["name"] for fk in foreign_keys]
try:
with op.batch_alter_table('credential', schema=None) as batch_op:
batch_op.drop_constraint("fk_credential_user_id", type_='foreignkey')
if "credential" in tables and "fk_credential_user_id" in foreign_keys_names:
with op.batch_alter_table("credential", schema=None) as batch_op:
batch_op.drop_constraint("fk_credential_user_id", type_="foreignkey")
except Exception as e:
print(e)
pass

View file

@ -6,8 +6,10 @@ from langflow.api.v1 import (
chat_router,
credentials_router,
endpoints_router,
files_router,
flows_router,
login_router,
monitor_router,
store_router,
users_router,
validate_router,
@ -25,3 +27,5 @@ router.include_router(users_router)
router.include_router(api_key_router)
router.include_router(login_router)
router.include_router(credentials_router)
router.include_router(files_router)
router.include_router(monitor_router)

View file

@ -1,10 +1,14 @@
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, List
from typing import TYPE_CHECKING, Optional
from fastapi import HTTPException
from platformdirs import user_cache_dir
from sqlmodel import Session
from langflow.graph.graph.base import Graph
from langflow.services.chat.service import ChatService
from langflow.services.database.models.flow import Flow
from langflow.services.store.schema import StoreComponentCreate
from langflow.services.store.utils import get_lf_version_from_pypi
@ -137,7 +141,7 @@ def get_file_path_value(file_path):
return file_path
def validate_is_component(flows: List["Flow"]):
def validate_is_component(flows: list["Flow"]):
for flow in flows:
if not flow.data or flow.is_component is not None:
continue
@ -171,19 +175,66 @@ async def check_langflow_version(component: StoreComponentCreate):
)
def format_elapsed_time(elapsed_time) -> str:
# Format elapsed time to human readable format coming from
# perf_counter()
# If the elapsed time is less than 1 second, return ms
# If the elapsed time is less than 1 minute, return seconds rounded to 2 decimals
time_str = ""
def format_elapsed_time(elapsed_time: float) -> str:
"""Format elapsed time to a human-readable format coming from perf_counter().
- Less than 1 second: returns milliseconds
- Less than 1 minute: returns seconds rounded to 2 decimals
- 1 minute or more: returns minutes and seconds
"""
if elapsed_time < 1:
elapsed_time = int(round(elapsed_time * 1000))
time_str = f"{elapsed_time} ms"
milliseconds = int(round(elapsed_time * 1000))
return f"{milliseconds} ms"
elif elapsed_time < 60:
elapsed_time = round(elapsed_time, 2)
time_str = f"{elapsed_time} seconds"
seconds = round(elapsed_time, 2)
unit = "second" if seconds == 1 else "seconds"
return f"{seconds} {unit}"
else:
elapsed_time = round(elapsed_time / 60, 2)
time_str = f"{elapsed_time} minutes"
return time_str
minutes = int(elapsed_time // 60)
seconds = round(elapsed_time % 60, 2)
minutes_unit = "minute" if minutes == 1 else "minutes"
seconds_unit = "second" if seconds == 1 else "seconds"
return f"{minutes} {minutes_unit}, {seconds} {seconds_unit}"
async def build_and_cache_graph(
    flow_id: str,
    session: Session,
    chat_service: "ChatService",
    graph: Optional[Graph] = None,
):
    """Build the flow's graph from its stored payload and cache it.

    When a ``graph`` is supplied, the freshly built graph is merged into it
    via ``graph.update`` instead of replacing it outright.

    Raises:
        ValueError: if the flow does not exist or has no data.
    """
    flow: Optional[Flow] = session.get(Flow, flow_id)
    if not flow or not flow.data:
        raise ValueError("Invalid flow ID")
    fresh_graph = Graph.from_payload(flow.data, flow_id)
    # Preserve any state in the caller-provided graph by merging rather than
    # overwriting when one was passed in.
    graph = fresh_graph if graph is None else graph.update(fresh_graph)
    await chat_service.set_cache(flow_id, graph)
    return graph
def format_syntax_error_message(exc: SyntaxError) -> str:
    """Build a frontend-friendly message for a ``SyntaxError``.

    Includes the offending source line (stripped) when the interpreter
    captured it; otherwise only the line number.
    """
    location = f"Error on line {exc.lineno}"
    if exc.text is None:
        return f"Syntax error in code. {location}"
    return f"Syntax error in code. {location}: {exc.text.strip()}"
def get_causing_exception(exc: BaseException) -> BaseException:
    """Return the root cause of ``exc`` by walking its ``__cause__`` chain.

    Iterates instead of recursing so a long chain cannot blow the stack, and
    guards against cyclic ``__cause__`` links (possible since ``__cause__``
    is a writable attribute), which the original recursive version would
    follow forever.
    """
    seen: set[int] = set()
    while exc.__cause__ is not None and id(exc) not in seen:
        seen.add(id(exc))
        exc = exc.__cause__
    return exc
def format_exception_message(exc: Exception) -> str:
    """Format an exception message for returning to the frontend.

    If the root of the ``__cause__`` chain is a ``SyntaxError``, delegate to
    ``format_syntax_error_message`` for a line-aware message; otherwise fall
    back to ``str(exc)``.
    """
    root_cause = get_causing_exception(exc)
    if not isinstance(root_cause, SyntaxError):
        return str(exc)
    return format_syntax_error_message(root_cause)

View file

@ -2,8 +2,10 @@ from langflow.api.v1.api_key import router as api_key_router
from langflow.api.v1.chat import router as chat_router
from langflow.api.v1.credential import router as credentials_router
from langflow.api.v1.endpoints import router as endpoints_router
from langflow.api.v1.files import router as files_router
from langflow.api.v1.flows import router as flows_router
from langflow.api.v1.login import router as login_router
from langflow.api.v1.monitor import router as monitor_router
from langflow.api.v1.store import router as store_router
from langflow.api.v1.users import router as users_router
from langflow.api.v1.validate import router as validate_router
@ -18,4 +20,6 @@ __all__ = [
"api_key_router",
"login_router",
"credentials_router",
"monitor_router",
"files_router",
]

View file

@ -1,9 +1,7 @@
from typing import Optional
from langchain.prompts import PromptTemplate
from pydantic import BaseModel, field_validator, model_serializer
from langflow.interface.utils import extract_input_variables_from_prompt
from langflow.template.frontend_node.base import FrontendNode
@ -28,7 +26,7 @@ class FrontendNodeRequest(FrontendNode):
class ValidatePromptRequest(BaseModel):
name: str
template: str
# optional for tweak call
custom_fields: Optional[dict] = None
frontend_node: Optional[FrontendNodeRequest] = None
@ -68,8 +66,6 @@ INVALID_CHARACTERS = {
")",
"[",
"]",
"{",
"}",
}
INVALID_NAMES = {
@ -82,79 +78,88 @@ INVALID_NAMES = {
}
def validate_prompt(template: str):
    """Validate a prompt template and return its (checked) input variables.

    Raises:
        ValueError: if any variable uses a reserved name from INVALID_NAMES,
            or if the template cannot be compiled into a ``PromptTemplate``.
    """
    input_variables = extract_input_variables_from_prompt(template)

    # Check if there are invalid characters in the input_variables
    input_variables = check_input_variables(input_variables)
    # Reserved names cannot be used as prompt variables.
    # NOTE(review): the message joins *all* input variables, not just the
    # reserved/offending ones — confirm whether that is intended.
    if any(var in INVALID_NAMES for var in input_variables):
        raise ValueError(f"Invalid input variables. None of the variables can be named {', '.join(input_variables)}. ")

    # Final sanity check: let LangChain itself reject anything we missed.
    try:
        PromptTemplate(template=template, input_variables=input_variables)
    except Exception as exc:
        raise ValueError(str(exc)) from exc

    return input_variables
def is_json_like(var):
    """Return True when ``var`` looks like a double-brace ``{{ ... }}`` payload.

    Double-brace spans are treated as opaque JSON-like blobs rather than
    prompt variables, so none of their content should be validated.
    """
    if var.startswith("{{") and var.endswith("}}"):
        # Plain double-brace variable: skip validation of its content.
        return True

    # The quick check above misses multiline or indented JSON strings, which
    # can carry leading/trailing newlines or internal padding (e.g.
    # '\n{{\n  "test": "hello"\n}}\n').  Collapse all whitespace and
    # newlines, then re-test the double-brace delimiters.
    compact = var.strip().replace("\n", "").replace(" ", "")
    return compact.startswith("{{") and compact.endswith("}}")
def check_input_variables(input_variables: list):
def fix_variable(var, invalid_chars, wrong_variables):
    """Strip invalid characters (and leading digits) from a variable name.

    Returns a tuple ``(fixed_name, invalid_chars, wrong_variables)``.  The two
    list arguments are mutated in place (offending characters are appended to
    ``invalid_chars``; the original name to ``wrong_variables``) and returned
    for chaining.
    """
    if not var:
        # Empty names have nothing to fix.
        return var, invalid_chars, wrong_variables
    new_var = var
    # Handle variables starting with a number
    if var[0].isdigit():
        invalid_chars.append(var[0])
        # Recurse on the remainder so a run of leading digits is fully removed.
        new_var, invalid_chars, wrong_variables = fix_variable(var[1:], invalid_chars, wrong_variables)
    # Temporarily replace {{ and }} to avoid treating them as invalid
    new_var = new_var.replace("{{", "ᴛᴇᴍᴘᴏᴘᴇɴ").replace("}}", "ᴛᴇᴍᴘʟsᴇ")
    # Remove invalid characters
    for char in new_var:
        if char in INVALID_CHARACTERS:
            invalid_chars.append(char)
            new_var = new_var.replace(char, "")
            if var not in wrong_variables:  # Avoid duplicating entries
                wrong_variables.append(var)
    # Restore {{ and }}
    new_var = new_var.replace("ᴛᴇᴍᴘᴏᴘᴇɴ", "{{").replace("ᴛᴇᴍᴘʟsᴇ", "}}")
    return new_var, invalid_chars, wrong_variables
def check_variable(var, invalid_chars, wrong_variables, empty_variables):
    """Classify ``var`` as wrong (contains an invalid char) or empty.

    Appends ``var`` to exactly one of the two result lists (or neither when
    the name is clean and non-empty); both lists are mutated in place and
    returned.
    """
    contains_invalid = any(char in invalid_chars for char in var)
    if contains_invalid:
        wrong_variables.append(var)
    elif var == "":
        empty_variables.append(var)
    return wrong_variables, empty_variables
def check_for_errors(input_variables, fixed_variables, wrong_variables, empty_variables):
    """Raise ``ValueError`` when any truthy input variable required fixing.

    A variable "required fixing" when it is absent from ``fixed_variables``.
    Falsy (empty-string) names are deliberately ignored by the truthiness
    filter, matching the original behaviour.
    """
    unfixed = [var for var in input_variables if var not in fixed_variables]
    if not any(unfixed):
        return
    message = (
        f"Error: Input variables contain invalid characters or formats. \n"
        f"Invalid variables: {', '.join(wrong_variables)}.\n"
        f"Empty variables: {', '.join(empty_variables)}. \n"
        f"Fixed variables: {', '.join(fixed_variables)}."
    )
    raise ValueError(message)
def check_input_variables(input_variables):
invalid_chars = []
fixed_variables = []
wrong_variables = []
empty_variables = []
for variable in input_variables:
new_var = variable
variables_to_check = []
# if variable is empty, then we should add that to the wrong variables
if not variable:
empty_variables.append(variable)
for var in input_variables:
# First, let's check if the variable is a JSON string
# because if it is, it won't be considered a variable
# and we don't need to validate it
if is_json_like(var):
continue
# if variable starts with a number we should add that to the invalid chars
# and wrong variables
if variable[0].isdigit():
invalid_chars.append(variable[0])
new_var = new_var.replace(variable[0], "")
wrong_variables.append(variable)
else:
for char in INVALID_CHARACTERS:
if char in variable:
invalid_chars.append(char)
new_var = new_var.replace(char, "")
wrong_variables.append(variable)
new_var, wrong_variables, empty_variables = fix_variable(var, invalid_chars, wrong_variables)
wrong_variables, empty_variables = check_variable(var, INVALID_CHARACTERS, wrong_variables, empty_variables)
fixed_variables.append(new_var)
# If any of the input_variables is not in the fixed_variables, then it means that
# there are invalid characters in the input_variables
variables_to_check.append(var)
if any(var not in fixed_variables for var in input_variables):
error_message = build_error_message(
input_variables,
invalid_chars,
wrong_variables,
fixed_variables,
empty_variables,
)
raise ValueError(error_message)
return input_variables
check_for_errors(variables_to_check, fixed_variables, wrong_variables, empty_variables)
def build_error_message(input_variables, invalid_chars, wrong_variables, fixed_variables, empty_variables):
input_variables_str = ", ".join([f"'{var}'" for var in input_variables])
error_string = f"Invalid input variables: {input_variables_str}. "
if wrong_variables and invalid_chars:
# fix the wrong variables replacing invalid chars and find them in the fixed variables
error_string_vars = "You can fix them by replacing the invalid characters: "
wvars = wrong_variables.copy()
for i, wrong_var in enumerate(wvars):
for char in invalid_chars:
wrong_var = wrong_var.replace(char, "")
if wrong_var in fixed_variables:
error_string_vars += f"'{wrong_variables[i]}' -> '{wrong_var}'"
error_string += error_string_vars
elif empty_variables:
error_string += f" There are {len(empty_variables)} empty variable{'s' if len(empty_variables) > 1 else ''}."
elif len(set(fixed_variables)) != len(fixed_variables):
error_string += "There are duplicate variables."
return error_string
return fixed_variables

View file

@ -1,28 +1,37 @@
import asyncio
from typing import Any, Dict, List, Optional
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from uuid import UUID
from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish
from langchain_core.callbacks.base import AsyncCallbackHandler
from loguru import logger
from langflow.api.v1.schemas import ChatResponse, PromptResponse
from langflow.services.deps import get_chat_service
from langflow.services.deps import get_chat_service, get_socket_service
from langflow.utils.util import remove_ansi_escape_codes
if TYPE_CHECKING:
from langflow.services.socket.service import SocketIOService
# https://github.com/hwchase17/chat-langchain/blob/master/callback.py
class AsyncStreamingLLMCallbackHandler(AsyncCallbackHandler):
class AsyncStreamingLLMCallbackHandleSIO(AsyncCallbackHandler):
"""Callback handler for streaming LLM responses."""
def __init__(self, client_id: str):
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return False
def __init__(self, session_id: str):
self.chat_service = get_chat_service()
self.client_id = client_id
self.websocket = self.chat_service.active_connections[self.client_id]
self.client_id = session_id
self.socketio_service: "SocketIOService" = get_socket_service()
self.sid = session_id
# self.socketio_service = self.chat_service.active_connections[self.client_id]
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
resp = ChatResponse(message=token, type="stream", intermediate_steps="")
await self.websocket.send_json(resp.model_dump())
await self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())
async def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> Any:
"""Run when tool starts running."""
@ -31,7 +40,7 @@ class AsyncStreamingLLMCallbackHandler(AsyncCallbackHandler):
type="stream",
intermediate_steps=f"Tool input: {input_str}",
)
await self.websocket.send_json(resp.model_dump())
await self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())
async def on_tool_end(self, output: str, **kwargs: Any) -> Any:
"""Run when tool ends running."""
@ -62,7 +71,7 @@ class AsyncStreamingLLMCallbackHandler(AsyncCallbackHandler):
try:
# This is to emulate the stream of tokens
for resp in resps:
await self.websocket.send_json(resp.model_dump())
await self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())
except Exception as exc:
logger.error(f"Error sending response: {exc}")
@ -88,8 +97,7 @@ class AsyncStreamingLLMCallbackHandler(AsyncCallbackHandler):
resp = PromptResponse(
prompt=text,
)
await self.websocket.send_json(resp.model_dump())
self.chat_service.chat_history.add_message(self.client_id, resp)
await self.socketio_service.emit_message(to=self.sid, data=resp.model_dump())
async def on_agent_action(self, action: AgentAction, **kwargs: Any):
log = f"Thought: {action.log}"
@ -99,10 +107,10 @@ class AsyncStreamingLLMCallbackHandler(AsyncCallbackHandler):
logs = log.split("\n")
for log in logs:
resp = ChatResponse(message="", type="stream", intermediate_steps=log)
await self.websocket.send_json(resp.model_dump())
await self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())
else:
resp = ChatResponse(message="", type="stream", intermediate_steps=log)
await self.websocket.send_json(resp.model_dump())
await self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run on agent end."""
@ -111,20 +119,4 @@ class AsyncStreamingLLMCallbackHandler(AsyncCallbackHandler):
type="stream",
intermediate_steps=finish.log,
)
await self.websocket.send_json(resp.model_dump())
class StreamingLLMCallbackHandler(BaseCallbackHandler):
"""Callback handler for streaming LLM responses."""
def __init__(self, client_id: str):
self.chat_service = get_chat_service()
self.client_id = client_id
self.websocket = self.chat_service.active_connections[self.client_id]
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
resp = ChatResponse(message=token, type="stream", intermediate_steps="")
loop = asyncio.get_event_loop()
coroutine = self.websocket.send_json(resp.model_dump())
asyncio.run_coroutine_threadsafe(coroutine, loop)
await self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())

View file

@ -1,223 +1,35 @@
import time
import uuid
from typing import TYPE_CHECKING, Annotated, Optional
from fastapi import APIRouter, Depends, HTTPException, WebSocket, WebSocketException, status
from fastapi import APIRouter, BackgroundTasks, Body, Depends, HTTPException
from fastapi.responses import StreamingResponse
from langflow.api.utils import build_input_keys_response, format_elapsed_time
from langflow.api.v1.schemas import BuildStatus, BuiltResponse, InitResponse, StreamData
from langflow.graph.graph.base import Graph
from langflow.services.auth.utils import get_current_active_user, get_current_user_for_websocket
from langflow.services.cache.service import BaseCacheService
from langflow.services.cache.utils import update_build_status
from langflow.services.chat.service import ChatService
from langflow.services.deps import get_cache_service, get_chat_service, get_session
from loguru import logger
from sqlmodel import Session
from langflow.api.utils import (
build_and_cache_graph,
format_elapsed_time,
format_exception_message,
)
from langflow.api.v1.schemas import (
InputValueRequest,
ResultDataResponse,
StreamData,
VertexBuildResponse,
VerticesOrderResponse,
)
from langflow.services.auth.utils import get_current_active_user
from langflow.services.chat.service import ChatService
from langflow.services.deps import get_chat_service, get_session, get_session_service
from langflow.services.monitor.utils import log_vertex_build
if TYPE_CHECKING:
from langflow.graph.vertex.types import ChatVertex
from langflow.services.session.service import SessionService
router = APIRouter(tags=["Chat"])
@router.websocket("/chat/{client_id}")
async def chat(
    client_id: str,
    websocket: WebSocket,
    db: Session = Depends(get_session),
    chat_service: "ChatService" = Depends(get_chat_service),
):
    """Websocket endpoint for chat.

    Authenticates the user, then hands the connection to the chat service if
    the flow identified by ``client_id`` has already been built; otherwise
    the connection is closed with an explanatory reason.
    """
    try:
        user = await get_current_user_for_websocket(websocket, db)
        await websocket.accept()
        # BUG FIX: return immediately after closing an unauthorized socket.
        # Previously execution fell through and could call handle_websocket
        # on an already-closed connection.
        if not user:
            await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
            return
        elif not user.is_active:
            await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
            return

        if client_id in chat_service.cache_service:
            await chat_service.handle_websocket(client_id, websocket)
        else:
            # We accept the connection but close it immediately
            # if the flow is not built yet
            message = "Please, build the flow before sending messages"
            await websocket.close(code=status.WS_1011_INTERNAL_ERROR, reason=message)
    except WebSocketException as exc:
        # Fixed log-message typo ("exrror").
        logger.error(f"Websocket error: {exc}")
        await websocket.close(code=status.WS_1011_INTERNAL_ERROR, reason=str(exc))
    except Exception as exc:
        logger.error(f"Error in chat websocket: {exc}")
        message = exc.detail if isinstance(exc, HTTPException) else str(exc)
        if "Could not validate credentials" in str(exc):
            await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
        else:
            await websocket.close(code=status.WS_1011_INTERNAL_ERROR, reason=message)
@router.post("/build/init/{flow_id}", response_model=InitResponse, status_code=201)
async def init_build(
    graph_data: dict,
    flow_id: str,
    current_user=Depends(get_current_active_user),
    chat_service: "ChatService" = Depends(get_chat_service),
    cache_service: "BaseCacheService" = Depends(get_cache_service),
):
    """Initialize the build by storing graph data and returning a unique session ID.

    If a build for ``flow_id`` is already in progress, the existing id is
    returned unchanged; otherwise any stale cache entry is deleted and a
    fresh STARTED entry is stored.
    """
    try:
        if flow_id is None:
            raise ValueError("No ID provided")
        # Check if already building
        if (
            flow_id in cache_service
            and isinstance(cache_service[flow_id], dict)
            and cache_service[flow_id].get("status") == BuildStatus.IN_PROGRESS
        ):
            return InitResponse(flowId=flow_id)

        # Delete from cache if already exists
        if flow_id in chat_service.cache_service:
            chat_service.cache_service.delete(flow_id)
            logger.debug(f"Deleted flow {flow_id} from cache")
        cache_service[flow_id] = {
            "graph_data": graph_data,
            "status": BuildStatus.STARTED,
            "user_id": current_user.id,
        }

        return InitResponse(flowId=flow_id)
    except Exception as exc:
        logger.error(f"Error initializing build: {exc}")
        # BUG FIX: HTTPException must be *raised*, not returned — returning it
        # made FastAPI try to serialize the exception object with a 201 status
        # instead of producing a 500 error response.
        raise HTTPException(status_code=500, detail=str(exc)) from exc
@router.get("/build/{flow_id}/status", response_model=BuiltResponse)
async def build_status(flow_id: str, cache_service: "BaseCacheService" = Depends(get_cache_service)):
    """Report whether the flow identified by ``flow_id`` has been built successfully."""
    try:
        built = flow_id in cache_service and cache_service[flow_id]["status"] == BuildStatus.SUCCESS
        return BuiltResponse(
            built=built,
        )
    except Exception as exc:
        logger.error(f"Error checking build status: {exc}")
        # BUG FIX: HTTPException must be *raised*, not returned — returning it
        # produced a 200 response FastAPI could not serialize as BuiltResponse.
        raise HTTPException(status_code=500, detail=str(exc)) from exc
@router.get("/build/stream/{flow_id}", response_class=StreamingResponse)
async def stream_build(
flow_id: str,
chat_service: "ChatService" = Depends(get_chat_service),
cache_service: "BaseCacheService" = Depends(get_cache_service),
):
"""Stream the build process based on stored flow data."""
async def event_stream(flow_id):
final_response = {"end_of_stream": True}
artifacts = {}
flow_cache = cache_service[flow_id]
flow_cache = flow_cache if isinstance(flow_cache, dict) else {}
try:
if flow_id not in cache_service:
error_message = "Invalid session ID"
yield str(StreamData(event="error", data={"error": error_message}))
return
if flow_cache.get("status") == BuildStatus.IN_PROGRESS:
error_message = "Already building"
yield str(StreamData(event="error", data={"error": error_message}))
return
graph_data = flow_cache.get("graph_data")
if not graph_data:
error_message = "No data provided"
yield str(StreamData(event="error", data={"error": error_message}))
return
logger.debug("Building langchain object")
# Some error could happen when building the graph
graph = Graph.from_payload(graph_data)
number_of_nodes = len(graph.vertices)
update_build_status(cache_service, flow_id, BuildStatus.IN_PROGRESS)
time_elapsed = ""
try:
user_id = flow_cache["user_id"]
except KeyError:
logger.debug("No user_id found in cache_service")
user_id = None
for i, vertex in enumerate(graph.generator_build(), 1):
start_time = time.perf_counter()
try:
log_dict = {
"log": f"Building node {vertex.vertex_type}",
}
yield str(StreamData(event="log", data=log_dict))
if vertex.is_task:
vertex = await try_running_celery_task(vertex, user_id)
else:
await vertex.build(user_id=user_id)
time_elapsed = format_elapsed_time(time.perf_counter() - start_time)
params = vertex._built_object_repr()
valid = True
logger.debug(f"Building node {str(vertex.vertex_type)}")
logger.debug(f"Output: {params[:100]}{'...' if len(params) > 100 else ''}")
if vertex.artifacts:
# The artifacts will be prompt variables
# passed to build_input_keys_response
# to set the input_keys values
artifacts.update(vertex.artifacts)
except Exception as exc:
logger.exception(exc)
params = str(exc)
valid = False
time_elapsed = format_elapsed_time(time.perf_counter() - start_time)
update_build_status(cache_service, flow_id, BuildStatus.FAILURE)
vertex_id = vertex.parent_node_id if vertex.parent_is_top_level else vertex.id
if vertex_id in graph.top_level_vertices:
response = {
"valid": valid,
"params": params,
"id": vertex_id,
"progress": round(i / number_of_nodes, 2),
"duration": time_elapsed,
}
yield str(StreamData(event="message", data=response))
langchain_object = await graph.build()
# Now we need to check the input_keys to send them to the client
if hasattr(langchain_object, "input_keys"):
input_keys_response = build_input_keys_response(langchain_object, artifacts)
else:
input_keys_response = {
"input_keys": None,
"memory_keys": [],
"handle_keys": [],
}
yield str(StreamData(event="message", data=input_keys_response))
chat_service.set_cache(flow_id, langchain_object)
# We need to reset the chat history
chat_service.chat_history.empty_history(flow_id)
update_build_status(cache_service, flow_id, BuildStatus.SUCCESS)
except Exception as exc:
logger.exception(exc)
logger.error("Error while building the flow: %s", exc)
update_build_status(cache_service, flow_id, BuildStatus.FAILURE)
yield str(StreamData(event="error", data={"error": str(exc)}))
finally:
yield str(StreamData(event="message", data=final_response))
try:
return StreamingResponse(event_stream(flow_id), media_type="text/event-stream")
except Exception as exc:
logger.error(f"Error streaming build: {exc}")
raise HTTPException(status_code=500, detail=str(exc))
async def try_running_celery_task(vertex, user_id):
# Try running the task in celery
# and set the task_id to the local vertex
@ -232,3 +44,219 @@ async def try_running_celery_task(vertex, user_id):
vertex.task_id = None
await vertex.build(user_id=user_id)
return vertex
@router.get("/build/{flow_id}/vertices", response_model=VerticesOrderResponse)
async def get_vertices(
    flow_id: str,
    stop_component_id: Optional[str] = None,
    start_component_id: Optional[str] = None,
    chat_service: "ChatService" = Depends(get_chat_service),
    session=Depends(get_session),
):
    """Return the layered build order of the flow's vertices and a fresh run id.

    The graph is taken from the chat-service cache when present and rebuilt
    (and re-cached) from the stored flow otherwise.  When a start/stop
    component is supplied, a partial sort is attempted first, falling back to
    a full sort on failure.
    """
    try:
        # First, we need to check if the flow_id is in the cache
        graph = None
        if cache := await chat_service.get_cache(flow_id):
            graph = cache.get("result")
        graph = await build_and_cache_graph(flow_id, session, chat_service, graph)
        if stop_component_id or start_component_id:
            try:
                vertices = graph.sort_vertices(stop_component_id, start_component_id)
            except Exception as exc:
                # Fall back to the full ordering if the partial sort fails.
                logger.error(exc)
                vertices = graph.sort_vertices()
        else:
            vertices = graph.sort_vertices()
        # `vertices` is a list of lists of vertex ids (one list per layer).
        run_id = uuid.uuid4()
        graph.set_run_id(run_id)
        return VerticesOrderResponse(ids=vertices, run_id=run_id)
    except Exception as exc:
        # BUG FIX: log message was copy-pasted from the build-status endpoint
        # ("Error checking build status") and misidentified the failure site.
        logger.error(f"Error retrieving vertices order: {exc}")
        logger.exception(exc)
        raise HTTPException(status_code=500, detail=str(exc)) from exc
@router.post("/build/{flow_id}/vertices/{vertex_id}")
async def build_vertex(
    flow_id: str,
    vertex_id: str,
    background_tasks: BackgroundTasks,
    inputs: Annotated[Optional[InputValueRequest], Body(embed=True)] = None,
    chat_service: "ChatService" = Depends(get_chat_service),
    current_user=Depends(get_current_active_user),
):
    """Build a single vertex of the cached graph instead of the entire graph.

    Returns a VertexBuildResponse carrying the build result, the ids of the
    successor vertices that should run next, and timing information.  Build
    failures inside the vertex are reported in the response (valid=False);
    anything else raises HTTP 500.
    """
    start_time = time.perf_counter()
    next_vertices_ids = []
    try:
        start_time = time.perf_counter()
        cache = await chat_service.get_cache(flow_id)
        if not cache:
            # If there's no cache
            logger.warning(f"No cache found for {flow_id}. Building graph starting at {vertex_id}")
            graph = await build_and_cache_graph(flow_id=flow_id, session=next(get_session()), chat_service=chat_service)
        else:
            graph = cache.get("result")
        result_data_response = ResultDataResponse(results={})
        duration = ""
        vertex = graph.get_vertex(vertex_id)
        try:
            # Rebuild unless the vertex is frozen AND already built.
            if not vertex.frozen or not vertex._built:
                inputs_dict = inputs.model_dump() if inputs else {}
                await vertex.build(user_id=current_user.id, inputs=inputs_dict)
            if vertex.result is not None:
                params = vertex._built_object_repr()
                valid = True
                result_dict = vertex.result
                artifacts = vertex.artifacts
            else:
                raise ValueError(f"No result found for vertex {vertex_id}")
            # Update run state under the per-flow cache lock so concurrent
            # vertex builds don't clobber each other's graph updates.
            async with chat_service._cache_locks[flow_id] as lock:
                graph.remove_from_predecessors(vertex_id)
                next_vertices_ids = vertex.successors_ids
                next_vertices_ids = [v for v in next_vertices_ids if graph.should_run_vertex(v)]
                await chat_service.set_cache(flow_id=flow_id, data=graph, lock=lock)
            result_data_response = ResultDataResponse(**result_dict.model_dump())
        except Exception as exc:
            # Per-vertex failures are reported in the response, not re-raised.
            logger.exception(f"Error building vertex: {exc}")
            params = format_exception_message(exc)
            valid = False
            result_data_response = ResultDataResponse(results={})
            artifacts = {}
            # If there's an error building the vertex
            # we need to clear the cache
            await chat_service.clear_cache(flow_id)
        # Log the vertex build
        # (skipped for streaming vertices, whose result is not final yet)
        if not vertex.will_stream:
            background_tasks.add_task(
                log_vertex_build,
                flow_id=flow_id,
                vertex_id=vertex_id,
                valid=valid,
                params=params,
                data=result_data_response,
                artifacts=artifacts,
            )
        timedelta = time.perf_counter() - start_time
        duration = format_elapsed_time(timedelta)
        result_data_response.duration = duration
        result_data_response.timedelta = timedelta
        vertex.add_build_time(timedelta)
        inactivated_vertices = None
        inactivated_vertices = list(graph.inactivated_vertices)
        graph.reset_inactivated_vertices()
        graph.reset_activated_vertices()
        await chat_service.set_cache(flow_id, graph)
        # graph.stop_vertex tells us if the user asked
        # to stop the build of the graph at a certain vertex
        # if it is in next_vertices_ids, we need to remove other
        # vertices from next_vertices_ids
        if graph.stop_vertex and graph.stop_vertex in next_vertices_ids:
            next_vertices_ids = [graph.stop_vertex]
        build_response = VertexBuildResponse(
            inactivated_vertices=inactivated_vertices,
            next_vertices_ids=next_vertices_ids,
            valid=valid,
            params=params,
            id=vertex.id,
            data=result_data_response,
        )
        return build_response
    except Exception as exc:
        logger.error(f"Error building vertex: {exc}")
        logger.exception(exc)
        raise HTTPException(status_code=500, detail=str(exc)) from exc
# Now onto an endpoint that is an SSE endpoint
# it will receive a component_id and a flow_id
#
@router.get("/build/{flow_id}/{vertex_id}/stream", response_class=StreamingResponse)
async def build_vertex_stream(
    flow_id: str,
    vertex_id: str,
    session_id: Optional[str] = None,
    chat_service: "ChatService" = Depends(get_chat_service),
    session_service: "SessionService" = Depends(get_session_service),
):
    """Stream the result of a single vertex as server-sent events.

    The graph is resolved from the chat-service cache (no ``session_id``) or
    from the session service.  Already-built string results are replayed as a
    single chunk; otherwise the vertex's own ``stream()`` is consumed.
    Errors inside the generator are emitted as ``error`` events; the stream
    always terminates with a ``close`` event.
    """
    try:

        async def stream_vertex():
            try:
                if not session_id:
                    # BUG FIX: get_cache is a coroutine (it is awaited at every
                    # other call site); without `await`, `cache` was a coroutine
                    # object, `not cache` was always False, and `graph` became
                    # a coroutine's .get("result") crash.
                    cache = await chat_service.get_cache(flow_id)
                    if not cache:
                        # If there's no cache
                        raise ValueError(f"No cache found for {flow_id}.")
                    else:
                        graph = cache.get("result")
                else:
                    session_data = await session_service.load_session(session_id, flow_id=flow_id)
                    graph, artifacts = session_data if session_data else (None, None)
                if not graph:
                    raise ValueError(f"No graph found for {flow_id}.")
                vertex: "ChatVertex" = graph.get_vertex(vertex_id)
                if not hasattr(vertex, "stream"):
                    raise ValueError(f"Vertex {vertex_id} does not support streaming")
                if isinstance(vertex._built_result, str) and vertex._built_result:
                    # Replay an already-built string result as one chunk.
                    stream_data = StreamData(
                        event="message",
                        data={"message": f"Streaming vertex {vertex_id}"},
                    )
                    yield str(stream_data)
                    stream_data = StreamData(
                        event="message",
                        data={"chunk": vertex._built_result},
                    )
                    yield str(stream_data)
                elif not vertex.frozen or not vertex._built:
                    # Rebuild-and-stream path for unfrozen/unbuilt vertices.
                    logger.debug(f"Streaming vertex {vertex_id}")
                    stream_data = StreamData(
                        event="message",
                        data={"message": f"Streaming vertex {vertex_id}"},
                    )
                    yield str(stream_data)
                    async for chunk in vertex.stream():
                        stream_data = StreamData(
                            event="message",
                            data={"chunk": chunk},
                        )
                        yield str(stream_data)
                elif vertex.result is not None:
                    stream_data = StreamData(
                        event="message",
                        data={"chunk": vertex._built_result},
                    )
                    yield str(stream_data)
                else:
                    raise ValueError(f"No result found for vertex {vertex_id}")
            except Exception as exc:
                logger.error(f"Error building vertex: {exc}")
                yield str(StreamData(event="error", data={"error": str(exc)}))
            finally:
                logger.debug("Closing stream")
                yield str(StreamData(event="close", data={"message": "Stream closed"}))

        return StreamingResponse(stream_vertex(), media_type="text/event-stream")
    except Exception as exc:
        raise HTTPException(status_code=500, detail="Error building vertex") from exc

View file

@ -2,6 +2,8 @@ from datetime import datetime
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException
from sqlmodel import Session, select
from langflow.services.auth import utils as auth_utils
from langflow.services.auth.utils import get_current_active_user
from langflow.services.database.models.credential import (
@ -12,7 +14,6 @@ from langflow.services.database.models.credential import (
)
from langflow.services.database.models.user.model import User
from langflow.services.deps import get_session, get_settings_service
from sqlmodel import Session, select
router = APIRouter(prefix="/credentials", tags=["Credentials"])

View file

@ -1,114 +1,39 @@
from http import HTTPStatus
from typing import Annotated, Any, List, Optional, Union
from typing import Annotated, List, Optional, Union
import sqlalchemy as sa
from fastapi import APIRouter, Body, Depends, HTTPException, UploadFile, status
from loguru import logger
from sqlmodel import Session, select
from langflow.api.utils import update_frontend_node_with_template_values
from langflow.api.v1.schemas import (
CustomComponentCode,
PreloadResponse,
CustomComponentRequest,
InputValueRequest,
ProcessResponse,
TaskResponse,
RunResponse,
TaskStatusResponse,
Tweaks,
UpdateCustomComponentRequest,
UploadFileResponse,
)
from langflow.graph.schema import RunOutputs
from langflow.interface.custom.custom_component import CustomComponent
from langflow.interface.custom.directory_reader import DirectoryReader
from langflow.interface.custom.utils import build_custom_component_template
from langflow.processing.process import build_graph_and_generate_result, process_graph_cached, process_tweaks
from langflow.processing.process import process_tweaks, run_graph
from langflow.services.auth.utils import api_key_security, get_current_active_user
from langflow.services.cache.utils import save_uploaded_file
from langflow.services.database.models.flow import Flow
from langflow.services.database.models.user.model import User
from langflow.services.deps import get_session, get_session_service, get_settings_service, get_task_service
from langflow.services.session.service import SessionService
from loguru import logger
from sqlmodel import select
try:
from langflow.worker import process_graph_cached_task
except ImportError:
def process_graph_cached_task(*args, **kwargs):
raise NotImplementedError("Celery is not installed")
from langflow.services.task.service import TaskService
from sqlmodel import Session
# build router
router = APIRouter(tags=["Base"])
async def process_graph_data(
graph_data: dict,
inputs: Optional[Union[List[dict], dict]] = None,
tweaks: Optional[dict] = None,
clear_cache: bool = False,
session_id: Optional[str] = None,
task_service: "TaskService" = Depends(get_task_service),
sync: bool = True,
):
task_result: Any = None
task_status = None
if tweaks:
try:
graph_data = process_tweaks(graph_data, tweaks)
except Exception as exc:
logger.error(f"Error processing tweaks: {exc}")
if sync:
result = await process_graph_cached(
graph_data,
inputs,
clear_cache,
session_id,
)
task_id = str(id(result))
if isinstance(result, dict) and "result" in result:
task_result = result["result"]
session_id = result["session_id"]
elif hasattr(result, "result") and hasattr(result, "session_id"):
task_result = result.result
session_id = result.session_id
else:
task_result = result
else:
logger.warning(
"This is an experimental feature and may not work as expected."
"Please report any issues to our GitHub repository."
)
if session_id is None:
# Generate a session ID
session_id = get_session_service().generate_key(session_id=session_id, data_graph=graph_data)
task_id, task = await task_service.launch_task(
process_graph_cached_task if task_service.use_celery else process_graph_cached,
graph_data,
inputs,
clear_cache,
session_id,
)
task_status = task.status
if task.status == "FAILURE":
logger.error(f"Task {task_id} failed: {task.traceback}")
task_result = str(task._exception)
else:
task_result = task.result
if task_id:
task_response = TaskResponse(id=task_id, href=f"api/v1/task/{task_id}")
else:
task_response = None
return ProcessResponse(
result=task_result,
status=task_status,
task=task_response,
session_id=session_id,
backend=task_service.backend_name,
)
@router.get("/all", dependencies=[Depends(get_current_active_user)])
def get_all(
settings_service=Depends(get_settings_service),
@ -117,65 +42,86 @@ def get_all(
logger.debug("Building langchain types dict")
try:
return get_all_types_dict(settings_service)
except Exception as exc:
raise HTTPException(status_code=500, detail=str(exc)) from exc
@router.post("/process/json", response_model=ProcessResponse)
async def process_json(
session: Annotated[Session, Depends(get_session)],
data: dict,
inputs: Optional[dict] = None,
tweaks: Optional[dict] = None,
clear_cache: Annotated[bool, Body(embed=True)] = False, # noqa: F821
session_id: Annotated[Union[None, str], Body(embed=True)] = None, # noqa: F821
task_service: "TaskService" = Depends(get_task_service),
sync: Annotated[bool, Body(embed=True)] = True, # noqa: F821
):
try:
return await process_graph_data(
graph_data=data,
inputs=inputs,
tweaks=tweaks,
clear_cache=clear_cache,
session_id=session_id,
task_service=task_service,
sync=sync,
)
all_types_dict = get_all_types_dict(settings_service.settings.COMPONENTS_PATH)
return all_types_dict
except Exception as exc:
logger.exception(exc)
raise HTTPException(status_code=500, detail=str(exc)) from exc
# Endpoint to preload a graph
@router.post("/process/preload/{flow_id}", response_model=PreloadResponse)
async def preload_flow(
@router.post("/run/{flow_id}", response_model=RunResponse, response_model_exclude_none=True)
async def run_flow_with_caching(
session: Annotated[Session, Depends(get_session)],
flow_id: str,
session_id: Optional[str] = None,
session_service: SessionService = Depends(get_session_service),
inputs: Optional[List[InputValueRequest]] = [],
outputs: Optional[List[str]] = [],
tweaks: Annotated[Optional[Tweaks], Body(embed=True)] = None, # noqa: F821
stream: Annotated[bool, Body(embed=True)] = False, # noqa: F821
session_id: Annotated[Union[None, str], Body(embed=True)] = None, # noqa: F821
api_key_user: User = Depends(api_key_security),
clear_session: Annotated[bool, Body(embed=True)] = False, # noqa: F821
session_service: SessionService = Depends(get_session_service),
):
"""
Executes a specified flow by ID with optional input values, output selection, tweaks, and streaming capability.
This endpoint supports running flows with caching to enhance performance and efficiency.
### Parameters:
- `flow_id` (str): The unique identifier of the flow to be executed.
- `inputs` (List[InputValueRequest], optional): A list of inputs specifying the input values and components for the flow. Each input can target specific components and provide custom values.
- `outputs` (List[str], optional): A list of output names to retrieve from the executed flow. If not provided, all outputs are returned.
- `tweaks` (Optional[Tweaks], optional): A dictionary of tweaks to customize the flow execution. The tweaks can be used to modify the flow's parameters and components. Tweaks can be overridden by the input values.
- `stream` (bool, optional): Specifies whether the results should be streamed. Defaults to False.
- `session_id` (Union[None, str], optional): An optional session ID to utilize existing session data for the flow execution.
- `api_key_user` (User): The user associated with the current API key. Automatically resolved from the API key.
- `session_service` (SessionService): The session service object for managing flow sessions.
### Returns:
A `RunResponse` object containing the selected outputs (or all if not specified) of the executed flow and the session ID. The structure of the response accommodates multiple inputs, providing a nested list of outputs for each input.
### Raises:
HTTPException: Indicates issues with finding the specified flow, invalid input formats, or internal errors during flow execution.
### Example usage:
```json
POST /run/{flow_id}
Payload:
{
"inputs": [
{"components": ["component1"], "input_value": "value1"},
{"components": ["component3"], "input_value": "value2"}
],
"outputs": ["Component Name", "component_id"],
"tweaks": {"parameter_name": "value", "Component Name": {"parameter_name": "value"}, "component_id": {"parameter_name": "value"}}
"stream": false
}
```
This endpoint facilitates complex flow executions with customized inputs, outputs, and configurations, catering to diverse application requirements.
"""
try:
# Get the flow that matches the flow_id and belongs to the user
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
if clear_session:
session_service.clear_session(session_id)
# Check if the session exists
session_data = await session_service.load_session(session_id)
# Session data is a tuple of (graph, artifacts)
# or (None, None) if the session is empty
if isinstance(session_data, tuple):
graph, artifacts = session_data
is_clear = graph is None and artifacts is None
else:
is_clear = session_data is None
return PreloadResponse(session_id=session_id, is_clear=is_clear)
if outputs is None:
outputs = []
if session_id:
session_data = await session_service.load_session(session_id, flow_id=flow_id)
graph, artifacts = session_data if session_data else (None, None)
task_result: List[RunOutputs] = []
if not graph:
raise ValueError("Graph not found in the session")
task_result, session_id = await run_graph(
graph=graph,
flow_id=flow_id,
session_id=session_id,
inputs=inputs,
outputs=outputs,
artifacts=artifacts,
session_service=session_service,
stream=stream,
)
else:
if session_id is None:
session_id = flow_id
# Get the flow that matches the flow_id and belongs to the user
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
if flow is None:
raise ValueError(f"Flow {flow_id} not found")
@ -183,18 +129,29 @@ async def preload_flow(
if flow.data is None:
raise ValueError(f"Flow {flow_id} has no data")
graph_data = flow.data
session_service.clear_session(session_id)
# Load the graph using SessionService
session_data = await session_service.load_session(session_id, graph_data)
graph, artifacts = session_data if session_data else (None, None)
if not graph:
raise ValueError("Graph not found in the session")
_ = await graph.build()
session_service.update_session(session_id, (graph, artifacts))
return PreloadResponse(session_id=session_id)
except Exception as exc:
logger.exception(exc)
raise HTTPException(status_code=500, detail=str(exc)) from exc
graph_data = process_tweaks(graph_data, tweaks or {})
task_result, session_id = await run_graph(
graph=graph_data,
flow_id=flow_id,
session_id=session_id,
inputs=inputs,
outputs=outputs,
artifacts={},
session_service=session_service,
stream=stream,
)
return RunResponse(outputs=task_result, session_id=session_id)
except sa.exc.StatementError as exc:
# StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
if "badly formed hexadecimal UUID string" in str(exc):
# This means the Flow ID is not a valid UUID which means it can't find the flow
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
except ValueError as exc:
if f"Flow {flow_id} not found" in str(exc):
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
else:
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(exc)) from exc
@router.post(
@ -221,84 +178,15 @@ async def process(
"""
Endpoint to process an input with a given flow_id.
"""
try:
if session_id:
session_data = await session_service.load_session(session_id)
graph, artifacts = session_data if session_data else (None, None)
task_result: Any = None
task_status = None
task_id = None
if not graph:
raise ValueError("Graph not found in the session")
result = await build_graph_and_generate_result(
graph=graph,
inputs=inputs,
artifacts=artifacts,
session_id=session_id,
session_service=session_service,
)
task_id = str(id(result))
if isinstance(result, dict) and "result" in result:
task_result = result["result"]
session_id = result["session_id"]
elif hasattr(result, "result") and hasattr(result, "session_id"):
task_result = result.result
session_id = result.session_id
else:
task_result = result
if task_id:
task_response = TaskResponse(id=task_id, href=f"api/v1/task/{task_id}")
else:
task_response = None
return ProcessResponse(
result=task_result,
status=task_status,
task=task_response,
session_id=session_id,
backend=task_service.backend_name,
)
else:
if api_key_user is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid API Key",
)
# Get the flow that matches the flow_id and belongs to the user
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
if flow is None:
raise ValueError(f"Flow {flow_id} not found")
if flow.data is None:
raise ValueError(f"Flow {flow_id} has no data")
graph_data = flow.data
return await process_graph_data(
graph_data=graph_data,
inputs=inputs,
tweaks=tweaks,
clear_cache=clear_cache,
session_id=session_id,
task_service=task_service,
sync=sync,
)
except sa.exc.StatementError as exc:
# StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
if "badly formed hexadecimal UUID string" in str(exc):
# This means the Flow ID is not a valid UUID which means it can't find the flow
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
except ValueError as exc:
if f"Flow {flow_id} not found" in str(exc):
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
else:
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(exc)) from exc
except Exception as e:
# Log stack trace
logger.exception(e)
raise HTTPException(status_code=500, detail=str(e)) from e
# Raise a depreciation warning
logger.warning(
"The /process endpoint is deprecated and will be removed in a future version. " "Please use /run instead."
)
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="The /process endpoint is deprecated and will be removed in a future version. "
"Please use /run instead.",
)
@router.get("/task/{task_id}", response_model=TaskStatusResponse)
@ -331,8 +219,10 @@ async def get_task_status(task_id: str):
response_model=UploadFileResponse,
status_code=HTTPStatus.CREATED,
)
async def create_upload_file(file: UploadFile, flow_id: str):
# Cache file
async def create_upload_file(
file: UploadFile,
flow_id: str,
):
try:
file_path = save_uploaded_file(file, folder_name=flow_id)
@ -355,12 +245,12 @@ def get_version():
@router.post("/custom_component", status_code=HTTPStatus.OK)
async def custom_component(
raw_code: CustomComponentCode,
raw_code: CustomComponentRequest,
user: User = Depends(get_current_active_user),
):
component = CustomComponent(code=raw_code.code)
built_frontend_node = build_custom_component_template(component, user_id=user.id)
built_frontend_node, _ = build_custom_component_template(component, user_id=user.id)
built_frontend_node = update_frontend_node_with_template_values(built_frontend_node, raw_code.frontend_node)
return built_frontend_node
@ -377,18 +267,46 @@ async def reload_custom_component(path: str, user: User = Depends(get_current_ac
raise ValueError(content)
extractor = CustomComponent(code=content)
return build_custom_component_template(extractor, user_id=user.id)
frontend_node, _ = build_custom_component_template(extractor, user_id=user.id)
return frontend_node
except Exception as exc:
raise HTTPException(status_code=400, detail=str(exc))
@router.post("/custom_component/update", status_code=HTTPStatus.OK)
async def custom_component_update(
raw_code: CustomComponentCode,
code_request: UpdateCustomComponentRequest,
user: User = Depends(get_current_active_user),
):
component = CustomComponent(code=raw_code.code)
"""
Update a custom component with the provided code request.
component_node = build_custom_component_template(component, user_id=user.id, update_field=raw_code.field)
# Update the field
return component_node
This endpoint generates the CustomComponentFrontendNode normally but then runs the `update_build_config` method
on the latest version of the template. This ensures that every time it runs, it has the latest version of the template.
Args:
code_request (CustomComponentRequest): The code request containing the updated code for the custom component.
user (User, optional): The user making the request. Defaults to the current active user.
Returns:
dict: The updated custom component node.
"""
try:
component = CustomComponent(code=code_request.code)
component_node, cc_instance = build_custom_component_template(
component,
user_id=user.id,
)
updated_build_config = cc_instance.update_build_config(
build_config=code_request.get_template(),
field_value=code_request.field_value,
field_name=code_request.field,
)
component_node["template"] = updated_build_config
return component_node
except Exception as exc:
raise HTTPException(status_code=400, detail=str(exc)) from exc

View file

@ -0,0 +1,114 @@
import hashlib
from http import HTTPStatus
from io import BytesIO
from fastapi import APIRouter, Depends, HTTPException, UploadFile
from fastapi.responses import StreamingResponse
from langflow.api.v1.schemas import UploadFileResponse
from langflow.services.auth.utils import get_current_active_user
from langflow.services.database.models.flow import Flow
from langflow.services.deps import get_session, get_storage_service
from langflow.services.storage.service import StorageService
from langflow.services.storage.utils import build_content_type_from_extension
router = APIRouter(tags=["Files"], prefix="/files")
# Create dep that gets the flow_id from the request
# then finds it in the database and returns it while
# using the current user as the owner
def get_flow_id(
    flow_id: str,
    current_user=Depends(get_current_active_user),
    session=Depends(get_session),
):
    """Dependency that validates a flow exists and belongs to the current user.

    Returns the flow_id unchanged on success; raises 404 when the flow is
    missing and 403 when it is owned by someone else.
    """
    flow = session.get(Flow, flow_id)
    if flow is None:
        raise HTTPException(status_code=404, detail="Flow not found")
    if flow.user_id != current_user.id:
        raise HTTPException(status_code=403, detail="You don't have access to this flow")
    return flow_id
@router.post("/upload/{flow_id}", status_code=HTTPStatus.CREATED)
async def upload_file(
    file: UploadFile,
    flow_id: str = Depends(get_flow_id),
    storage_service: StorageService = Depends(get_storage_service),
):
    """Store an uploaded file under the flow's folder in the storage service.

    When the upload carries no filename, a deterministic name is derived from
    the SHA-256 digest of the content. Returns an UploadFileResponse with the
    stored path.
    """
    try:
        file_content = await file.read()
        # Anonymous uploads get a content-derived, collision-resistant name.
        file_name = file.filename or hashlib.sha256(file_content).hexdigest()
        folder = flow_id
        await storage_service.save_file(flow_id=folder, file_name=file_name, data=file_content)
        return UploadFileResponse(flowId=flow_id, file_path=f"{folder}/{file_name}")
    except Exception as e:
        # Chain the cause so server logs keep the original traceback.
        raise HTTPException(status_code=500, detail=str(e)) from e
@router.get("/download/{flow_id}/{file_name}")
async def download_file(file_name: str, flow_id: str, storage_service: StorageService = Depends(get_storage_service)):
    """Stream a stored file back to the client as an attachment."""
    try:
        # A dotless name has no extension; the previous `split(".")[-1]` check
        # returned the whole name in that case, so `if not extension` never fired.
        extension = file_name.split(".")[-1] if "." in file_name else ""
        if not extension:
            raise HTTPException(status_code=500, detail=f"Extension not found for file {file_name}")
        content_type = build_content_type_from_extension(extension)
        if not content_type:
            raise HTTPException(status_code=500, detail=f"Content type not found for extension {extension}")
        file_content = await storage_service.get_file(flow_id=flow_id, file_name=file_name)
        headers = {
            # RFC 6266: disposition parameters are ";"-separated — the original
            # header was missing the separator between filename and filename*.
            "Content-Disposition": f"attachment; filename={file_name}; filename*=UTF-8''{file_name}",
            "Content-Type": "application/octet-stream",
            "Content-Length": str(len(file_content)),
        }
        return StreamingResponse(BytesIO(file_content), media_type=content_type, headers=headers)
    except HTTPException:
        # Re-raise deliberate HTTP errors instead of re-wrapping them as 500s
        # with a nested detail string.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e
@router.get("/images/{flow_id}/{file_name}")
async def download_image(file_name: str, flow_id: str, storage_service: StorageService = Depends(get_storage_service)):
    """Stream a stored image inline, rejecting files whose type is not image/*."""
    try:
        # A dotless name has no extension; the previous `split(".")[-1]` check
        # returned the whole name in that case, so `if not extension` never fired.
        extension = file_name.split(".")[-1] if "." in file_name else ""
        if not extension:
            raise HTTPException(status_code=500, detail=f"Extension not found for file {file_name}")
        content_type = build_content_type_from_extension(extension)
        if not content_type:
            raise HTTPException(status_code=500, detail=f"Content type not found for extension {extension}")
        elif not content_type.startswith("image"):
            raise HTTPException(status_code=500, detail=f"Content type {content_type} is not an image")
        file_content = await storage_service.get_file(flow_id=flow_id, file_name=file_name)
        return StreamingResponse(BytesIO(file_content), media_type=content_type)
    except HTTPException:
        # Re-raise deliberate HTTP errors instead of re-wrapping them as 500s.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e
@router.get("/list/{flow_id}")
async def list_files(
    flow_id: str = Depends(get_flow_id), storage_service: StorageService = Depends(get_storage_service)
):
    """List the names of all files stored for the given flow."""
    try:
        files = await storage_service.list_files(flow_id=flow_id)
        return {"files": files}
    except Exception as e:
        # Chain the cause so the root error is preserved in logs.
        raise HTTPException(status_code=500, detail=str(e)) from e
@router.delete("/delete/{flow_id}/{file_name}")
async def delete_file(
    file_name: str, flow_id: str = Depends(get_flow_id), storage_service: StorageService = Depends(get_storage_service)
):
    """Delete a single stored file belonging to the given flow."""
    try:
        await storage_service.delete_file(flow_id=flow_id, file_name=file_name)
        return {"message": f"File {file_name} deleted successfully"}
    except Exception as e:
        # Chain the cause so the root error is preserved in logs.
        raise HTTPException(status_code=500, detail=str(e)) from e

View file

@ -5,14 +5,22 @@ from uuid import UUID
import orjson
from fastapi import APIRouter, Depends, File, HTTPException, UploadFile
from fastapi.encoders import jsonable_encoder
from loguru import logger
from sqlmodel import Session, select
from langflow.api.utils import remove_api_keys, validate_is_component
from langflow.api.v1.schemas import FlowListCreate, FlowListRead
from langflow.initial_setup.setup import STARTER_FOLDER_NAME
from langflow.services.auth.utils import get_current_active_user
from langflow.services.database.models.flow import Flow, FlowCreate, FlowRead, FlowUpdate
from langflow.services.database.models.flow import (
Flow,
FlowCreate,
FlowRead,
FlowUpdate,
)
from langflow.services.database.models.user.model import User
from langflow.services.deps import get_session, get_settings_service
from langflow.services.settings.service import SettingsService
# build router
router = APIRouter(prefix="/flows", tags=["Flows"])
@ -42,11 +50,36 @@ def create_flow(
def read_flows(
*,
current_user: User = Depends(get_current_active_user),
session: Session = Depends(get_session),
settings_service: "SettingsService" = Depends(get_settings_service),
):
"""Read all flows."""
try:
flows = current_user.flows
flows = validate_is_component(flows)
auth_settings = settings_service.auth_settings
if auth_settings.AUTO_LOGIN:
flows = session.exec(
select(Flow).where(
(Flow.user_id == None) | (Flow.user_id == current_user.id) # noqa
)
).all()
else:
flows = current_user.flows
flows = validate_is_component(flows) # type: ignore
flow_ids = [flow.id for flow in flows]
# with the session get the flows that DO NOT have a user_id
try:
example_flows = session.exec(
select(Flow).where(
Flow.user_id == None, # noqa
Flow.folder == STARTER_FOLDER_NAME,
)
).all()
for example_flow in example_flows:
if example_flow.id not in flow_ids:
flows.append(example_flow) # type: ignore
except Exception as e:
logger.error(e)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e)) from e
return [jsonable_encoder(flow) for flow in flows]
@ -58,9 +91,18 @@ def read_flow(
session: Session = Depends(get_session),
flow_id: UUID,
current_user: User = Depends(get_current_active_user),
settings_service: "SettingsService" = Depends(get_settings_service),
):
"""Read a flow."""
if user_flow := (session.exec(select(Flow).where(Flow.id == flow_id, Flow.user_id == current_user.id)).first()):
auth_settings = settings_service.auth_settings
stmt = select(Flow).where(Flow.id == flow_id)
if auth_settings.AUTO_LOGIN:
# If auto login is enable user_id can be current_user.id or None
# so write an OR
stmt = stmt.where(
(Flow.user_id == current_user.id) | (Flow.user_id == None) # noqa
) # noqa
if user_flow := session.exec(stmt).first():
return user_flow
else:
raise HTTPException(status_code=404, detail="Flow not found")
@ -77,7 +119,12 @@ def update_flow(
):
"""Update a flow."""
db_flow = read_flow(session=session, flow_id=flow_id, current_user=current_user)
db_flow = read_flow(
session=session,
flow_id=flow_id,
current_user=current_user,
settings_service=settings_service,
)
if not db_flow:
raise HTTPException(status_code=404, detail="Flow not found")
flow_data = flow.model_dump(exclude_unset=True)
@ -99,9 +146,15 @@ def delete_flow(
session: Session = Depends(get_session),
flow_id: UUID,
current_user: User = Depends(get_current_active_user),
settings_service=Depends(get_settings_service),
):
"""Delete a flow."""
flow = read_flow(session=session, flow_id=flow_id, current_user=current_user)
flow = read_flow(
session=session,
flow_id=flow_id,
current_user=current_user,
settings_service=settings_service,
)
if not flow:
raise HTTPException(status_code=404, detail="Flow not found")
session.delete(flow)
@ -109,9 +162,6 @@ def delete_flow(
return {"message": "Flow deleted successfully"}
# Define a new model to handle multiple flows
@router.post("/batch/", response_model=List[FlowRead], status_code=201)
def create_flows(
*,
@ -157,8 +207,9 @@ async def upload_file(
async def download_file(
*,
session: Session = Depends(get_session),
settings_service: "SettingsService" = Depends(get_settings_service),
current_user: User = Depends(get_current_active_user),
):
"""Download all flows as a file."""
flows = read_flows(current_user=current_user)
flows = read_flows(current_user=current_user, session=session, settings_service=settings_service)
return FlowListRead(flows=flows)

View file

@ -20,7 +20,9 @@ async def login_to_get_access_token(
form_data: OAuth2PasswordRequestForm = Depends(),
db: Session = Depends(get_session),
# _: Session = Depends(get_current_active_user)
settings_service=Depends(get_settings_service),
):
auth_settings = settings_service.auth_settings
try:
user = authenticate_user(form_data.username, form_data.password, db)
except Exception as exc:
@ -33,8 +35,22 @@ async def login_to_get_access_token(
if user:
tokens = create_user_tokens(user_id=user.id, db=db, update_last_login=True)
response.set_cookie("refresh_token_lf", tokens["refresh_token"], httponly=True)
response.set_cookie("access_token_lf", tokens["access_token"], httponly=False)
response.set_cookie(
"refresh_token_lf",
tokens["refresh_token"],
httponly=auth_settings.REFRESH_HTTPONLY,
samesite=auth_settings.REFRESH_SAME_SITE,
secure=auth_settings.REFRESH_SECURE,
expires=auth_settings.REFRESH_TOKEN_EXPIRE_MINUTES * 60,
)
response.set_cookie(
"access_token_lf",
tokens["access_token"],
httponly=auth_settings.ACCESS_HTTPONLY,
samesite=auth_settings.ACCESS_SAME_SITE,
secure=auth_settings.ACCESS_SECURE,
expires=auth_settings.ACCESS_TOKEN_EXPIRE_MINUTES * 60,
)
return tokens
else:
raise HTTPException(
@ -46,11 +62,21 @@ async def login_to_get_access_token(
@router.get("/auto_login")
async def auto_login(
response: Response, db: Session = Depends(get_session), settings_service=Depends(get_settings_service)
response: Response,
db: Session = Depends(get_session),
settings_service=Depends(get_settings_service),
):
auth_settings = settings_service.auth_settings
if settings_service.auth_settings.AUTO_LOGIN:
tokens = create_user_longterm_token(db)
response.set_cookie("access_token_lf", tokens["access_token"], httponly=False)
response.set_cookie(
"access_token_lf",
tokens["access_token"],
httponly=auth_settings.ACCESS_HTTPONLY,
samesite=auth_settings.ACCESS_SAME_SITE,
secure=auth_settings.ACCESS_SECURE,
expires=auth_settings.ACCESS_TOKEN_EXPIRE_MINUTES * 60,
)
return tokens
raise HTTPException(
@ -63,12 +89,29 @@ async def auto_login(
@router.post("/refresh")
async def refresh_token(request: Request, response: Response):
async def refresh_token(request: Request, response: Response, settings_service=Depends(get_settings_service)):
auth_settings = settings_service.auth_settings
token = request.cookies.get("refresh_token_lf")
if token:
tokens = create_refresh_token(token)
response.set_cookie("refresh_token_lf", tokens["refresh_token"], httponly=True)
response.set_cookie("access_token_lf", tokens["access_token"], httponly=False)
response.set_cookie(
"refresh_token_lf",
tokens["refresh_token"],
httponly=auth_settings.REFRESH_HTTPONLY,
samesite=auth_settings.REFRESH_SAME_SITE,
secure=auth_settings.REFRESH_SECURE,
expires=auth_settings.REFRESH_TOKEN_EXPIRE_MINUTES * 60,
)
response.set_cookie(
"access_token_lf",
tokens["access_token"],
httponly=auth_settings.ACCESS_HTTPONLY,
samesite=auth_settings.ACCESS_SAME_SITE,
secure=auth_settings.ACCESS_SECURE,
expires=auth_settings.ACCESS_TOKEN_EXPIRE_MINUTES * 60,
)
return tokens
else:
raise HTTPException(

View file

@ -0,0 +1,71 @@
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, Query
from langflow.services.deps import get_monitor_service
from langflow.services.monitor.schema import VertexBuildMapModel
from langflow.services.monitor.service import MonitorService
router = APIRouter(prefix="/monitor", tags=["Monitor"])
# Get vertex_builds data from the monitor service
@router.get("/builds", response_model=VertexBuildMapModel)
async def get_vertex_builds(
    flow_id: Optional[str] = Query(None),
    vertex_id: Optional[str] = Query(None),
    valid: Optional[bool] = Query(None),
    order_by: Optional[str] = Query("timestamp"),
    monitor_service: MonitorService = Depends(get_monitor_service),
):
    """Return recorded vertex builds, optionally filtered by flow, vertex, and validity."""
    try:
        vertex_build_dicts = monitor_service.get_vertex_builds(
            flow_id=flow_id, vertex_id=vertex_id, valid=valid, order_by=order_by
        )
        vertex_build_map = VertexBuildMapModel.from_list_of_dicts(vertex_build_dicts)
        return vertex_build_map
    except Exception as e:
        # Chain the cause so the root error is preserved in logs.
        raise HTTPException(status_code=500, detail=str(e)) from e
@router.delete("/builds", status_code=204)
async def delete_vertex_builds(
    flow_id: Optional[str] = Query(None),
    monitor_service: MonitorService = Depends(get_monitor_service),
):
    """Delete stored vertex-build records, optionally restricted to one flow."""
    try:
        monitor_service.delete_vertex_builds(flow_id=flow_id)
    except Exception as e:
        # Chain the cause so the root error is preserved in logs.
        raise HTTPException(status_code=500, detail=str(e)) from e
@router.get("/messages")
async def get_messages(
    session_id: Optional[str] = Query(None),
    sender: Optional[str] = Query(None),
    sender_name: Optional[str] = Query(None),
    order_by: Optional[str] = Query("timestamp"),
    monitor_service: MonitorService = Depends(get_monitor_service),
):
    """Return monitored chat messages filtered by session, sender, and sender name."""
    try:
        return monitor_service.get_messages(
            sender=sender,
            sender_name=sender_name,
            session_id=session_id,
            order_by=order_by,
        )
    except Exception as e:
        # Chain the cause so the root error is preserved in logs.
        raise HTTPException(status_code=500, detail=str(e)) from e
@router.get("/transactions")
async def get_transactions(
    source: Optional[str] = Query(None),
    target: Optional[str] = Query(None),
    status: Optional[str] = Query(None),
    order_by: Optional[str] = Query("timestamp"),
    monitor_service: MonitorService = Depends(get_monitor_service),
):
    """Return monitored vertex-to-vertex transactions filtered by source, target, and status."""
    try:
        return monitor_service.get_transactions(source=source, target=target, status=status, order_by=order_by)
    except Exception as e:
        # Chain the cause so the root error is preserved in logs.
        raise HTTPException(status_code=500, detail=str(e)) from e

View file

@ -1,13 +1,24 @@
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from uuid import UUID
from pydantic import (
BaseModel,
ConfigDict,
Field,
RootModel,
field_validator,
model_serializer,
)
from langflow.graph.schema import RunOutputs
from langflow.schema import dotdict
from langflow.services.database.models.api_key.model import ApiKeyRead
from langflow.services.database.models.base import orjson_dumps
from langflow.services.database.models.flow import FlowCreate, FlowRead
from langflow.services.database.models.user import UserRead
from pydantic import BaseModel, Field, field_validator
class BuildStatus(Enum):
@ -19,26 +30,6 @@ class BuildStatus(Enum):
IN_PROGRESS = "in_progress"
class GraphData(BaseModel):
"""Data inside the exported flow."""
nodes: List[Dict[str, Any]]
edges: List[Dict[str, Any]]
class ExportedFlow(BaseModel):
"""Exported flow from Langflow."""
description: str
name: str
id: str
data: GraphData
class InputRequest(BaseModel):
input: dict
class TweaksRequest(BaseModel):
tweaks: Optional[Dict[str, Dict[str, str]]] = Field(default_factory=dict)
@ -64,6 +55,26 @@ class ProcessResponse(BaseModel):
backend: Optional[str] = None
class RunResponse(BaseModel):
    """Run response schema: outputs of an executed flow plus its session id."""

    outputs: Optional[List[RunOutputs]] = []
    session_id: Optional[str] = None

    @model_serializer(mode="wrap")
    def serialize(self, handler):
        # Serialize all the outputs if they are base models
        # NOTE(review): this mutates self.outputs in place, replacing BaseModel
        # entries with plain dicts during serialization — confirm callers do not
        # reuse the instance's outputs after it has been serialized once.
        if self.outputs:
            serialized_outputs = []
            for output in self.outputs:
                if isinstance(output, BaseModel):
                    serialized_outputs.append(output.model_dump(exclude_none=True))
                else:
                    serialized_outputs.append(output)
            self.outputs = serialized_outputs
        return handler(self)
class PreloadResponse(BaseModel):
"""Preload response schema."""
@ -71,9 +82,6 @@ class PreloadResponse(BaseModel):
is_clear: Optional[bool] = None
# TaskStatusResponse(
# status=task.status, result=task.result if task.ready() else None
# )
class TaskStatusResponse(BaseModel):
"""Task status response schema."""
@ -162,12 +170,21 @@ class StreamData(BaseModel):
return f"event: {self.event}\ndata: {orjson_dumps(self.data, indent_2=False)}\n\n"
class CustomComponentCode(BaseModel):
class CustomComponentRequest(BaseModel):
    """Request body carrying custom-component source code plus optional frontend state."""

    # Permit non-pydantic field types in this model.
    model_config = ConfigDict(arbitrary_types_allowed=True)
    code: str
    field: Optional[str] = None
    frontend_node: Optional[dict] = None
class UpdateCustomComponentRequest(CustomComponentRequest):
    """Request for updating a custom component's build config: the field being
    edited, its new value, and the current template."""

    field: str
    field_value: Optional[Union[str, int, float, bool, dict, list]] = None
    template: dict

    def get_template(self):
        # dotdict provides attribute-style access over the raw template mapping.
        return dotdict(self.template)
class CustomComponentResponseError(BaseModel):
detail: str
traceback: str
@ -212,3 +229,81 @@ class Token(BaseModel):
class ApiKeyCreateRequest(BaseModel):
api_key: str
class VerticesOrderResponse(BaseModel):
    """Ordered vertex ids for a build, tagged with the run's UUID."""

    ids: List[str]
    run_id: UUID
class ResultDataResponse(BaseModel):
    """Outputs of a single vertex build: results, artifacts, and timing."""

    results: Optional[Any] = Field(default_factory=dict)
    artifacts: Optional[Any] = Field(default_factory=dict)
    # presumably elapsed build time in seconds, with `duration` a human-readable
    # rendering of the same span — confirm against the producer
    timedelta: Optional[float] = None
    duration: Optional[str] = None
class VertexBuildResponse(BaseModel):
    """Result of building one vertex, including downstream activation info."""

    id: Optional[str] = None
    inactivated_vertices: Optional[List[str]] = None
    next_vertices_ids: Optional[List[str]] = None
    valid: bool
    params: Optional[Any] = Field(default_factory=dict)
    """JSON string of the params."""
    data: ResultDataResponse
    """Mapping of vertex ids to result dict containing the param name and result value."""
    # NOTE(review): datetime.utcnow is deprecated since Python 3.12 and yields a
    # naive datetime — confirm consumers expect naive UTC before changing it.
    timestamp: Optional[datetime] = Field(default_factory=datetime.utcnow)
    """Timestamp of the build."""
class VerticesBuiltResponse(BaseModel):
    """Batch of vertex build results."""

    vertices: List[VertexBuildResponse]
class InputValueRequest(BaseModel):
    """An input value optionally targeted at specific components of a flow."""

    # Empty list means the value is not restricted to named components.
    components: Optional[List[str]] = []
    input_value: Optional[str] = None

    # add an example
    model_config = {
        "json_schema_extra": {
            "examples": [
                {
                    "components": ["components_id", "Component Name"],
                    "input_value": "input_value",
                },
                {"components": ["Component Name"], "input_value": "input_value"},
                {"input_value": "input_value"},
            ]
        }
    }
class Tweaks(RootModel):
    """Mapping of tweaks applied to a flow before execution.

    Keys are either plain parameter names (global tweaks) or component
    names/ids mapping to per-component parameter dicts. Input values override
    tweaks at run time.
    """

    root: dict[str, Union[str, dict[str, str]]] = Field(
        description="A dictionary of tweaks to adjust the flow's execution. Allows customizing flow behavior dynamically. All tweaks are overridden by the input values.",
    )
    model_config = {
        "json_schema_extra": {
            "examples": [
                {
                    "parameter_name": "value",
                    "Component Name": {"parameter_name": "value"},
                    "component_id": {"parameter_name": "value"},
                }
            ]
        }
    }

    # This should behave like a dict — the original only implemented item
    # access/assignment/deletion and items(); the rest of the read-only dict
    # protocol is filled in below so callers can use membership tests,
    # iteration, len(), get(), keys(), and values() as well.
    def __getitem__(self, key):
        return self.root[key]

    def __setitem__(self, key, value):
        self.root[key] = value

    def __delitem__(self, key):
        del self.root[key]

    def __contains__(self, key):
        return key in self.root

    def __iter__(self):
        return iter(self.root)

    def __len__(self):
        return len(self.root)

    def get(self, key, default=None):
        return self.root.get(key, default)

    def keys(self):
        return self.root.keys()

    def values(self):
        return self.root.values()

    def items(self):
        return self.root.items()

View file

@ -1,14 +1,22 @@
from collections import defaultdict
from fastapi import APIRouter, HTTPException
from loguru import logger
from langflow.api.v1.base import (
Code,
CodeValidationResponse,
PromptValidationResponse,
ValidatePromptRequest,
)
from langflow.base.prompts.utils import (
add_new_variables_to_template,
get_old_custom_fields,
remove_old_variables_from_template,
update_input_variables_field,
validate_prompt,
)
from langflow.template.field.base import TemplateField
from langflow.utils.validate import validate_code
from loguru import logger
# build router
router = APIRouter(prefix="/validate", tags=["Validate"])
@ -36,13 +44,26 @@ def post_validate_prompt(prompt_request: ValidatePromptRequest):
input_variables=input_variables,
frontend_node=None,
)
old_custom_fields = get_old_custom_fields(prompt_request)
if not prompt_request.custom_fields:
prompt_request.custom_fields = defaultdict(list)
old_custom_fields = get_old_custom_fields(prompt_request.custom_fields, prompt_request.name)
add_new_variables_to_template(input_variables, prompt_request)
add_new_variables_to_template(
input_variables,
prompt_request.custom_fields,
prompt_request.frontend_node.template,
prompt_request.name,
)
remove_old_variables_from_template(old_custom_fields, input_variables, prompt_request)
remove_old_variables_from_template(
old_custom_fields,
input_variables,
prompt_request.custom_fields,
prompt_request.frontend_node.template,
prompt_request.name,
)
update_input_variables_field(input_variables, prompt_request)
update_input_variables_field(input_variables, prompt_request.frontend_node.template)
return PromptValidationResponse(
input_variables=input_variables,
@ -51,70 +72,3 @@ def post_validate_prompt(prompt_request: ValidatePromptRequest):
except Exception as e:
logger.exception(e)
raise HTTPException(status_code=500, detail=str(e)) from e
def get_old_custom_fields(prompt_request):
    """Return a copy of the custom fields currently registered under the prompt's name.

    Resolves the prompt name for a freshly created node (single entry with an
    empty name), registers an empty list when no entry exists yet, and always
    returns a defensive copy so callers cannot mutate the stored list.
    """
    custom_fields = prompt_request.frontend_node.custom_fields
    # First request after node creation: a single entry whose key is the real name.
    if prompt_request.name == "" and len(custom_fields) == 1:
        prompt_request.name = next(iter(custom_fields))
    if prompt_request.name not in custom_fields:
        # No entry yet — register an empty list for this prompt name.
        custom_fields[prompt_request.name] = []
        return []
    existing = custom_fields[prompt_request.name]
    # A stored None is treated the same as an empty list.
    return list(existing) if existing is not None else []
def add_new_variables_to_template(input_variables, prompt_request):
    """Register each detected prompt variable as an editable field on the frontend node.

    For every variable a fresh string TemplateField is created (preserving any
    value the user already entered) and stored in the node's template; the
    variable is also tracked in custom_fields under the current prompt name.

    Raises:
        HTTPException: 500 if building or storing a field fails.
    """
    for variable in input_variables:
        try:
            new_field = TemplateField(
                name=variable,
                display_name=variable,
                field_type="str",
                show=True,
                advanced=False,
                multiline=True,
                input_types=["Document", "BaseOutputParser"],
                value="",  # start empty; may be replaced with the prior value below
            )
            template = prompt_request.frontend_node.template
            # Carry over the previous value so user input survives re-validation.
            if variable in template:
                new_field.value = template[variable]["value"]
            template[variable] = new_field.to_dict()
            tracked = prompt_request.frontend_node.custom_fields[prompt_request.name]
            # Avoid duplicate tracking entries for the same variable.
            if variable not in tracked:
                tracked.append(variable)
        except Exception as exc:
            logger.exception(exc)
            raise HTTPException(status_code=500, detail=str(exc)) from exc
def remove_old_variables_from_template(old_custom_fields, input_variables, prompt_request):
    """Drop previously registered variables that no longer appear in the prompt.

    Any variable from ``old_custom_fields`` missing from ``input_variables`` is
    removed both from the node's custom_fields entry and from its template.

    Raises:
        HTTPException: 500 if removal fails for any variable.
    """
    stale = (v for v in old_custom_fields if v not in input_variables)
    for variable in stale:
        try:
            registered = prompt_request.frontend_node.custom_fields[prompt_request.name]
            if variable in registered:
                registered.remove(variable)
            # pop with default: the template may not contain the variable at all.
            prompt_request.frontend_node.template.pop(variable, None)
        except Exception as exc:
            logger.exception(exc)
            raise HTTPException(status_code=500, detail=str(exc)) from exc
def update_input_variables_field(input_variables, prompt_request):
    """Sync the template's ``input_variables`` field with the detected variables.

    No-op when the frontend node's template has no ``input_variables`` entry.
    """
    template = prompt_request.frontend_node.template
    if "input_variables" in template:
        template["input_variables"]["value"] = input_variables

View file

Some files were not shown because too many files have changed in this diff Show more