Merge cz/mergeAll into shortcuts_settings

igorrCarvalho 2024-06-07 17:18:40 -03:00
commit e531e36ad1
256 changed files with 19933 additions and 2731 deletions

View file

@ -80,7 +80,10 @@ jobs:
langflowai/langflow-frontend:1.0-alpha
restart-space:
name: Restart HuggingFace Spaces
if: ${{ inputs.release_type == 'main' }}
runs-on: ubuntu-latest
needs: docker_build
strategy:
matrix:
python-version:
@ -100,6 +103,4 @@ jobs:
- name: Restart HuggingFace Spaces Build
run: |
poetry run python ./scripts/factory_restart_space.py
env:
HUGGINGFACE_API_TOKEN: ${{ secrets.HUGGINGFACE_API_TOKEN }}
poetry run python ./scripts/factory_restart_space.py --space "Langflow/Langflow-Preview" --token ${{ secrets.HUGGINGFACE_API_TOKEN }}

View file

@ -35,6 +35,10 @@ jobs:
with:
python-version: "3.10"
cache: "poetry"
- name: Set up Nodejs 20
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Check Version
id: check-version
run: |

View file

@ -168,6 +168,7 @@ build_and_install:
build_frontend:
cd src/frontend && CI='' npm run build
rm -rf src/backend/base/langflow/frontend
cp -r src/frontend/build src/backend/base/langflow/frontend
build:

README.PT.md (new file, 171 lines)
View file

@ -0,0 +1,171 @@
<!-- markdownlint-disable MD030 -->
# [![Langflow](./docs/static/img/hero.png)](https://www.langflow.org)
<p align="center"><strong>
Um framework visual para criar apps de agentes autônomos e RAG
</strong></p>
<p align="center" style="font-size: 12px;">
Open-source, construído em Python, totalmente personalizável, agnóstico em relação a modelos e databases
</p>
<p align="center" style="font-size: 12px;">
<a href="https://docs.langflow.org" style="text-decoration: underline;">Docs</a> -
<a href="https://discord.com/invite/EqksyE2EX9" style="text-decoration: underline;">Junte-se ao nosso Discord</a> -
<a href="https://twitter.com/langflow_ai" style="text-decoration: underline;">Siga-nos no X</a> -
<a href="https://huggingface.co/spaces/Langflow/Langflow-Preview" style="text-decoration: underline;">Demonstração</a>
</p>
<p align="center">
<a href="https://github.com/langflow-ai/langflow">
<img src="https://img.shields.io/github/stars/langflow-ai/langflow">
</a>
<a href="https://discord.com/invite/EqksyE2EX9">
<img src="https://img.shields.io/discord/1116803230643527710?label=Discord">
</a>
</p>
<div align="center">
<a href="./README.md"><img alt="README em Inglês" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README.zh_CN.md"><img alt="README em Chinês Simplificado" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
</div>
<p align="center">
<img src="./docs/static/img/langflow_basic_howto.gif" alt="Seu GIF" style="border: 3px solid #211C43;">
</p>
# 📝 Content
- [📝 Content](#-content)
- [📦 Get Started](#-get-started)
- [🎨 Create Flows](#-create-flows)
- [Deploy](#deploy)
  - [Deploy on Google Cloud Platform](#deploy-on-google-cloud-platform)
  - [Deploy on Railway](#deploy-on-railway)
  - [Deploy on Render](#deploy-on-render)
- [🖥️ Command Line Interface (CLI)](#-command-line-interface-cli)
  - [Usage](#usage)
  - [Environment Variables](#environment-variables)
- [👋 Contribute](#-contribute)
- [🌟 Contributors](#-contributors)
- [📄 License](#-license)
# 📦 Get Started
You can install Langflow with pip:
```shell
# Make sure you have >=Python 3.10 installed on your system.
# Install the pre-release version (recommended for the latest updates)
python -m pip install langflow --pre --force-reinstall
# or the stable version
python -m pip install langflow -U
```
Then, run Langflow with:
```shell
python -m langflow run
```
You can also preview Langflow on [HuggingFace Spaces](https://huggingface.co/spaces/Langflow/Langflow-Preview). [Clone the Space using this link](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true) to create your own Langflow workspace in minutes.
# 🎨 Create Flows
Creating flows with Langflow is easy. Just drag components from the sidebar onto the canvas and connect them to start building your application.
Explore by editing prompt parameters, grouping components, and building your own Custom Components.
Once you're done, you can export your flow as a JSON file.
Load the flow with:
```python
from langflow.load import run_flow_from_json
results = run_flow_from_json("path/to/flow.json", input_value="Hello, World!")
```
# Deploy
## Deploy on Google Cloud Platform
Follow our step-by-step guide to deploy Langflow on Google Cloud Platform (GCP) using Google Cloud Shell. The guide is available in the [**Langflow on Google Cloud Platform**](https://github.com/langflow-ai/langflow/blob/dev/docs/docs/deployment/gcp-deployment.md) document.
Alternatively, click the **"Open in Cloud Shell"** button below to launch Google Cloud Shell, clone the Langflow repository, and start an **interactive tutorial** that walks you through setting up the required resources and deploying Langflow to your GCP project.
[![Open on Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
## Deploy on Railway
Use this template to deploy Langflow 1.0 Preview on Railway:
[![Deploy 1.0 Preview on Railway](https://railway.app/button.svg)](https://railway.app/template/UsJ1uB?referralCode=MnPSdg)
Or this one to deploy Langflow 0.6.x:
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/JMXEWp?referralCode=MnPSdg)
## Deploy on Render
<a href="https://render.com/deploy?repo=https://github.com/langflow-ai/langflow/tree/dev">
<img src="https://render.com/images/deploy-to-render-button.svg" alt="Deploy to Render" />
</a>
# 🖥️ Command Line Interface (CLI)
Langflow provides a command-line interface (CLI) for easy management and configuration.
## Usage
You can run Langflow using the following command:
```shell
langflow run [OPTIONS]
```
Each option is detailed below:
- `--help`: Displays all available options.
- `--host`: Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`.
- `--workers`: Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`.
- `--timeout`: Sets the worker timeout in seconds. The default is `60`.
- `--port`: Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`.
- `--env-file`: Specifies the path to the .env file containing environment variables. The default is `.env`.
- `--log-level`: Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`.
- `--components-path`: Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`.
- `--log-file`: Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`.
- `--cache`: Selects the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`.
- `--dev/--no-dev`: Toggles development mode. The default is `no-dev`.
- `--path`: Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable.
- `--open-browser/--no-open-browser`: Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`.
- `--remove-api-keys/--no-remove-api-keys`: Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`.
- `--install-completion [bash|zsh|fish|powershell|pwsh]`: Installs completion for the specified shell.
- `--show-completion [bash|zsh|fish|powershell|pwsh]`: Shows completion for the specified shell, allowing you to copy it or customize the installation.
- `--backend-only`: This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable.
- `--store`: This parameter, with a default value of `True`, enables the store features; use `--no-store` to deactivate them. It can be configured using the `LANGFLOW_STORE` environment variable.
These parameters are important for users who need to customize the behavior of Langflow, especially in development or specialized deployment scenarios.
### Environment Variables
You can configure many of the CLI options using environment variables. These can be exported in your operating system or added to a `.env` file and loaded using the `--env-file` option.
A sample `.env` file named `.env.example` is included with the project. Copy this file to a new file named `.env` and replace the example values with your actual settings. If you're setting values in both your OS and the `.env` file, the `.env` settings will take precedence.
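For example, a minimal `.env` sketch (the values are illustrative; the variable names come from the options above):
```shell
# Illustrative settings; adjust to your environment
LANGFLOW_HOST=127.0.0.1
LANGFLOW_PORT=7860
LANGFLOW_WORKERS=2
LANGFLOW_LOG_LEVEL=error
```
Then load it when starting the server:
```shell
langflow run --env-file .env
```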
# 👋 Contribute
We welcome contributions from developers of all levels to our open-source project on GitHub. If you'd like to contribute, please check our [contributing guidelines](./CONTRIBUTING.md) and help make Langflow more accessible.
---
[![Star History Chart](https://api.star-history.com/svg?repos=langflow-ai/langflow&type=Timeline)](https://star-history.com/#langflow-ai/langflow&Date)
# 🌟 Contributors
[![langflow contributors](https://contrib.rocks/image?repo=langflow-ai/langflow)](https://github.com/langflow-ai/langflow/graphs/contributors)
# 📄 License
Langflow is released under the MIT License. See the [LICENSE](LICENSE) file for details.

View file

@ -27,6 +27,7 @@
<div align="center">
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README.PT.md"><img alt="README in Portuguese" src="https://img.shields.io/badge/Portuguese-d9d9d9"></a>
<a href="./README.zh_CN.md"><img alt="README in Simplified Chinese" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
</div>
@ -36,7 +37,6 @@
# 📝 Content
- [](#)
- [📝 Content](#-content)
- [📦 Get Started](#-get-started)
- [🎨 Create Flows](#-create-flows)

View file

@ -1,6 +1,14 @@
# syntax=docker/dockerfile:1
# Keep this syntax directive! It's used to enable Docker BuildKit
FROM node:20-bookworm-slim as builder-node
WORKDIR /app
COPY src/frontend/package.json src/frontend/package-lock.json ./
RUN npm install
COPY src/frontend/ ./
RUN npm run build
################################
# BUILDER-BASE
# Used to build deps + create our virtual environment
@ -47,12 +55,11 @@ WORKDIR /app
COPY pyproject.toml poetry.lock README.md ./
COPY src/ ./src
COPY scripts/ ./scripts
RUN python -m pip install requests --user && cd ./scripts && python update_dependencies.py
COPY --from=builder-node /app/build ./src/backend/base/langflow/frontend
RUN $POETRY_HOME/bin/poetry lock --no-update \
&& $POETRY_HOME/bin/poetry install --no-interaction --no-ansi -E deploy \
&& $POETRY_HOME/bin/poetry build -f wheel \
&& $POETRY_HOME/bin/poetry run pip install dist/*.whl
&& $POETRY_HOME/bin/poetry run pip install dist/*.whl --force-reinstall
################################
# RUNTIME

View file

@ -10,8 +10,7 @@ Langflow provides an API key functionality that allows users to access their ind
The default user and password are set using the LANGFLOW_SUPERUSER and
LANGFLOW_SUPERUSER_PASSWORD environment variables.
The default values are
langflow and langflow, respectively.
The default values are `langflow` and `langflow`, respectively.
</Admonition>
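For example, you might override the defaults before starting the server (a sketch; the credentials shown are placeholders):
```bash
export LANGFLOW_SUPERUSER=admin
export LANGFLOW_SUPERUSER_PASSWORD=a-strong-password
langflow run
```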

View file

@ -1,62 +1,51 @@
# Command Line Interface (CLI)
## Overview
Langflow's Command Line Interface (CLI) is a powerful tool that allows you to interact with the Langflow server from the command line. The CLI provides a wide range of commands to help you shape Langflow to your needs.
Running the CLI without any arguments will display a list of available commands and options.
The available commands are below. Navigate to their individual sections of this page to see the parameters.
- [langflow](#overview)
- [langflow api-key](#langflow-api-key)
- [langflow copy-db](#langflow-copy-db)
- [langflow migration](#langflow-migration)
- [langflow run](#langflow-run)
- [langflow superuser](#langflow-superuser)
## Overview
Running the CLI without any arguments displays a list of available options and commands.
```bash
python -m langflow run --help
langflow
# or
python -m langflow run
langflow --help
# or
python -m langflow
```
Each command is detailed below:
| Command | Description |
| ----------- | ---------------------------------------------------------------------- |
| `api-key` | Creates an API key for the default superuser if AUTO_LOGIN is enabled. |
| `copy-db` | Copy the database files to the current directory (`which langflow`). |
| `migration` | Run or test migrations. |
| `run`       | Run Langflow.                                                          |
| `superuser` | Create a superuser. |
- `--help`: Displays all available options.
- `--host`: Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`.
- `--workers`: Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`.
- `--timeout`: Sets the worker timeout in seconds. The default is `60`.
- `--port`: Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`.
- `--env-file`: Specifies the path to the .env file containing environment variables. The default is `.env`.
- `--log-level`: Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`.
- `--components-path`: Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`.
- `--log-file`: Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`.
- `--cache`: Selects the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`.
- `--dev/--no-dev`: Toggles the development mode. The default is `no-dev`.
- `--path`: Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable.
- `--open-browser/--no-open-browser`: Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`.
- `--remove-api-keys/--no-remove-api-keys`: Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`.
- `--install-completion [bash|zsh|fish|powershell|pwsh]`: Installs completion for the specified shell.
- `--show-completion [bash|zsh|fish|powershell|pwsh]`: Shows completion for the specified shell, allowing you to copy it or customize the installation.
- `--backend-only`: This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable.
- `--store`: This parameter, with a default value of `True`, enables the store features; use `--no-store` to deactivate them. It can be configured using the `LANGFLOW_STORE` environment variable.
### Options
These parameters are important for users who need to customize the behavior of Langflow, especially in development or specialized deployment scenarios.
| Option | Description |
| ---------------------- | -------------------------------------------------------------------------------- |
| `--install-completion` | Install completion for the current shell. |
| `--show-completion` | Show completion for the current shell, to copy it or customize the installation. |
| `--help` | Show this message and exit. |
### API Key Command
## langflow api-key
The `api-key` command allows you to create an API key for accessing Langflow's API when `LANGFLOW_AUTO_LOGIN` is set to `True`.
```bash
python -m langflow api-key --help
Usage: langflow api-key [OPTIONS]
Creates an API key for the default superuser if AUTO_LOGIN is enabled.
Args: log_level (str, optional): Logging level. Defaults to "error".
Returns: None
╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ --log-level TEXT Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] │
│ --help Show this message and exit. │
╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
```
Once you run the `api-key` command, it will create an API key for the default superuser if `LANGFLOW_AUTO_LOGIN` is set to `True`.
Run the `api-key` command to create an API key for the default superuser if `LANGFLOW_AUTO_LOGIN` is set to `True`.
```bash
langflow api-key
# or
python -m langflow api-key
╭─────────────────────────────────────────────────────────────────────╮
│ API Key Created Successfully: │
@ -67,11 +56,98 @@ python -m langflow api-key
│ Make sure to store it in a secure location. │
│ │
│ The API key has been copied to your clipboard. Cmd + V to paste it. │
╰─────────────────────────────────────────────────────────────────────╯
╰──────────────────────────────
```
### Environment Variables
### Options
| Option | Type | Description |
| ----------- | ---- | ------------------------------------------------------------- |
| --log-level | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] |
| --help | | Show this message and exit. |
## langflow copy-db
Run the `copy-db` command to copy the cached `langflow.db` and `langflow-pre.db` database files to the current directory.
If the files exist in the cache directory, they will be copied to the same directory as `__main__.py`, which can be found with `which langflow`.
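It follows the same invocation pattern as the other commands:
```bash
langflow copy-db
# or
python -m langflow copy-db
```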
### Options
None.
## langflow migration
Run or test migrations with the [Alembic](https://pypi.org/project/alembic/) database tool.
```bash
langflow migration
# or
python -m langflow migration
```
### Options
| Option | Description |
| ------------------- | -------------------------------------------------------------------------------------------------------------------------- |
| `--test, --no-test` | Run migrations in test mode. [default: test] |
| `--fix, --no-fix` | Fix migrations. This is a destructive operation, and should only be used if you know what you are doing. [default: no-fix] |
| `--help` | Show this message and exit. |
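For example, to preview the migrations without applying them (test mode is the default, shown here explicitly):
```bash
langflow migration --test
```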
## langflow run
Run Langflow.
```bash
langflow run
# or
python -m langflow run
```
### Options
| Option | Description |
| ---------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `--help` | Displays all available options. |
| `--host` | Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`. |
| `--workers` | Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`. |
| `--timeout` | Sets the worker timeout in seconds. The default is `60`. |
| `--port` | Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`. |
| `--env-file` | Specifies the path to the .env file containing environment variables. The default is `.env`. |
| `--log-level` | Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`. |
| `--components-path` | Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`. |
| `--log-file` | Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`. |
| `--cache` | Selects the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`. |
| `--dev`/`--no-dev` | Toggles the development mode. The default is `no-dev`. |
| `--path` | Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable. |
| `--open-browser`/`--no-open-browser` | Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`. |
| `--remove-api-keys`/`--no-remove-api-keys` | Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`. |
| `--install-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Installs completion for the specified shell. |
| `--show-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Shows completion for the specified shell, allowing you to copy it or customize the installation. |
| `--backend-only` | This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable. For more, see [Backend-only](../deployment/backend-only.md). |
| `--store` | This parameter, with a default value of `True`, enables the store features; use `--no-store` to deactivate them. It can be configured using the `LANGFLOW_STORE` environment variable. |
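As an illustration, a headless deployment might combine several of these flags (the values here are examples only):
```bash
# Serve the API on all interfaces with two workers and no frontend
langflow run --host 0.0.0.0 --port 7860 --workers 2 --backend-only
```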
#### Environment Variables
You can configure many of the CLI options using environment variables. These can be exported in your operating system or added to a `.env` file and loaded using the `--env-file` option.
A sample `.env` file named `.env.example` is included with the project. Copy this file to a new file named `.env` and replace the example values with your actual settings. If you're setting values in both your OS and the `.env` file, the `.env` settings will take precedence.
## langflow superuser
Create a superuser for Langflow.
```bash
langflow superuser
# or
python -m langflow superuser
```
### Options
| Option | Type | Description |
| ------------- | ---- | ------------------------------------------------------------- |
| `--username` | TEXT | Username for the superuser. [default: None] [required] |
| `--password` | TEXT | Password for the superuser. [default: None] [required] |
| `--log-level` | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] |
| `--help` | | Show this message and exit. |
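Since `--username` and `--password` are required, a typical invocation looks like this (the credentials are placeholders):
```bash
langflow superuser --username admin --password a-strong-password
```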

View file

@ -10,7 +10,7 @@ Langflow [Discord](https://discord.gg/EqksyE2EX9) server.
---
## 🐦 Stay tunned for **Langflow** on Twitter
## 🐦 Stay tuned for **Langflow** on Twitter
Follow [@langflow_ai](https://twitter.com/langflow_ai) on **Twitter** to get the latest news about **Langflow**.

View file

@ -0,0 +1,123 @@
# Backend-only
You can run Langflow in `--backend-only` mode to expose your Langflow app as an API, without running the frontend UI.
Start Langflow in backend-only mode with `python3 -m langflow run --backend-only`.
The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`.
Langflow will now serve requests to its API without the frontend running.
## Prerequisites
- [Langflow installed](../getting-started/install-langflow.mdx)
- [OpenAI API key](https://platform.openai.com)
- [A Langflow flow created](../starter-projects/basic-prompting.mdx)
## Download your flow's curl call
1. Click API.
2. Click **curl** > **Copy code** and save the code to your local machine.
It will look something like this:
```bash
curl -X POST \
"http://127.0.0.1:7864/api/v1/run/ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef?stream=false" \
-H 'Content-Type: application/json' \
-d '{"input_value": "message",
"output_type": "chat",
"input_type": "chat",
"tweaks": {
"Prompt-kvo86": {},
"OpenAIModel-MilkD": {},
"ChatOutput-ktwdw": {},
"ChatInput-xXC4F": {}
}}'
```
Note the flow ID of `ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef`. You can find this ID in the UI as well to ensure you're querying the right flow.
## Start Langflow in backend-only mode
1. Stop Langflow with Ctrl+C.
2. Start Langflow in backend-only mode with `python3 -m langflow run --backend-only`.
The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`.
Langflow will now serve requests to its API.
3. Run the curl code you copied from the UI.
You should get a result like this:
```bash
{"session_id":"ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880","outputs":[{"inputs":{"input_value":"hi, are you there?"},"outputs":[{"results":{"result":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?"},"artifacts":{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI"},"messages":[{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI","component_id":"ChatOutput-ktwdw"}],"component_display_name":"Chat Output","component_id":"ChatOutput-ktwdw","used_frozen_result":false}]}]}%
```
Again, note that the flow ID matches.
Langflow is receiving your POST request, running the flow, and returning the result, all without running the frontend. Cool!
## Download your flow's Python API call
Instead of using curl, you can download your flow as a Python API call.
1. Click API.
2. Click **Python API** > **Copy code** and save the code to your local machine.
The code will look something like this:
```python
import requests
from typing import Optional

BASE_API_URL = "http://127.0.0.1:7864/api/v1/run"
FLOW_ID = "ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef"
# You can tweak the flow by adding a tweaks dictionary
# e.g. {"OpenAI-XXXXX": {"model_name": "gpt-4"}}

def run_flow(message: str,
             flow_id: str,
             output_type: str = "chat",
             input_type: str = "chat",
             tweaks: Optional[dict] = None,
             api_key: Optional[str] = None) -> dict:
    """
    Run a flow with a given message and optional tweaks.

    :param message: The message to send to the flow
    :param flow_id: The ID of the flow to run
    :param tweaks: Optional tweaks to customize the flow
    :return: The JSON response from the flow
    """
    api_url = f"{BASE_API_URL}/{flow_id}"
    payload = {
        "input_value": message,
        "output_type": output_type,
        "input_type": input_type,
    }
    headers = None
    if tweaks:
        payload["tweaks"] = tweaks
    if api_key:
        headers = {"x-api-key": api_key}
    response = requests.post(api_url, json=payload, headers=headers)
    return response.json()

# Set up any tweaks you want to apply to the flow
message = "message"
print(run_flow(message=message, flow_id=FLOW_ID))
```
3. Run your Python app:
```bash
python3 app.py
```
The result is similar to the curl call:
```bash
{'session_id': 'ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880', 'outputs': [{'inputs': {'input_value': 'message'}, 'outputs': [{'results': {'result': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!"}, 'artifacts': {'message': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!", 'sender': 'Machine', 'sender_name': 'AI'}, 'messages': [{'message': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!", 'sender': 'Machine', 'sender_name': 'AI', 'component_id': 'ChatOutput-ktwdw'}], 'component_display_name': 'Chat Output', 'component_id': 'ChatOutput-ktwdw', 'used_frozen_result': False}]}]}
```
Your Python app POSTs to your Langflow server, and the server runs the flow and returns the result.
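The same authenticated request can be sketched with curl by passing the `x-api-key` header that the Python helper sets (the key value here is a placeholder):
```bash
curl -X POST \
  "http://127.0.0.1:7864/api/v1/run/ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef?stream=false" \
  -H 'Content-Type: application/json' \
  -H 'x-api-key: YOUR_API_KEY' \
  -d '{"input_value": "hello", "output_type": "chat", "input_type": "chat"}'
```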
See [API](../administration/api.mdx) for more ways to interact with your headless Langflow server.

View file

@ -0,0 +1,65 @@
# Docker
This guide will help you get LangFlow up and running using Docker and Docker Compose.
## Prerequisites
- Docker
- Docker Compose
## Steps
1. Clone the LangFlow repository:
```sh
git clone https://github.com/langflow-ai/langflow.git
```
2. Navigate to the `docker_example` directory:
```sh
cd langflow/docker_example
```
3. Run the Docker Compose file:
```sh
docker compose up
```
LangFlow will now be accessible at [http://localhost:7860/](http://localhost:7860/).
## Docker Compose Configuration
The Docker Compose configuration spins up two services: `langflow` and `postgres`.
### LangFlow Service
The `langflow` service uses the `langflowai/langflow:latest` Docker image and exposes port 7860. It depends on the `postgres` service.
Environment variables:
- `LANGFLOW_DATABASE_URL`: The connection string for the PostgreSQL database.
- `LANGFLOW_CONFIG_DIR`: The directory where LangFlow stores logs, file storage, monitor data, and secret keys.
Volumes:
- `langflow-data`: This volume is mapped to `/var/lib/langflow` in the container.
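As an illustration, a connection string matching the Postgres service below might look like this (the user, password, and database name are whatever you configure for that service):
```bash
# Illustrative; mirrors POSTGRES_USER, POSTGRES_PASSWORD, and POSTGRES_DB
LANGFLOW_DATABASE_URL=postgresql://langflow:langflow@postgres:5432/langflow
```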
### PostgreSQL Service
The `postgres` service uses the `postgres:16` Docker image and exposes port 5432.
Environment variables:
- `POSTGRES_USER`: The username for the PostgreSQL database.
- `POSTGRES_PASSWORD`: The password for the PostgreSQL database.
- `POSTGRES_DB`: The name of the PostgreSQL database.
Volumes:
- `langflow-postgres`: This volume is mapped to `/var/lib/postgresql/data` in the container.
## Switching to a Specific LangFlow Version
If you want to use a specific version of LangFlow, you can modify the `image` field under the `langflow` service in the Docker Compose file. For example, to use version 1.0-alpha, change `langflowai/langflow:latest` to `langflowai/langflow:1.0-alpha`.
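You can also sanity-check a pinned image directly with Docker before changing the Compose file (a sketch; the tag is taken from the example above):
```bash
docker run -it --rm -p 7860:7860 langflowai/langflow:1.0-alpha
```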

View file

@ -9,14 +9,11 @@ The `AddContentToPage` component converts markdown text to Notion blocks and app
[Notion Reference](https://developers.notion.com/reference/patch-block-children)
<Admonition type="tip" title="Component Functionality">
The `AddContentToPage` component enables you to:
- Convert markdown text to Notion blocks.
- Append the converted blocks to a specified Notion page.
- Seamlessly integrate Notion content creation into Langflow workflows.
</Admonition>
## Component Usage
@ -100,8 +97,6 @@ class NotionPageCreator(CustomComponent):
## Example Usage
<Admonition type="info" title="Example Usage">
An example of using the `AddContentToPage` component in a Langflow flow, with Markdown as input:
<ZoomableImage
@ -115,8 +110,6 @@ style={{ width: "100%", margin: "20px 0" }}
In this example, the `AddContentToPage` component connects to a `MarkdownLoader` component to provide the markdown text input. The converted Notion blocks are appended to the specified Notion page using the provided `block_id` and `notion_secret`.
</Admonition>
## Best Practices
When using the `AddContentToPage` component:

View file

@ -9,13 +9,11 @@ The `NotionUserList` component retrieves users from Notion. It provides a conven
[Notion Reference](https://developers.notion.com/reference/get-users)
<Admonition type="tip" title="Component Functionality">
The `NotionUserList` component enables you to:
The `NotionUserList` component enables you to:
- Retrieve user data from Notion
- Access user information such as ID, type, name, and avatar URL
- Integrate Notion user data seamlessly into your Langflow workflows
</Admonition>
## Component Usage
@ -95,7 +93,6 @@ class NotionUserList(CustomComponent):
## Example Usage
<Admonition type="info" title="Example Usage">
Here's an example of how you can use the `NotionUserList` component in a Langflow flow, passing the outputs to the Prompt component:
<ZoomableImage
@ -107,8 +104,6 @@ sources={{
style={{ width: "100%", margin: "20px 0" }}
/>
</Admonition>
## Best Practices
When using the `NotionUserList` component, consider the following best practices:

View file

@ -113,7 +113,11 @@ module.exports = {
type: "category",
label: "Deployment",
collapsed: true,
items: ["deployment/gcp-deployment"],
items: [
"deployment/docker",
"deployment/backend-only",
"deployment/gcp-deployment",
],
},
{
type: "category",

View file

@ -1,3 +1,3 @@
<svg width="18" height="18" viewBox="0 0 24 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M24 2.36764C23.1181 2.76923 22.1687 3.04081 21.1728 3.16215C22.1898 2.5381 22.9703 1.54857 23.338 0.369812C22.3856 0.947636 21.3334 1.368 20.2092 1.59334C19.3133 0.612492 18.0328 0 16.6156 0C13.8983 0 11.6936 2.26074 11.6936 5.04874C11.6936 5.44456 11.7359 5.82881 11.8204 6.19862C7.72812 5.98771 4.10072 3.97977 1.67071 0.921629C1.24669 1.66992 1.00439 2.5381 1.00439 3.46262C1.00439 5.21343 1.87357 6.75911 3.19493 7.66485C2.38915 7.64029 1.62845 7.41061 0.963547 7.03502V7.09713C0.963547 9.54422 2.66102 11.5854 4.91495 12.0476C4.5022 12.1661 4.06691 12.2253 3.61754 12.2253C3.30058 12.2253 2.99066 12.195 2.69062 12.1358C3.31748 14.1408 5.1347 15.6013 7.29001 15.6403C5.6052 16.9953 3.48089 17.8028 1.17485 17.8028C0.777598 17.8028 0.384575 17.7796 0 17.7334C2.17926 19.1636 4.76844 20 7.54781 20C16.6057 20 21.5573 12.3077 21.5573 5.63524C21.5573 5.41566 21.5531 5.19609 21.5447 4.98084C22.5067 4.26868 23.3422 3.38027 24 2.36764Z" fill="#00AAEC"/>
<svg width="1200" height="1227" viewBox="0 0 1200 1227" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M714.163 519.284L1160.89 0H1055.03L667.137 450.887L357.328 0H0L468.492 681.821L0 1226.37H105.866L515.491 750.218L842.672 1226.37H1200L714.137 519.284H714.163ZM569.165 687.828L521.697 619.934L144.011 79.6944H306.615L611.412 515.685L658.88 583.579L1055.08 1150.3H892.476L569.165 687.854V687.828Z" fill="white"/>
</svg>

Before: 1.1 KiB → After: 430 B

poetry.lock (generated, 311 lines)
View file

@ -261,13 +261,13 @@ extras = ["pyaudio (>=0.2.13)"]
[[package]]
name = "astrapy"
version = "1.2.0"
version = "1.2.1"
description = "AstraPy is a Pythonic SDK for DataStax Astra and its Data API"
optional = false
python-versions = "<4.0.0,>=3.8.0"
files = [
{file = "astrapy-1.2.0-py3-none-any.whl", hash = "sha256:5d65242771934c38ebe16f330e9e517968c1437846dabdbe7e48470f7b1782e8"},
{file = "astrapy-1.2.0.tar.gz", hash = "sha256:6ce1b421d1ae21fe73373fa36048d8d56c775367886525504f01c48cbb742842"},
{file = "astrapy-1.2.1-py3-none-any.whl", hash = "sha256:0d7ca1e6f18a6a4e9a41ffaf2aa4cc585d36de3e983b5c5ce0bbb30a1595e30b"},
{file = "astrapy-1.2.1.tar.gz", hash = "sha256:c4ba88ef16ac1e990ccba322d376b6ea256513a3004a0894c14bfa2403f1d646"},
]
[package.dependencies]
@ -471,17 +471,17 @@ files = [
[[package]]
name = "boto3"
version = "1.34.119"
version = "1.34.121"
description = "The AWS SDK for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "boto3-1.34.119-py3-none-any.whl", hash = "sha256:8f9c43c54b3dfaa36c4a0d7b42c417227a515bc7a2e163e62802780000a5a3e2"},
{file = "boto3-1.34.119.tar.gz", hash = "sha256:cea2365a25b2b83a97e77f24ac6f922ef62e20636b42f9f6ee9f97188f9c1c03"},
{file = "boto3-1.34.121-py3-none-any.whl", hash = "sha256:4e79e400d6d44b4eee5deda6ac0ecd08a3f5a30c45a0d30712795cdc4459fd79"},
{file = "boto3-1.34.121.tar.gz", hash = "sha256:ec89f3e0b0dc959c418df29e14d3748c0b05ab7acf7c0b90c839e9f340a659fa"},
]
[package.dependencies]
botocore = ">=1.34.119,<1.35.0"
botocore = ">=1.34.121,<1.35.0"
jmespath = ">=0.7.1,<2.0.0"
s3transfer = ">=0.10.0,<0.11.0"
@ -490,13 +490,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
[[package]]
name = "botocore"
version = "1.34.119"
version = "1.34.121"
description = "Low-level, data-driven core of boto 3."
optional = false
python-versions = ">=3.8"
files = [
{file = "botocore-1.34.119-py3-none-any.whl", hash = "sha256:4bdf7926a1290b2650d62899ceba65073dd2693e61c35f5cdeb3a286a0aaa27b"},
{file = "botocore-1.34.119.tar.gz", hash = "sha256:b253f15b24b87b070e176af48e8ef146516090429d30a7d8b136a4c079b28008"},
{file = "botocore-1.34.121-py3-none-any.whl", hash = "sha256:25b05c7646a9f240cde1c8f839552a43f27e71e15c42600275dea93e219f7dd9"},
{file = "botocore-1.34.121.tar.gz", hash = "sha256:1a8f94b917c47dfd84a0b531ab607dc53570efb0d073d8686600f2d2be985323"},
]
[package.dependencies]
@ -505,7 +505,7 @@ python-dateutil = ">=2.1,<3.0.0"
urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""}
[package.extras]
crt = ["awscrt (==0.20.9)"]
crt = ["awscrt (==0.20.11)"]
[[package]]
name = "brotli"
@ -698,13 +698,13 @@ graph = ["gremlinpython (==3.4.6)"]
[[package]]
name = "cassio"
version = "0.1.7"
version = "0.1.8"
description = "A framework-agnostic Python library to seamlessly integrate Apache Cassandra(R) with ML/LLM/genAI workloads."
optional = false
python-versions = "<4.0,>=3.8"
files = [
{file = "cassio-0.1.7-py3-none-any.whl", hash = "sha256:08d1028a20d09bd207de0e17eaf7ae821b3c8e4788555e2d337aa440e0846d87"},
{file = "cassio-0.1.7.tar.gz", hash = "sha256:44f705dff8a9a1c48527db2c9e968686358c960fa21ba940d9e66de00639ad78"},
{file = "cassio-0.1.8-py3-none-any.whl", hash = "sha256:c09e7c884ba7227ff5277c86f3b0f31c523672ea407f56d093c7227e69c54d94"},
{file = "cassio-0.1.8.tar.gz", hash = "sha256:4e09929506cb3dd6fad217e89846d0a1a59069afd24b82c72526ef6f2e9271af"},
]
[package.dependencies]
@ -1824,13 +1824,13 @@ develop = ["aiohttp", "furo", "httpx", "mock", "opentelemetry-api", "opentelemet
[[package]]
name = "elasticsearch"
version = "8.13.2"
version = "8.14.0"
description = "Python client for Elasticsearch"
optional = false
python-versions = ">=3.7"
files = [
{file = "elasticsearch-8.13.2-py3-none-any.whl", hash = "sha256:7412ceae9c0e437a72854ab3123aa1f37110d1635cc645366988b8c0fee98598"},
{file = "elasticsearch-8.13.2.tar.gz", hash = "sha256:d51c93431a459b2b7c6c919b6e92a2adc8ac712758de9aeeb16cd4997fc148ad"},
{file = "elasticsearch-8.14.0-py3-none-any.whl", hash = "sha256:cef8ef70a81af027f3da74a4f7d9296b390c636903088439087b8262a468c130"},
{file = "elasticsearch-8.14.0.tar.gz", hash = "sha256:aa2490029dd96f4015b333c1827aa21fd6c0a4d223b00dfb0fe933b8d09a511b"},
]
[package.dependencies]
@ -2897,13 +2897,13 @@ pydantic = ">=1.10,<3"
[[package]]
name = "gprof2dot"
version = "2024.6.5"
version = "2024.6.6"
description = "Generate a dot graph from the output of several profilers."
optional = false
python-versions = ">=2.7"
python-versions = ">=3.8"
files = [
{file = "gprof2dot-2024.6.5-py2.py3-none-any.whl", hash = "sha256:0be69ac4f5e0d6f57e0c627fa8f6053bdca6a7a226ea6fd8a74b69c845c7d2df"},
{file = "gprof2dot-2024.6.5.tar.gz", hash = "sha256:7564e4483f710d463bca1f27668aa595faaf0beee8ad0461df063a44305122a0"},
{file = "gprof2dot-2024.6.6-py2.py3-none-any.whl", hash = "sha256:45b14ad7ce64e299c8f526881007b9eb2c6b75505d5613e96e66ee4d5ab33696"},
{file = "gprof2dot-2024.6.6.tar.gz", hash = "sha256:fa1420c60025a9eb7734f65225b4da02a10fc6dd741b37fa129bc6b41951e5ab"},
]
[[package]]
@ -3446,100 +3446,105 @@ files = [
[[package]]
name = "ijson"
version = "3.2.3"
version = "3.3.0"
description = "Iterative JSON parser with standard Python iterator interfaces"
optional = false
python-versions = "*"
files = [
{file = "ijson-3.2.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0a4ae076bf97b0430e4e16c9cb635a6b773904aec45ed8dcbc9b17211b8569ba"},
{file = "ijson-3.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cfced0a6ec85916eb8c8e22415b7267ae118eaff2a860c42d2cc1261711d0d31"},
{file = "ijson-3.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0b9d1141cfd1e6d6643aa0b4876730d0d28371815ce846d2e4e84a2d4f471cf3"},
{file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e0a27db6454edd6013d40a956d008361aac5bff375a9c04ab11fc8c214250b5"},
{file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c0d526ccb335c3c13063c273637d8611f32970603dfb182177b232d01f14c23"},
{file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:545a30b3659df2a3481593d30d60491d1594bc8005f99600e1bba647bb44cbb5"},
{file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9680e37a10fedb3eab24a4a7e749d8a73f26f1a4c901430e7aa81b5da15f7307"},
{file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2a80c0bb1053055d1599e44dc1396f713e8b3407000e6390add72d49633ff3bb"},
{file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f05ed49f434ce396ddcf99e9fd98245328e99f991283850c309f5e3182211a79"},
{file = "ijson-3.2.3-cp310-cp310-win32.whl", hash = "sha256:b4eb2304573c9fdf448d3fa4a4fdcb727b93002b5c5c56c14a5ffbbc39f64ae4"},
{file = "ijson-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:923131f5153c70936e8bd2dd9dcfcff43c67a3d1c789e9c96724747423c173eb"},
{file = "ijson-3.2.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:904f77dd3d87736ff668884fe5197a184748eb0c3e302ded61706501d0327465"},
{file = "ijson-3.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0974444c1f416e19de1e9f567a4560890095e71e81623c509feff642114c1e53"},
{file = "ijson-3.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1a4b8eb69b6d7b4e94170aa991efad75ba156b05f0de2a6cd84f991def12ff9"},
{file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d052417fd7ce2221114f8d3b58f05a83c1a2b6b99cafe0b86ac9ed5e2fc889df"},
{file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b8064a85ec1b0beda7dd028e887f7112670d574db606f68006c72dd0bb0e0e2"},
{file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaac293853f1342a8d2a45ac1f723c860f700860e7743fb97f7b76356df883a8"},
{file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6c32c18a934c1dc8917455b0ce478fd7a26c50c364bd52c5a4fb0fc6bb516af7"},
{file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:713a919e0220ac44dab12b5fed74f9130f3480e55e90f9d80f58de129ea24f83"},
{file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a3a6a2fbbe7550ffe52d151cf76065e6b89cfb3e9d0463e49a7e322a25d0426"},
{file = "ijson-3.2.3-cp311-cp311-win32.whl", hash = "sha256:6a4db2f7fb9acfb855c9ae1aae602e4648dd1f88804a0d5cfb78c3639bcf156c"},
{file = "ijson-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:ccd6be56335cbb845f3d3021b1766299c056c70c4c9165fb2fbe2d62258bae3f"},
{file = "ijson-3.2.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:055b71bbc37af5c3c5861afe789e15211d2d3d06ac51ee5a647adf4def19c0ea"},
{file = "ijson-3.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c075a547de32f265a5dd139ab2035900fef6653951628862e5cdce0d101af557"},
{file = "ijson-3.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:457f8a5fc559478ac6b06b6d37ebacb4811f8c5156e997f0d87d708b0d8ab2ae"},
{file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9788f0c915351f41f0e69ec2618b81ebfcf9f13d9d67c6d404c7f5afda3e4afb"},
{file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa234ab7a6a33ed51494d9d2197fb96296f9217ecae57f5551a55589091e7853"},
{file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdd0dc5da4f9dc6d12ab6e8e0c57d8b41d3c8f9ceed31a99dae7b2baf9ea769a"},
{file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c6beb80df19713e39e68dc5c337b5c76d36ccf69c30b79034634e5e4c14d6904"},
{file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a2973ce57afb142d96f35a14e9cfec08308ef178a2c76b8b5e1e98f3960438bf"},
{file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:105c314fd624e81ed20f925271ec506523b8dd236589ab6c0208b8707d652a0e"},
{file = "ijson-3.2.3-cp312-cp312-win32.whl", hash = "sha256:ac44781de5e901ce8339352bb5594fcb3b94ced315a34dbe840b4cff3450e23b"},
{file = "ijson-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:0567e8c833825b119e74e10a7c29761dc65fcd155f5d4cb10f9d3b8916ef9912"},
{file = "ijson-3.2.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:eeb286639649fb6bed37997a5e30eefcacddac79476d24128348ec890b2a0ccb"},
{file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:396338a655fb9af4ac59dd09c189885b51fa0eefc84d35408662031023c110d1"},
{file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e0243d166d11a2a47c17c7e885debf3b19ed136be2af1f5d1c34212850236ac"},
{file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85afdb3f3a5d0011584d4fa8e6dccc5936be51c27e84cd2882fe904ca3bd04c5"},
{file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4fc35d569eff3afa76bfecf533f818ecb9390105be257f3f83c03204661ace70"},
{file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:455d7d3b7a6aacfb8ab1ebcaf697eedf5be66e044eac32508fccdc633d995f0e"},
{file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:c63f3d57dbbac56cead05b12b81e8e1e259f14ce7f233a8cbe7fa0996733b628"},
{file = "ijson-3.2.3-cp36-cp36m-win32.whl", hash = "sha256:a4d7fe3629de3ecb088bff6dfe25f77be3e8261ed53d5e244717e266f8544305"},
{file = "ijson-3.2.3-cp36-cp36m-win_amd64.whl", hash = "sha256:96190d59f015b5a2af388a98446e411f58ecc6a93934e036daa75f75d02386a0"},
{file = "ijson-3.2.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:35194e0b8a2bda12b4096e2e792efa5d4801a0abb950c48ade351d479cd22ba5"},
{file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1053fb5f0b010ee76ca515e6af36b50d26c1728ad46be12f1f147a835341083"},
{file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:211124cff9d9d139dd0dfced356f1472860352c055d2481459038b8205d7d742"},
{file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92dc4d48e9f6a271292d6079e9fcdce33c83d1acf11e6e12696fb05c5889fe74"},
{file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3dcc33ee56f92a77f48776014ddb47af67c33dda361e84371153c4f1ed4434e1"},
{file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:98c6799925a5d1988da4cd68879b8eeab52c6e029acc45e03abb7921a4715c4b"},
{file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4252e48c95cd8ceefc2caade310559ab61c37d82dfa045928ed05328eb5b5f65"},
{file = "ijson-3.2.3-cp37-cp37m-win32.whl", hash = "sha256:644f4f03349ff2731fd515afd1c91b9e439e90c9f8c28292251834154edbffca"},
{file = "ijson-3.2.3-cp37-cp37m-win_amd64.whl", hash = "sha256:ba33c764afa9ecef62801ba7ac0319268a7526f50f7601370d9f8f04e77fc02b"},
{file = "ijson-3.2.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4b2ec8c2a3f1742cbd5f36b65e192028e541b5fd8c7fd97c1fc0ca6c427c704a"},
{file = "ijson-3.2.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7dc357da4b4ebd8903e77dbcc3ce0555ee29ebe0747c3c7f56adda423df8ec89"},
{file = "ijson-3.2.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bcc51c84bb220ac330122468fe526a7777faa6464e3b04c15b476761beea424f"},
{file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8d54b624629f9903005c58d9321a036c72f5c212701bbb93d1a520ecd15e370"},
{file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6ea7c7e3ec44742e867c72fd750c6a1e35b112f88a917615332c4476e718d40"},
{file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:916acdc5e504f8b66c3e287ada5d4b39a3275fc1f2013c4b05d1ab9933671a6c"},
{file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81815b4184b85ce124bfc4c446d5f5e5e643fc119771c5916f035220ada29974"},
{file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b49fd5fe1cd9c1c8caf6c59f82b08117dd6bea2ec45b641594e25948f48f4169"},
{file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:86b3c91fdcb8ffb30556c9669930f02b7642de58ca2987845b04f0d7fe46d9a8"},
{file = "ijson-3.2.3-cp38-cp38-win32.whl", hash = "sha256:a729b0c8fb935481afe3cf7e0dadd0da3a69cc7f145dbab8502e2f1e01d85a7c"},
{file = "ijson-3.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:d34e049992d8a46922f96483e96b32ac4c9cffd01a5c33a928e70a283710cd58"},
{file = "ijson-3.2.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9c2a12dcdb6fa28f333bf10b3a0f80ec70bc45280d8435be7e19696fab2bc706"},
{file = "ijson-3.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1844c5b57da21466f255a0aeddf89049e730d7f3dfc4d750f0e65c36e6a61a7c"},
{file = "ijson-3.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ec3e5ff2515f1c40ef6a94983158e172f004cd643b9e4b5302017139b6c96e4"},
{file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46bafb1b9959872a1f946f8dd9c6f1a30a970fc05b7bfae8579da3f1f988e598"},
{file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab4db9fee0138b60e31b3c02fff8a4c28d7b152040553b6a91b60354aebd4b02"},
{file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4bc87e69d1997c6a55fff5ee2af878720801ff6ab1fb3b7f94adda050651e37"},
{file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e9fd906f0c38e9f0bfd5365e1bed98d649f506721f76bb1a9baa5d7374f26f19"},
{file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e84d27d1acb60d9102728d06b9650e5b7e5cb0631bd6e3dfadba8fb6a80d6c2f"},
{file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2cc04fc0a22bb945cd179f614845c8b5106c0b3939ee0d84ce67c7a61ac1a936"},
{file = "ijson-3.2.3-cp39-cp39-win32.whl", hash = "sha256:e641814793a037175f7ec1b717ebb68f26d89d82cfd66f36e588f32d7e488d5f"},
{file = "ijson-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:6bd3e7e91d031f1e8cea7ce53f704ab74e61e505e8072467e092172422728b22"},
{file = "ijson-3.2.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:06f9707da06a19b01013f8c65bf67db523662a9b4a4ff027e946e66c261f17f0"},
{file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be8495f7c13fa1f622a2c6b64e79ac63965b89caf664cc4e701c335c652d15f2"},
{file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7596b42f38c3dcf9d434dddd50f46aeb28e96f891444c2b4b1266304a19a2c09"},
{file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbac4e9609a1086bbad075beb2ceec486a3b138604e12d2059a33ce2cba93051"},
{file = "ijson-3.2.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:db2d6341f9cb538253e7fe23311d59252f124f47165221d3c06a7ed667ecd595"},
{file = "ijson-3.2.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fa8b98be298efbb2588f883f9953113d8a0023ab39abe77fe734b71b46b1220a"},
{file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:674e585361c702fad050ab4c153fd168dc30f5980ef42b64400bc84d194e662d"},
{file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd12e42b9cb9c0166559a3ffa276b4f9fc9d5b4c304e5a13668642d34b48b634"},
{file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d31e0d771d82def80cd4663a66de277c3b44ba82cd48f630526b52f74663c639"},
{file = "ijson-3.2.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ce4c70c23521179d6da842bb9bc2e36bb9fad1e0187e35423ff0f282890c9ca"},
{file = "ijson-3.2.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:39f551a6fbeed4433c85269c7c8778e2aaea2501d7ebcb65b38f556030642c17"},
{file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b14d322fec0de7af16f3ef920bf282f0dd747200b69e0b9628117f381b7775b"},
{file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7851a341429b12d4527ca507097c959659baf5106c7074d15c17c387719ffbcd"},
{file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db3bf1b42191b5cc9b6441552fdcb3b583594cb6b19e90d1578b7cbcf80d0fae"},
{file = "ijson-3.2.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6f662dc44362a53af3084d3765bb01cd7b4734d1f484a6095cad4cb0cbfe5374"},
{file = "ijson-3.2.3.tar.gz", hash = "sha256:10294e9bf89cb713da05bc4790bdff616610432db561964827074898e174f917"},
{file = "ijson-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7f7a5250599c366369fbf3bc4e176f5daa28eb6bc7d6130d02462ed335361675"},
{file = "ijson-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f87a7e52f79059f9c58f6886c262061065eb6f7554a587be7ed3aa63e6b71b34"},
{file = "ijson-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b73b493af9e947caed75d329676b1b801d673b17481962823a3e55fe529c8b8b"},
{file = "ijson-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5576415f3d76290b160aa093ff968f8bf6de7d681e16e463a0134106b506f49"},
{file = "ijson-3.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e9ffe358d5fdd6b878a8a364e96e15ca7ca57b92a48f588378cef315a8b019e"},
{file = "ijson-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8643c255a25824ddd0895c59f2319c019e13e949dc37162f876c41a283361527"},
{file = "ijson-3.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:df3ab5e078cab19f7eaeef1d5f063103e1ebf8c26d059767b26a6a0ad8b250a3"},
{file = "ijson-3.3.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3dc1fb02c6ed0bae1b4bf96971258bf88aea72051b6e4cebae97cff7090c0607"},
{file = "ijson-3.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e9afd97339fc5a20f0542c971f90f3ca97e73d3050cdc488d540b63fae45329a"},
{file = "ijson-3.3.0-cp310-cp310-win32.whl", hash = "sha256:844c0d1c04c40fd1b60f148dc829d3f69b2de789d0ba239c35136efe9a386529"},
{file = "ijson-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:d654d045adafdcc6c100e8e911508a2eedbd2a1b5f93f930ba13ea67d7704ee9"},
{file = "ijson-3.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:501dce8eaa537e728aa35810656aa00460a2547dcb60937c8139f36ec344d7fc"},
{file = "ijson-3.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:658ba9cad0374d37b38c9893f4864f284cdcc7d32041f9808fba8c7bcaadf134"},
{file = "ijson-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2636cb8c0f1023ef16173f4b9a233bcdb1df11c400c603d5f299fac143ca8d70"},
{file = "ijson-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd174b90db68c3bcca273e9391934a25d76929d727dc75224bf244446b28b03b"},
{file = "ijson-3.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:97a9aea46e2a8371c4cf5386d881de833ed782901ac9f67ebcb63bb3b7d115af"},
{file = "ijson-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c594c0abe69d9d6099f4ece17763d53072f65ba60b372d8ba6de8695ce6ee39e"},
{file = "ijson-3.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8e0ff16c224d9bfe4e9e6bd0395826096cda4a3ef51e6c301e1b61007ee2bd24"},
{file = "ijson-3.3.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0015354011303175eae7e2ef5136414e91de2298e5a2e9580ed100b728c07e51"},
{file = "ijson-3.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:034642558afa57351a0ffe6de89e63907c4cf6849070cc10a3b2542dccda1afe"},
{file = "ijson-3.3.0-cp311-cp311-win32.whl", hash = "sha256:192e4b65495978b0bce0c78e859d14772e841724d3269fc1667dc6d2f53cc0ea"},
{file = "ijson-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:72e3488453754bdb45c878e31ce557ea87e1eb0f8b4fc610373da35e8074ce42"},
{file = "ijson-3.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:988e959f2f3d59ebd9c2962ae71b97c0df58323910d0b368cc190ad07429d1bb"},
{file = "ijson-3.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b2f73f0d0fce5300f23a1383d19b44d103bb113b57a69c36fd95b7c03099b181"},
{file = "ijson-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0ee57a28c6bf523d7cb0513096e4eb4dac16cd935695049de7608ec110c2b751"},
{file = "ijson-3.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0155a8f079c688c2ccaea05de1ad69877995c547ba3d3612c1c336edc12a3a5"},
{file = "ijson-3.3.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ab00721304af1ae1afa4313ecfa1bf16b07f55ef91e4a5b93aeaa3e2bd7917c"},
{file = "ijson-3.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40ee3821ee90be0f0e95dcf9862d786a7439bd1113e370736bfdf197e9765bfb"},
{file = "ijson-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3b6987a0bc3e6d0f721b42c7a0198ef897ae50579547b0345f7f02486898f5"},
{file = "ijson-3.3.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:63afea5f2d50d931feb20dcc50954e23cef4127606cc0ecf7a27128ed9f9a9e6"},
{file = "ijson-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b5c3e285e0735fd8c5a26d177eca8b52512cdd8687ca86ec77a0c66e9c510182"},
{file = "ijson-3.3.0-cp312-cp312-win32.whl", hash = "sha256:907f3a8674e489abdcb0206723e5560a5cb1fa42470dcc637942d7b10f28b695"},
{file = "ijson-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:8f890d04ad33262d0c77ead53c85f13abfb82f2c8f078dfbf24b78f59534dfdd"},
{file = "ijson-3.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b9d85a02e77ee8ea6d9e3fd5d515bcc3d798d9c1ea54817e5feb97a9bc5d52fe"},
{file = "ijson-3.3.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6576cdc36d5a09b0c1a3d81e13a45d41a6763188f9eaae2da2839e8a4240bce"},
{file = "ijson-3.3.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5589225c2da4bb732c9c370c5961c39a6db72cf69fb2a28868a5413ed7f39e6"},
{file = "ijson-3.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad04cf38164d983e85f9cba2804566c0160b47086dcca4cf059f7e26c5ace8ca"},
{file = "ijson-3.3.0-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:a3b730ef664b2ef0e99dec01b6573b9b085c766400af363833e08ebc1e38eb2f"},
{file = "ijson-3.3.0-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:4690e3af7b134298055993fcbea161598d23b6d3ede11b12dca6815d82d101d5"},
{file = "ijson-3.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:aaa6bfc2180c31a45fac35d40e3312a3d09954638ce0b2e9424a88e24d262a13"},
{file = "ijson-3.3.0-cp36-cp36m-win32.whl", hash = "sha256:44367090a5a876809eb24943f31e470ba372aaa0d7396b92b953dda953a95d14"},
{file = "ijson-3.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7e2b3e9ca957153557d06c50a26abaf0d0d6c0ddf462271854c968277a6b5372"},
{file = "ijson-3.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:47c144117e5c0e2babb559bc8f3f76153863b8dd90b2d550c51dab5f4b84a87f"},
{file = "ijson-3.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29ce02af5fbf9ba6abb70765e66930aedf73311c7d840478f1ccecac53fefbf3"},
{file = "ijson-3.3.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ac6c3eeed25e3e2cb9b379b48196413e40ac4e2239d910bb33e4e7f6c137745"},
{file = "ijson-3.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d92e339c69b585e7b1d857308ad3ca1636b899e4557897ccd91bb9e4a56c965b"},
{file = "ijson-3.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:8c85447569041939111b8c7dbf6f8fa7a0eb5b2c4aebb3c3bec0fb50d7025121"},
{file = "ijson-3.3.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:542c1e8fddf082159a5d759ee1412c73e944a9a2412077ed00b303ff796907dc"},
{file = "ijson-3.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:30cfea40936afb33b57d24ceaf60d0a2e3d5c1f2335ba2623f21d560737cc730"},
{file = "ijson-3.3.0-cp37-cp37m-win32.whl", hash = "sha256:6b661a959226ad0d255e49b77dba1d13782f028589a42dc3172398dd3814c797"},
{file = "ijson-3.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:0b003501ee0301dbf07d1597482009295e16d647bb177ce52076c2d5e64113e0"},
{file = "ijson-3.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3e8d8de44effe2dbd0d8f3eb9840344b2d5b4cc284a14eb8678aec31d1b6bea8"},
{file = "ijson-3.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9cd5c03c63ae06d4f876b9844c5898d0044c7940ff7460db9f4cd984ac7862b5"},
{file = "ijson-3.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04366e7e4a4078d410845e58a2987fd9c45e63df70773d7b6e87ceef771b51ee"},
{file = "ijson-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de7c1ddb80fa7a3ab045266dca169004b93f284756ad198306533b792774f10a"},
{file = "ijson-3.3.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8851584fb931cffc0caa395f6980525fd5116eab8f73ece9d95e6f9c2c326c4c"},
{file = "ijson-3.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdcfc88347fd981e53c33d832ce4d3e981a0d696b712fbcb45dcc1a43fe65c65"},
{file = "ijson-3.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3917b2b3d0dbbe3296505da52b3cb0befbaf76119b2edaff30bd448af20b5400"},
{file = "ijson-3.3.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:e10c14535abc7ddf3fd024aa36563cd8ab5d2bb6234a5d22c77c30e30fa4fb2b"},
{file = "ijson-3.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3aba5c4f97f4e2ce854b5591a8b0711ca3b0c64d1b253b04ea7b004b0a197ef6"},
{file = "ijson-3.3.0-cp38-cp38-win32.whl", hash = "sha256:b325f42e26659df1a0de66fdb5cde8dd48613da9c99c07d04e9fb9e254b7ee1c"},
{file = "ijson-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:ff835906f84451e143f31c4ce8ad73d83ef4476b944c2a2da91aec8b649570e1"},
{file = "ijson-3.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3c556f5553368dff690c11d0a1fb435d4ff1f84382d904ccc2dc53beb27ba62e"},
{file = "ijson-3.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e4396b55a364a03ff7e71a34828c3ed0c506814dd1f50e16ebed3fc447d5188e"},
{file = "ijson-3.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e6850ae33529d1e43791b30575070670070d5fe007c37f5d06aebc1dd152ab3f"},
{file = "ijson-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36aa56d68ea8def26778eb21576ae13f27b4a47263a7a2581ab2ef58b8de4451"},
{file = "ijson-3.3.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7ec759c4a0fc820ad5dc6a58e9c391e7b16edcb618056baedbedbb9ea3b1524"},
{file = "ijson-3.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b51bab2c4e545dde93cb6d6bb34bf63300b7cd06716f195dd92d9255df728331"},
{file = "ijson-3.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:92355f95a0e4da96d4c404aa3cff2ff033f9180a9515f813255e1526551298c1"},
{file = "ijson-3.3.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8795e88adff5aa3c248c1edce932db003d37a623b5787669ccf205c422b91e4a"},
{file = "ijson-3.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8f83f553f4cde6d3d4eaf58ec11c939c94a0ec545c5b287461cafb184f4b3a14"},
{file = "ijson-3.3.0-cp39-cp39-win32.whl", hash = "sha256:ead50635fb56577c07eff3e557dac39533e0fe603000684eea2af3ed1ad8f941"},
{file = "ijson-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:c8a9befb0c0369f0cf5c1b94178d0d78f66d9cebb9265b36be6e4f66236076b8"},
{file = "ijson-3.3.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2af323a8aec8a50fa9effa6d640691a30a9f8c4925bd5364a1ca97f1ac6b9b5c"},
{file = "ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f64f01795119880023ba3ce43072283a393f0b90f52b66cc0ea1a89aa64a9ccb"},
{file = "ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a716e05547a39b788deaf22725490855337fc36613288aa8ae1601dc8c525553"},
{file = "ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:473f5d921fadc135d1ad698e2697025045cd8ed7e5e842258295012d8a3bc702"},
{file = "ijson-3.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd26b396bc3a1e85f4acebeadbf627fa6117b97f4c10b177d5779577c6607744"},
{file = "ijson-3.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:25fd49031cdf5fd5f1fd21cb45259a64dad30b67e64f745cc8926af1c8c243d3"},
{file = "ijson-3.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b72178b1e565d06ab19319965022b36ef41bcea7ea153b32ec31194bec032a2"},
{file = "ijson-3.3.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d0b6b637d05dbdb29d0bfac2ed8425bb369e7af5271b0cc7cf8b801cb7360c2"},
{file = "ijson-3.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5378d0baa59ae422905c5f182ea0fd74fe7e52a23e3821067a7d58c8306b2191"},
{file = "ijson-3.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:99f5c8ab048ee4233cc4f2b461b205cbe01194f6201018174ac269bf09995749"},
{file = "ijson-3.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:45ff05de889f3dc3d37a59d02096948ce470699f2368b32113954818b21aa74a"},
{file = "ijson-3.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1efb521090dd6cefa7aafd120581947b29af1713c902ff54336b7c7130f04c47"},
{file = "ijson-3.3.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87c727691858fd3a1c085d9980d12395517fcbbf02c69fbb22dede8ee03422da"},
{file = "ijson-3.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0420c24e50389bc251b43c8ed379ab3e3ba065ac8262d98beb6735ab14844460"},
{file = "ijson-3.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:8fdf3721a2aa7d96577970f5604bd81f426969c1822d467f07b3d844fa2fecc7"},
{file = "ijson-3.3.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:891f95c036df1bc95309951940f8eea8537f102fa65715cdc5aae20b8523813b"},
{file = "ijson-3.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed1336a2a6e5c427f419da0154e775834abcbc8ddd703004108121c6dd9eba9d"},
{file = "ijson-3.3.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0c819f83e4f7b7f7463b2dc10d626a8be0c85fbc7b3db0edc098c2b16ac968e"},
{file = "ijson-3.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33afc25057377a6a43c892de34d229a86f89ea6c4ca3dd3db0dcd17becae0dbb"},
{file = "ijson-3.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7914d0cf083471856e9bc2001102a20f08e82311dfc8cf1a91aa422f9414a0d6"},
{file = "ijson-3.3.0.tar.gz", hash = "sha256:7f172e6ba1bee0d4c8f8ebd639577bfe429dee0f3f96775a067b8bae4492d8a0"},
]
[[package]]
@ -3994,13 +3999,13 @@ zookeeper = ["kazoo (>=2.8.0)"]
[[package]]
name = "kubernetes"
version = "29.0.0"
version = "30.1.0"
description = "Kubernetes python client"
optional = false
python-versions = ">=3.6"
files = [
{file = "kubernetes-29.0.0-py2.py3-none-any.whl", hash = "sha256:ab8cb0e0576ccdfb71886366efb102c6a20f268d817be065ce7f9909c631e43e"},
{file = "kubernetes-29.0.0.tar.gz", hash = "sha256:c4812e227ae74d07d53c88293e564e54b850452715a59a927e7e1bc6b9a60459"},
{file = "kubernetes-30.1.0-py2.py3-none-any.whl", hash = "sha256:e212e8b7579031dd2e512168b617373bc1e03888d41ac4e04039240a292d478d"},
{file = "kubernetes-30.1.0.tar.gz", hash = "sha256:41e4c77af9f28e7a6c314e3bd06a8c6229ddd787cad684e0ab9f69b498e98ebc"},
]
[package.dependencies]
@ -4322,7 +4327,7 @@ types-requests = ">=2.31.0.2,<3.0.0.0"
[[package]]
name = "langflow-base"
version = "0.0.57"
version = "0.0.59"
description = "A Python package with a built-in web application"
optional = false
python-versions = ">=3.10,<3.13"
@ -4379,13 +4384,13 @@ url = "src/backend/base"
[[package]]
name = "langfuse"
version = "2.34.1"
version = "2.35.0"
description = "A client library for accessing langfuse"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langfuse-2.34.1-py3-none-any.whl", hash = "sha256:2bb76d8ead3837798fc1b43e74b012cfca6cf8f433be36e0d53e7498a8b9ba6f"},
{file = "langfuse-2.34.1.tar.gz", hash = "sha256:c40220b66a8ba429a4b23d42e02fcfbbe9bd755615f6410854eef1454c36f6ff"},
{file = "langfuse-2.35.0-py3-none-any.whl", hash = "sha256:e9df2474a01f8e167b7b13674c554915415b27064e48ad207054475f7fa8f82d"},
{file = "langfuse-2.35.0.tar.gz", hash = "sha256:b1d4b478233eefbc8a6fc63ca00ca82f6afecf2b0fdc1835ca65e751cf901577"},
]
[package.dependencies]
@ -4403,13 +4408,13 @@ openai = ["openai (>=0.27.8)"]
[[package]]
name = "langsmith"
version = "0.1.72"
version = "0.1.75"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langsmith-0.1.72-py3-none-any.whl", hash = "sha256:a4456707669521bd75b7431b9205a6b99579fb9ff01bd338f52d29df11a7662d"},
{file = "langsmith-0.1.72.tar.gz", hash = "sha256:262ae9e8aceaba50f3a0f5b6eb559d6110886f0afc6b0ed5270e7d3d3f1fd8d6"},
{file = "langsmith-0.1.75-py3-none-any.whl", hash = "sha256:d08b08dd6b3fa4da170377f95123d77122ef4c52999d10fff4ae08ff70d07aed"},
{file = "langsmith-0.1.75.tar.gz", hash = "sha256:61274e144ea94c297dd78ce03e6dfae18459fe9bd8ab5094d61a0c4816561279"},
]
[package.dependencies]
@ -4419,13 +4424,13 @@ requests = ">=2,<3"
[[package]]
name = "litellm"
version = "1.40.2"
version = "1.40.4"
description = "Library to easily interface with LLM API providers"
optional = false
python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8"
files = [
{file = "litellm-1.40.2-py3-none-any.whl", hash = "sha256:56ee777eed30ee9acb86e74401d090dcac4adb57b5c8a8714f791b0c97a34afc"},
{file = "litellm-1.40.2.tar.gz", hash = "sha256:1f5dc4eab7100962c3a2985c7d8c13070ff5793b341540d19b98a2bd85955cb0"},
{file = "litellm-1.40.4-py3-none-any.whl", hash = "sha256:b3b8e4401f717c3a18595446bfdb80fc6bb74974aac4eae537fb7b3be37fbf9e"},
{file = "litellm-1.40.4.tar.gz", hash = "sha256:3edaa1189742afd7c7df2b122f77373d47154a8fb6df6187ff5875e188baa3e1"},
]
[package.dependencies]
@ -4796,13 +4801,13 @@ files = [
[[package]]
name = "marshmallow"
version = "3.21.2"
version = "3.21.3"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
optional = false
python-versions = ">=3.8"
files = [
{file = "marshmallow-3.21.2-py3-none-any.whl", hash = "sha256:70b54a6282f4704d12c0a41599682c5c5450e843b9ec406308653b47c59648a1"},
{file = "marshmallow-3.21.2.tar.gz", hash = "sha256:82408deadd8b33d56338d2182d455db632c6313aa2af61916672146bb32edc56"},
{file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"},
{file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"},
]
[package.dependencies]
@ -5583,13 +5588,13 @@ sympy = "*"
[[package]]
name = "openai"
version = "1.31.1"
version = "1.32.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.7.1"
files = [
{file = "openai-1.31.1-py3-none-any.whl", hash = "sha256:a746cf070798a4048cfea00b0fc7cb9760ee7ead5a08c48115b914d1afbd1b53"},
{file = "openai-1.31.1.tar.gz", hash = "sha256:a15266827de20f407d4bf9837030b168074b5b29acd54f10bb38d5f53e95f083"},
{file = "openai-1.32.0-py3-none-any.whl", hash = "sha256:953d57669f309002044fd2f678aba9f07a43256d74b3b00cd04afb5b185568ea"},
{file = "openai-1.32.0.tar.gz", hash = "sha256:a6df15a7ab9344b1bc2bc8d83639f68b7a7e2453c0f5e50c1666547eee86f0bd"},
]
[package.dependencies]
@ -7575,13 +7580,13 @@ websockets = ">=11,<13"
[[package]]
name = "redis"
version = "5.0.4"
version = "5.0.5"
description = "Python client for Redis database and key-value store"
optional = true
python-versions = ">=3.7"
files = [
{file = "redis-5.0.4-py3-none-any.whl", hash = "sha256:7adc2835c7a9b5033b7ad8f8918d09b7344188228809c98df07af226d39dec91"},
{file = "redis-5.0.4.tar.gz", hash = "sha256:ec31f2ed9675cc54c21ba854cfe0462e6faf1d83c8ce5944709db8a4700b9c61"},
{file = "redis-5.0.5-py3-none-any.whl", hash = "sha256:30b47d4ebb6b7a0b9b40c1275a19b87bb6f46b3bed82a89012cf56dea4024ada"},
{file = "redis-5.0.5.tar.gz", hash = "sha256:3417688621acf6ee368dec4a04dd95881be24efd34c79f00d31f62bb528800ae"},
]
[package.dependencies]
@ -8313,13 +8318,13 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7
[[package]]
name = "storage3"
version = "0.7.5"
version = "0.7.6"
description = "Supabase Storage client for Python."
optional = false
python-versions = "<4.0,>=3.8"
files = [
{file = "storage3-0.7.5-py3-none-any.whl", hash = "sha256:a2d9fdacafdcbcdb6776a54987a7d84c3e3195a5e4782955c4ccfb36cb021f14"},
{file = "storage3-0.7.5.tar.gz", hash = "sha256:ffe43f3877898b43a94024e68c2aaf4cebb3ad73dbbbd67747041d1d70bbf032"},
{file = "storage3-0.7.6-py3-none-any.whl", hash = "sha256:d8c23bf87b3a88cafb03761b7f936e4e49daca67741d571513edf746e0f8ba72"},
{file = "storage3-0.7.6.tar.gz", hash = "sha256:0b7781cea7fe6382e6b9349b84395808c5f4203dfcac31478304eedc2f81acf6"},
]
[package.dependencies]
@ -8381,13 +8386,13 @@ supafunc = ">=0.3.1,<0.5.0"
[[package]]
name = "supafunc"
version = "0.4.5"
version = "0.4.6"
description = "Library for Supabase Functions"
optional = false
python-versions = "<4.0,>=3.8"
files = [
{file = "supafunc-0.4.5-py3-none-any.whl", hash = "sha256:2208045f8f5c797924666f6a332efad75ad368f8030b2e4ceb9d2bf63f329373"},
{file = "supafunc-0.4.5.tar.gz", hash = "sha256:a6466d78bdcaa58b7f0303793643103baae8106a87acd5d01e196179a9d0d024"},
{file = "supafunc-0.4.6-py3-none-any.whl", hash = "sha256:f7ca7b244365e171da7055a64edb462c2ec449cdaa210fc418cfccd132f4cf98"},
{file = "supafunc-0.4.6.tar.gz", hash = "sha256:92db51f8f8568d1430285219c9c0072e44207409c416622d7387f609e31928a6"},
]
[package.dependencies]
@ -8707,22 +8712,22 @@ optree = ["optree (>=0.9.1)"]
[[package]]
name = "tornado"
version = "6.4"
version = "6.4.1"
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
optional = false
python-versions = ">= 3.8"
python-versions = ">=3.8"
files = [
{file = "tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0"},
{file = "tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263"},
{file = "tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e"},
{file = "tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579"},
{file = "tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212"},
{file = "tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"},
{file = "tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78"},
{file = "tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f"},
{file = "tornado-6.4-cp38-abi3-win32.whl", hash = "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052"},
{file = "tornado-6.4-cp38-abi3-win_amd64.whl", hash = "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63"},
{file = "tornado-6.4.tar.gz", hash = "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee"},
{file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"},
{file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"},
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"},
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"},
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"},
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"},
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"},
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"},
{file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"},
{file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"},
{file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"},
]
[[package]]
@ -10054,4 +10059,4 @@ local = ["ctransformers", "llama-cpp-python", "sentence-transformers"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.13"
content-hash = "83c94ed0fa28b968553221385251b871139a7440ab0420f867efbe16568b8411"
content-hash = "2ba268be17a69253c9631ec721ece465a85a22949c2df7c712b7aa12d1a002fa"

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "1.0.0a46"
version = "1.0.0a48"
description = "A Python package with a built-in web application"
authors = ["Langflow <contact@langflow.org>"]
maintainers = [
@ -66,7 +66,7 @@ qianfan = "0.3.5"
pgvector = "^0.2.3"
pyautogen = "^0.2.0"
langchain-google-genai = "^1.0.1"
langchain-cohere = "^0.1.0rc1"
langchain-cohere = "^0.1.5"
elasticsearch = "^8.12.0"
pytube = "^15.0.0"
dspy-ai = "^2.4.0"

View file

@ -1,4 +1,4 @@
import os
import argparse
from huggingface_hub import HfApi, list_models
from rich import print
@ -6,11 +6,27 @@ from rich import print
# Use root method
models = list_models()
args = argparse.ArgumentParser(description="Restart a space in the Hugging Face Hub.")
args.add_argument("--space", type=str, help="The space to restart.")
args.add_argument("--token", type=str, help="The Hugging Face API token.")
parsed_args = args.parse_args()
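# Example invocation (matching the CI workflow):
#   python ./scripts/factory_restart_space.py --space "Langflow/Langflow-Preview" --token $HUGGINGFACE_API_TOKEN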
space = parsed_args.space
if not space:
print("Please provide a space to restart.")
exit(1)
if not parsed_args.token:
print("Please provide an API token.")
exit(1)
# Or configure a HfApi client
hf_api = HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=os.getenv("HUGGINFACE_API_TOKEN"),
token=parsed_args.token,
)
space_runtime = hf_api.restart_space("Langflow/Langflow-Preview", factory_reboot=True)
space_runtime = hf_api.restart_space(space, factory_reboot=True)
print(space_runtime)

View file

@ -204,16 +204,18 @@ def format_elapsed_time(elapsed_time: float) -> str:
return f"{minutes} {minutes_unit}, {seconds} {seconds_unit}"
async def build_and_cache_graph_from_db(
flow_id: str,
session: Session,
chat_service: "ChatService",
):
async def build_and_cache_graph_from_db(flow_id: str, session: Session, chat_service: "ChatService"):
"""Build and cache the graph."""
flow: Optional[Flow] = session.get(Flow, flow_id)
if not flow or not flow.data:
raise ValueError("Invalid flow ID")
graph = Graph.from_payload(flow.data, flow_id)
for vertex_id in graph._has_session_id_vertices:
vertex = graph.get_vertex(vertex_id)
if vertex is None:
raise ValueError(f"Vertex {vertex_id} not found")
if not vertex._raw_params.get("session_id"):
vertex.update_raw_params({"session_id": flow_id})
await chat_service.set_cache(flow_id, graph)
return graph
@ -317,3 +319,4 @@ def parse_exception(exc):
if hasattr(exc, "body"):
return exc.body["message"]
return str(exc)

View file

@ -22,6 +22,7 @@ from langflow.api.v1.schemas import (
VertexBuildResponse,
VerticesOrderResponse,
)
from langflow.schema.schema import Log
from langflow.services.auth.utils import get_current_active_user
from langflow.services.chat.service import ChatService
from langflow.services.deps import get_chat_service, get_session, get_session_service
@ -123,6 +124,7 @@ async def build_vertex(
vertex_id: str,
background_tasks: BackgroundTasks,
inputs: Annotated[Optional[InputValueRequest], Body(embed=True)] = None,
files: Optional[list[str]] = None,
chat_service: "ChatService" = Depends(get_chat_service),
current_user=Depends(get_current_active_user),
):
@ -159,6 +161,7 @@ async def build_vertex(
else:
graph = cache.get("result")
vertex = graph.get_vertex(vertex_id)
log_obj = None
try:
lock = chat_service._cache_locks[flow_id_str]
(
@ -175,19 +178,25 @@ async def build_vertex(
vertex_id=vertex_id,
user_id=current_user.id,
inputs_dict=inputs.model_dump() if inputs else {},
files=files,
)
log_obj = Log(message=vertex.artifacts_raw, type=vertex.artifacts_type)
result_data_response = ResultDataResponse(**result_dict.model_dump())
except Exception as exc:
logger.exception(f"Error building vertex: {exc}")
params = format_exception_message(exc)
valid = False
log_obj = Log(message=params, type="error")
result_data_response = ResultDataResponse(results={})
artifacts = {}
# If there's an error building the vertex
# we need to clear the cache
await chat_service.clear_cache(flow_id_str)
result_data_response.message = artifacts
result_data_response.logs.append(log_obj)
# Log the vertex build
if not vertex.will_stream:
background_tasks.add_task(

View file

@ -1,5 +1,7 @@
from typing import List
from langflow.helpers.flow import generate_unique_flow_name
from langflow.helpers.folders import generate_unique_folder_name
import orjson
from fastapi import APIRouter, Depends, File, HTTPException, Response, UploadFile, status
from sqlalchemy import or_, update
@ -203,16 +205,9 @@ async def upload_file(
if not data:
raise HTTPException(status_code=400, detail="No flows found in the file")
folder_results = session.exec(
select(Folder).where(
Folder.name == data["folder_name"],
Folder.user_id == current_user.id,
)
)
existing_folder_names = [folder.name for folder in folder_results]
folder_name = generate_unique_folder_name(data["folder_name"], current_user.id, session)
if existing_folder_names:
data["folder_name"] = f"{data['folder_name']} ({len(existing_folder_names) + 1})"
data["folder_name"] = folder_name
folder = FolderCreate(name=data["folder_name"], description=data["folder_description"])
@ -232,6 +227,8 @@ async def upload_file(
raise HTTPException(status_code=400, detail="No flows found in the data")
# Now we set the user_id for all flows
for flow in flow_list.flows:
flow_name = generate_unique_flow_name(flow.name, current_user.id, session)
flow.name = flow_name
flow.user_id = current_user.id
flow.folder_id = new_folder.id

View file

@ -71,9 +71,7 @@ async def login_to_get_access_token(
@router.get("/auto_login")
async def auto_login(
response: Response,
db: Session = Depends(get_session),
settings_service=Depends(get_settings_service)
response: Response, db: Session = Depends(get_session), settings_service=Depends(get_settings_service)
):
auth_settings = settings_service.auth_settings
if settings_service.auth_settings.AUTO_LOGIN:

View file

@ -1,9 +1,9 @@
from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException, Query
from langflow.services.deps import get_monitor_service
from langflow.services.monitor.schema import (
MessageModelRequest,
MessageModelResponse,
TransactionModelResponse,
VertexBuildMapModel,
@ -66,6 +66,44 @@ async def get_messages(
raise HTTPException(status_code=500, detail=str(e))
@router.delete("/messages", status_code=204)
async def delete_messages(
message_ids: List[int],
monitor_service: MonitorService = Depends(get_monitor_service),
):
try:
monitor_service.delete_messages(message_ids=message_ids)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/messages/{message_id}", response_model=MessageModelResponse)
async def update_message(
message_id: str,
message: MessageModelRequest,
monitor_service: MonitorService = Depends(get_monitor_service),
):
try:
message_dict = message.model_dump(exclude_none=True)
message_dict.pop("index", None)
monitor_service.update_message(message_id=message_id, **message_dict)
return MessageModelResponse(index=message_id, **message_dict)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.delete("/messages/session/{session_id}", status_code=204)
async def delete_messages_session(
session_id: str,
monitor_service: MonitorService = Depends(get_monitor_service),
):
try:
monitor_service.delete_messages_session(session_id=session_id)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.get("/transactions", response_model=List[TransactionModelResponse])
async def get_transactions(
source: Optional[str] = Query(None),

View file

@ -2,6 +2,7 @@ from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from typing_extensions import TypedDict
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_serializer
@ -9,11 +10,12 @@ from pydantic import BaseModel, ConfigDict, Field, field_validator, model_serial
from langflow.graph.schema import RunOutputs
from langflow.schema import dotdict
from langflow.schema.graph import Tweaks
from langflow.schema.schema import InputType, OutputType
from langflow.schema.schema import InputType, Log, OutputType
from langflow.services.database.models.api_key.model import ApiKeyRead
from langflow.services.database.models.base import orjson_dumps
from langflow.services.database.models.flow import FlowCreate, FlowRead
from langflow.services.database.models.user import UserRead
from langflow.utils.schemas import ChatOutputResponse
class BuildStatus(Enum):
@ -242,9 +244,10 @@ class VerticesOrderResponse(BaseModel):
run_id: UUID
vertices_to_run: List[str]
class ResultDataResponse(BaseModel):
results: Optional[Any] = Field(default_factory=dict)
logs: List[Log | None] = Field(default_factory=list)
message: Optional[Any] = Field(default_factory=dict)
artifacts: Optional[Any] = Field(default_factory=dict)
timedelta: Optional[float] = None
duration: Optional[str] = None

View file

@ -15,10 +15,25 @@ import shlex
from collections import OrderedDict, namedtuple
from http.cookies import SimpleCookie
from uncurl.api import parser # type: ignore
parser.add_argument("-x", "--proxy", default={})
parser.add_argument("-U", "--proxy-user", default="")
ParsedArgs = namedtuple(
"ParsedContext",
[
"command",
"url",
"data",
"data_binary",
"method",
"headers",
"compressed",
"insecure",
"user",
"include",
"silent",
"proxy",
"proxy_user",
"cookies",
],
)
ParsedContext = namedtuple("ParsedContext", ["method", "url", "data", "headers", "cookies", "verify", "auth", "proxy"])
@ -27,24 +42,90 @@ def normalize_newlines(multiline_text):
return multiline_text.replace(" \\\n", " ")
def parse_curl_command(curl_command):
tokens = shlex.split(normalize_newlines(curl_command))
tokens = [token for token in tokens if token and token != " "]
if "curl" not in tokens[0]:
raise ValueError("Invalid curl command")
args_template = {
"command": None,
"url": None,
"data": None,
"data_binary": None,
"method": "get",
"headers": [],
"compressed": False,
"insecure": False,
"user": (),
"include": False,
"silent": False,
"proxy": None,
"proxy_user": None,
"cookies": {},
}
args = args_template.copy()
method_on_curl = None
i = 0
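# Walk the token list left to right; options that take a value consume the next token.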
while i < len(tokens):
token = tokens[i]
if token == "-X":
i += 1
args["method"] = tokens[i].lower()
method_on_curl = tokens[i].lower()
elif token in ("-d", "--data"):
i += 1
args["data"] = tokens[i]
elif token in ("-b", "--data-binary", "--data-raw"):
i += 1
args["data_binary"] = tokens[i]
elif token in ("-H", "--header"):
i += 1
args["headers"].append(tokens[i])
elif token == "--compressed":
args["compressed"] = True
elif token in ("-k", "--insecure"):
args["insecure"] = True
elif token in ("-u", "--user"):
i += 1
args["user"] = tuple(tokens[i].split(":"))
elif token in ("-I", "--include"):
args["include"] = True
elif token in ("-s", "--silent"):
args["silent"] = True
elif token in ("-x", "--proxy"):
i += 1
args["proxy"] = tokens[i]
elif token in ("-U", "--proxy-user"):
i += 1
args["proxy_user"] = tokens[i]
elif not token.startswith("-"):
if args["command"] is None:
args["command"] = token
else:
args["url"] = token
i += 1
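# An explicit -X method wins over the default inferred from the other flags.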
args["method"] = method_on_curl or args["method"]
return ParsedArgs(**args)
def parse_context(curl_command):
method = "get"
tokens = shlex.split(normalize_newlines(curl_command))
tokens = [token for token in tokens if token and token != " "]
parsed_args = parser.parse_args(tokens)
parsed_args: ParsedArgs = parse_curl_command(curl_command)
post_data = parsed_args.data or parsed_args.data_binary
if post_data:
method = "post"
if parsed_args.X:
method = parsed_args.X.lower()
if parsed_args.method:
method = parsed_args.method.lower()
cookie_dict = OrderedDict()
quoted_headers = OrderedDict()
for curl_header in parsed_args.header:
for curl_header in parsed_args.headers:
if curl_header.startswith(":"):
occurrence = [m.start() for m in re.finditer(":", curl_header)]
header_key, header_value = curl_header[: occurrence[1]], curl_header[occurrence[1] + 1 :]

View file

@ -3,6 +3,7 @@ import xml.etree.ElementTree as ET
from concurrent import futures
from pathlib import Path
from typing import Callable, List, Optional, Text
import unicodedata
import chardet
import yaml
@ -31,6 +32,17 @@ TEXT_FILE_TYPES = [
"tsx",
]
IMG_FILE_TYPES = [
"jpg",
"jpeg",
"png",
"bmp",
]
def normalize_text(text):
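# NFKD folds compatibility characters (ligatures, full-width forms) into their canonical equivalents.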
return unicodedata.normalize("NFKD", text)
def is_hidden(path: Path) -> bool:
return path.name.startswith(".")
@ -92,7 +104,10 @@ def read_text_file(file_path: str) -> str:
with open(file_path, "rb") as f:
raw_data = f.read()
result = chardet.detect(raw_data)
encoding = result['encoding']
encoding = result["encoding"]
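# chardet can misdetect UTF-8 text as Windows-1254 or MacRoman; prefer UTF-8 in those cases.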
if encoding in ["Windows-1254", "MacRoman"]:
encoding = "utf-8"
with open(file_path, "r", encoding=encoding) as f:
return f.read()
@ -121,9 +136,15 @@ def parse_text_file_to_record(file_path: str, silent_errors: bool) -> Optional[R
text = read_docx_file(file_path)
else:
text = read_text_file(file_path)
# if file is json, yaml, or xml, we can parse it
if file_path.endswith(".json"):
text = json.loads(text)
if isinstance(text, dict):
text = {k: normalize_text(v) if isinstance(v, str) else v for k, v in text.items()}
elif isinstance(text, list):
text = [normalize_text(item) if isinstance(item, str) else item for item in text]
elif file_path.endswith(".yaml") or file_path.endswith(".yml"):
text = yaml.safe_load(text)
elif file_path.endswith(".xml"):

View file

@ -1,5 +1,6 @@
from typing import Optional, Union
from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES
from langflow.custom import CustomComponent
from langflow.field_typing import Text
from langflow.helpers.record import records_to_text
@ -40,6 +41,13 @@ class ChatComponent(CustomComponent):
"info": "In case of Message being a Record, this template will be used to convert it to text.",
"advanced": True,
},
"files": {
"field_type": "file",
"display_name": "Files",
"file_types": TEXT_FILE_TYPES + IMG_FILE_TYPES,
"info": "Files to be sent with the message.",
"advanced": True,
},
}
def store_message(
@ -65,6 +73,7 @@ class ChatComponent(CustomComponent):
sender: Optional[str] = "User",
sender_name: Optional[str] = "User",
input_value: Optional[Union[str, Record]] = None,
files: Optional[list[str]] = None,
session_id: Optional[str] = None,
return_record: Optional[bool] = False,
record_template: str = "Text: {text}\nData: {data}",
@ -76,6 +85,7 @@ class ChatComponent(CustomComponent):
input_value.data["sender"] = sender
input_value.data["sender_name"] = sender_name
input_value.data["session_id"] = session_id
input_value.data["files"] = files
else:
input_value_record = Record(
text=input_value,
@ -83,6 +93,7 @@ class ChatComponent(CustomComponent):
"sender": sender,
"sender_name": sender_name,
"session_id": session_id,
"files": files,
},
)
elif isinstance(input_value, Record):
@ -103,17 +114,21 @@ class ChatComponent(CustomComponent):
sender: Optional[str] = "User",
sender_name: Optional[str] = "User",
input_value: Optional[str] = None,
files: Optional[list[str]] = None,
session_id: Optional[str] = None,
return_record: Optional[bool] = False,
record_template: str = "Text: {text}\nData: {data}",
) -> Union[Text, Record]:
input_value_record: Optional[Record] = None
if files and not return_record:
raise ValueError("Files can only be provided when Return Record is enabled.")
if return_record:
if isinstance(input_value, Record):
# Update the data of the record
input_value.data["sender"] = sender
input_value.data["sender_name"] = sender_name
input_value.data["session_id"] = session_id
input_value.data["files"] = files
else:
input_value_record = Record(
text=input_value,
@ -121,6 +136,7 @@ class ChatComponent(CustomComponent):
"sender": sender,
"sender_name": sender_name,
"session_id": session_id,
"files": files,
},
)
elif isinstance(input_value, Record):

View file

@ -1,10 +1,13 @@
import warnings
from typing import Optional, Union
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.llms import LLM
from langchain_core.load import load
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langflow.custom import CustomComponent
from langflow.schema.schema import Record
class LCModelComponent(CustomComponent):
@ -53,19 +56,28 @@ class LCModelComponent(CustomComponent):
key in response_metadata["token_usage"] for key in inner_openai_keys
):
token_usage = response_metadata["token_usage"]
completion_tokens = token_usage["completion_tokens"]
prompt_tokens = token_usage["prompt_tokens"]
total_tokens = token_usage["total_tokens"]
finish_reason = response_metadata["finish_reason"]
status_message = f"Tokens:\nInput: {prompt_tokens}\nOutput: {completion_tokens}\nTotal Tokens: {total_tokens}\nStop Reason: {finish_reason}\nResponse: {content}"
status_message = {
"tokens": {
"input": token_usage["prompt_tokens"],
"output": token_usage["completion_tokens"],
"total": token_usage["total_tokens"],
"stop_reason": response_metadata["finish_reason"],
"response": content,
}
}
elif all(key in response_metadata for key in anthropic_keys) and all(
key in response_metadata["usage"] for key in inner_anthropic_keys
):
usage = response_metadata["usage"]
input_tokens = usage["input_tokens"]
output_tokens = usage["output_tokens"]
stop_reason = response_metadata["stop_reason"]
status_message = f"Tokens:\nInput: {input_tokens}\nOutput: {output_tokens}\nStop Reason: {stop_reason}\nResponse: {content}"
status_message = {
"tokens": {
"input": usage["input_tokens"],
"output": usage["output_tokens"],
"stop_reason": response_metadata["stop_reason"],
"response": content,
}
}
else:
status_message = f"Response: {content}"
else:
@ -73,7 +85,7 @@ class LCModelComponent(CustomComponent):
return status_message
def get_chat_result(
self, runnable: BaseChatModel, stream: bool, input_value: str, system_message: Optional[str] = None
self, runnable: BaseChatModel, stream: bool, input_value: str | Record, system_message: Optional[str] = None
):
messages: list[Union[HumanMessage, SystemMessage]] = []
if not input_value and not system_message:
@ -81,7 +93,16 @@ class LCModelComponent(CustomComponent):
if system_message:
messages.append(SystemMessage(content=system_message))
if input_value:
messages.append(HumanMessage(content=input_value))
if isinstance(input_value, Record):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if "prompt" in input_value:
prompt = load(input_value.prompt)
runnable = prompt | runnable
else:
messages.append(input_value.to_lc_message())
else:
messages.append(HumanMessage(content=input_value))
if stream:
return runnable.stream(messages)
else:

View file

@ -1,9 +1,10 @@
import base64
from copy import deepcopy
from langchain_core.documents import Document
from langflow.schema import Record
from langflow.services.deps import get_storage_service
def record_to_string(record: Record) -> str:
@ -19,7 +20,7 @@ def record_to_string(record: Record) -> str:
return record.get_text()
def dict_values_to_string(d: dict) -> dict:
async def dict_values_to_string(d: dict) -> dict:
"""
Converts the values of a dictionary to strings.
@ -36,16 +37,43 @@ def dict_values_to_string(d: dict) -> dict:
if isinstance(value, list):
for i, item in enumerate(value):
if isinstance(item, Record):
d_copy[key][i] = record_to_string(item)
d_copy[key][i] = item.to_lc_message()
elif isinstance(item, Document):
d_copy[key][i] = document_to_string(item)
elif isinstance(value, Record):
d_copy[key] = record_to_string(value)
if "files" in value and value.files:
files = await get_file_paths(value.files)
value.files = files
d_copy[key] = value.to_lc_message()
elif isinstance(value, Document):
d_copy[key] = document_to_string(value)
return d_copy
async def get_file_paths(files: list[str]):
storage_service = get_storage_service()
file_paths = []
for file in files:
flow_id, file_name = file.split("/")
file_paths.append(storage_service.build_full_path(flow_id=flow_id, file_name=file_name))
return file_paths
async def get_files(
file_paths: list[str],
convert_to_base64: bool = False,
):
storage_service = get_storage_service()
file_objects = []
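# Each path is "<flow_id>/<file_name>"; fetch the raw bytes, base64-encoding them when requested.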
for file_path in file_paths:
flow_id, file_name = file_path.split("/")
file_object = await storage_service.get_file(flow_id=flow_id, file_name=file_name)
if convert_to_base64:
file_object = base64.b64encode(file_object).decode("utf-8")
file_objects.append(file_object)
return file_objects
def document_to_string(document: Document) -> str:
"""
Convert a document to a string.

View file

@ -25,6 +25,7 @@ class ChatInput(ChatComponent):
sender: Optional[str] = "User",
sender_name: Optional[str] = "User",
input_value: Optional[str] = None,
files: Optional[list[str]] = None,
session_id: Optional[str] = None,
return_record: Optional[bool] = False,
) -> Union[Text, Record]:
@ -32,6 +33,7 @@ class ChatInput(ChatComponent):
sender=sender,
sender_name=sender_name,
input_value=input_value,
files=files,
session_id=session_id,
return_record=return_record,
)

View file

@ -1,7 +1,9 @@
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts import ChatPromptTemplate
from langflow.base.prompts.utils import dict_values_to_string
from langflow.custom import CustomComponent
from langflow.field_typing import Prompt, TemplateField, Text
from langflow.schema.schema import Record
class PromptComponent(CustomComponent):
@ -15,19 +17,14 @@ class PromptComponent(CustomComponent):
"code": TemplateField(advanced=True),
}
def build(
async def build(
self,
template: Prompt,
**kwargs,
) -> Text:
from langflow.base.prompts.utils import dict_values_to_string
prompt_template = PromptTemplate.from_template(Text(template))
kwargs = dict_values_to_string(kwargs)
kwargs = {k: "\n".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}
try:
formated_prompt = prompt_template.format(**kwargs)
except Exception as exc:
raise ValueError(f"Error formatting prompt: {exc}") from exc
self.status = f'Prompt:\n"{formated_prompt}"'
return formated_prompt
) -> Record:
prompt_template = ChatPromptTemplate.from_template(Text(template))
kwargs = await dict_values_to_string(kwargs)
messages = list(kwargs.values())
prompt = prompt_template + messages
self.status = f'Prompt:\n"{template}"'
return Record(data={"prompt": prompt.to_json()})

View file

@ -58,7 +58,7 @@ class AmazonBedrockComponent(LCModelComponent):
"advanced": True,
},
"cache": {"display_name": "Cache"},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",

View file

@ -63,7 +63,7 @@ class AnthropicLLM(LCModelComponent):
"info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"advanced": True,

View file

@ -78,7 +78,7 @@ class AzureChatOpenAIComponent(LCModelComponent):
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -81,7 +81,7 @@ class QianfanChatEndpointComponent(LCModelComponent):
"info": "Endpoint of the Qianfan LLM, required if custom model used.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -111,7 +111,7 @@ class ChatLiteLLMModelComponent(LCModelComponent):
"required": False,
"default": False,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -1,10 +1,11 @@
from typing import Optional
from langchain_cohere import ChatCohere
from pydantic.v1 import SecretStr
from langflow.field_typing import Text
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langchain_cohere import ChatCohere
from langflow.field_typing import Text
class CohereComponent(LCModelComponent):
@ -42,7 +43,7 @@ class CohereComponent(LCModelComponent):
"type": "float",
"show": True,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
@ -69,3 +70,4 @@ class CohereComponent(LCModelComponent):
temperature=temperature,
)
return self.get_chat_result(output, stream, input_value, system_message)

View file

@ -2,9 +2,10 @@ from typing import Optional
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langflow.field_typing import Text
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class HuggingFaceEndpointsComponent(LCModelComponent):
@ -36,7 +37,7 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
"advanced": True,
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
@ -72,3 +73,4 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
raise ValueError("Could not connect to HuggingFace Endpoints API.") from e
output = ChatHuggingFace(llm=llm)
return self.get_chat_result(output, stream, input_value, system_message)

View file

@ -27,7 +27,7 @@ class MistralAIModelComponent(LCModelComponent):
def build_config(self):
return {
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,

View file

@ -194,7 +194,7 @@ class ChatOllamaComponent(LCModelComponent):
"info": "Template to use for generating text.",
"advanced": True,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -28,7 +28,7 @@ class OpenAIModelComponent(LCModelComponent):
def build_config(self):
return {
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,

View file

@ -1,6 +1,5 @@
from typing import Optional
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
@ -74,7 +73,7 @@ class ChatVertexAIComponent(LCModelComponent):
"value": False,
"advanced": True,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -18,6 +18,7 @@ class ChatOutput(ChatComponent):
session_id: Optional[str] = None,
return_record: Optional[bool] = False,
record_template: Optional[str] = "{text}",
files: Optional[list[str]] = None,
) -> Union[Text, Record]:
return super().build_with_record(
sender=sender,
@ -26,4 +27,5 @@ class ChatOutput(ChatComponent):
session_id=session_id,
return_record=return_record,
record_template=record_template or "",
files=files,
)

View file

@ -1,27 +0,0 @@
from .AstraDBSearch import AstraDBSearchComponent
from .ChromaSearch import ChromaSearchComponent
from .FAISSSearch import FAISSSearchComponent
from .MongoDBAtlasVectorSearch import MongoDBAtlasSearchComponent
from .PineconeSearch import PineconeSearchComponent
from .QdrantSearch import QdrantSearchComponent
from .RedisSearch import RedisSearchComponent
from .SupabaseVectorStoreSearch import SupabaseSearchComponent
from .VectaraSearch import VectaraSearchComponent
from .WeaviateSearch import WeaviateSearchVectorStore
from .pgvectorSearch import PGVectorSearchComponent
from .Couchbase import CouchbaseSearchComponent # type: ignore
__all__ = [
"AstraDBSearchComponent",
"ChromaSearchComponent",
"CouchbaseSearchComponent",
"FAISSSearchComponent",
"MongoDBAtlasSearchComponent",
"PineconeSearchComponent",
"QdrantSearchComponent",
"RedisSearchComponent",
"SupabaseSearchComponent",
"VectaraSearchComponent",
"WeaviateSearchVectorStore",
"PGVectorSearchComponent",
]

View file

@ -1,28 +0,0 @@
from .AstraDB import AstraDBVectorStoreComponent
from .Chroma import ChromaComponent
from .FAISS import FAISSComponent
from .MongoDBAtlasVector import MongoDBAtlasComponent
from .Pinecone import PineconeComponent
from .Qdrant import QdrantComponent
from .Redis import RedisComponent
from .SupabaseVectorStore import SupabaseComponent
from .Vectara import VectaraComponent
from .Weaviate import WeaviateVectorStoreComponent
from .pgvector import PGVectorComponent
from .Couchbase import CouchbaseComponent
__all__ = [
"AstraDBVectorStoreComponent",
"ChromaComponent",
"CouchbaseComponent",
"FAISSComponent",
"MongoDBAtlasComponent",
"PineconeComponent",
"QdrantComponent",
"RedisComponent",
"SupabaseComponent",
"VectaraComponent",
"WeaviateVectorStoreComponent",
"base",
"PGVectorComponent",
]

View file

@ -297,7 +297,7 @@ class CodeParser:
bases = self.execute_and_inspect_classes(self.code)
except Exception as e:
# If the code cannot be executed, return an empty list
logger.exception(e)
logger.debug(e)
bases = []
raise e
return bases

View file

@ -78,7 +78,8 @@ class DirectoryReader:
component_tuple = (*build_component(component), component)
components.append(component_tuple)
except Exception as e:
logger.error(f"Error while loading component { component['name']}: {e}")
logger.debug(f"Error while loading component { component['name']}")
logger.debug(e)
continue
items.append({"name": menu["name"], "path": menu["path"], "components": components})
filtered = [menu for menu in items if menu["components"]]
@ -266,8 +267,7 @@ class DirectoryReader:
if validation_result:
try:
output_types = self.get_output_types_from_code(result_content)
except Exception as exc:
logger.exception(f"Error while getting output types from code: {str(exc)}")
except Exception:
output_types = [component_name_camelcase]
else:
output_types = [component_name_camelcase]

View file

@ -710,6 +710,7 @@ class Graph:
chat_service: ChatService,
vertex_id: str,
inputs_dict: Optional[Dict[str, str]] = None,
files: Optional[list[str]] = None,
user_id: Optional[str] = None,
fallback_to_env_vars: bool = False,
):
@ -737,7 +738,9 @@ class Graph:
# Check the cache for the vertex
cached_result = await chat_service.get_cache(key=vertex.id)
if isinstance(cached_result, CacheMiss):
await vertex.build(user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars)
await vertex.build(
user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars, files=files
)
await chat_service.set_cache(key=vertex.id, data=vertex)
else:
cached_vertex = cached_result["result"]
@ -751,7 +754,9 @@ class Graph:
vertex.result.used_frozen_result = True
else:
await vertex.build(user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars)
await vertex.build(
user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars, files=files
)
if vertex.result is not None:
params = f"{vertex._built_object_repr()}{params}"

View file

@ -1,15 +1,17 @@
from enum import Enum
from typing import Any, List, Optional
from pydantic import BaseModel, Field, field_serializer
from pydantic import BaseModel, Field, field_serializer, model_validator
from langflow.graph.utils import serialize_field
from langflow.schema.schema import Log, StreamURL
from langflow.utils.schemas import ChatOutputResponse, ContainsEnumMeta
class ResultData(BaseModel):
results: Optional[Any] = Field(default_factory=dict)
artifacts: Optional[Any] = Field(default_factory=dict)
logs: Optional[List[dict]] = Field(default_factory=list)
messages: Optional[list[ChatOutputResponse]] = Field(default_factory=list)
timedelta: Optional[float] = None
duration: Optional[str] = None
@ -23,6 +25,19 @@ class ResultData(BaseModel):
return {key: serialize_field(val) for key, val in value.items()}
return serialize_field(value)
@model_validator(mode="before")
@classmethod
def validate_model(cls, values):
if not values.get("logs") and values.get("artifacts"):
# Build the log from the artifacts
message = values["artifacts"]
if "stream_url" in message and "type" in message:
stream_url = StreamURL(location=message["stream_url"])
values["logs"] = [Log(message=stream_url, type=message["type"])]
elif "type" in message:
values["logs"] = [Log(message=message, type=message["type"])]
return values
class InterfaceComponentTypes(str, Enum, metaclass=ContainsEnumMeta):
# ChatInput and ChatOutput are the only ones that are

View file

@ -1,9 +1,12 @@
from typing import Any, Union
from enum import Enum
from typing import Any, Generator, Union
from langchain_core.documents import Document
from langflow.schema.schema import Record
from pydantic import BaseModel
from langflow.interface.utils import extract_input_variables_from_prompt
from langflow.schema.schema import Record
class UnbuiltObject:
@ -14,6 +17,15 @@ class UnbuiltResult:
pass
class ArtifactType(str, Enum):
TEXT = "text"
RECORD = "record"
OBJECT = "object"
ARRAY = "array"
STREAM = "stream"
UNKNOWN = "unknown"
def validate_prompt(prompt: str):
"""Validate prompt."""
if extract_input_variables_from_prompt(prompt):
@ -50,3 +62,33 @@ def serialize_field(value):
elif isinstance(value, str):
return {"result": value}
return value
def get_artifact_type(custom_component, build_result) -> str:
result = ArtifactType.UNKNOWN
value = custom_component.repr_value
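# Classify by the component's repr value; generator build results are treated as streams below.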
match value:
case Record():
result = ArtifactType.RECORD
case str():
result = ArtifactType.TEXT
case dict():
result = ArtifactType.OBJECT
case list():
result = ArtifactType.ARRAY
if result == ArtifactType.UNKNOWN:
if isinstance(build_result, Generator):
result = ArtifactType.STREAM
return result.value
def post_process_raw(raw, artifact_type: str):
if artifact_type == ArtifactType.STREAM.value:
raw = ""
return raw

View file

@ -9,12 +9,12 @@ from typing import TYPE_CHECKING, Any, AsyncIterator, Callable, Dict, Iterator,
from loguru import logger
from langflow.graph.schema import INPUT_COMPONENTS, OUTPUT_COMPONENTS, InterfaceComponentTypes, ResultData
from langflow.graph.utils import UnbuiltObject, UnbuiltResult
from langflow.graph.vertex.utils import log_transaction
from langflow.graph.utils import ArtifactType, UnbuiltObject, UnbuiltResult
from langflow.interface.initialize import loading
from langflow.interface.listing import lazy_load_dict
from langflow.schema.schema import INPUT_FIELD_NAME
from langflow.services.deps import get_storage_service
from langflow.services.monitor.utils import log_transaction
from langflow.utils.constants import DIRECT_TYPES
from langflow.utils.schemas import ChatOutputResponse
from langflow.utils.util import sync_to_async, unescape_string
@ -63,6 +63,8 @@ class Vertex:
self._built_result = None
self._built = False
self.artifacts: Dict[str, Any] = {}
self.artifacts_raw: Any = None
self.artifacts_type: Optional[str] = None
self.steps: List[Callable] = [self._build]
self.steps_ran: List[Callable] = []
self.task_id: Optional[str] = None
@@ -371,7 +373,7 @@ class Vertex:
self.load_from_db_fields = load_from_db_fields
self._raw_params = params.copy()
def update_raw_params(self, new_params: Dict[str, str], overwrite: bool = False):
def update_raw_params(self, new_params: Dict[str, str | list[str]], overwrite: bool = False):
"""
Update the raw parameters of the vertex with the given new parameters.
@@ -426,7 +428,10 @@
sender=artifacts.get("sender"),
sender_name=artifacts.get("sender_name"),
session_id=artifacts.get("session_id"),
stream_url=artifacts.get("stream_url"),
files=[{"path": file} if isinstance(file, str) else file for file in artifacts.get("files", [])],
component_id=self.id,
type=self.artifacts_type,
).model_dump(exclude_none=True)
]
except KeyError:
@@ -444,7 +449,6 @@
messages = self.extract_messages_from_artifacts(artifacts)
else:
messages = []
result_dict = ResultData(
results=result_dict,
artifacts=artifacts,
@@ -526,11 +530,11 @@
The built result if use_result is True, else the built object.
"""
if not self._built:
log_transaction(source=self, target=requester, flow_id=self.graph.flow_id, status="error")
log_transaction(vertex=self, target=requester, status="error")
raise ValueError(f"Component {self.display_name} has not been built yet")
result = self._built_result if self.use_result else self._built_object
log_transaction(source=self, target=requester, flow_id=self.graph.flow_id, status="success")
log_transaction(vertex=self, target=requester, status="success")
return result
async def _build_vertex_and_update_params(self, key, vertex: "Vertex"):
@@ -624,6 +628,8 @@
self._built_object, self.artifacts = result
elif len(result) == 3:
self._custom_component, self._built_object, self.artifacts = result
self.artifacts_raw = self.artifacts.get("raw", None)
self.artifacts_type = self.artifacts.get("type", None) or ArtifactType.UNKNOWN.value
else:
self._built_object = result
@@ -664,6 +670,7 @@
self,
user_id=None,
inputs: Optional[Dict[str, Any]] = None,
files: Optional[list[str]] = None,
requester: Optional["Vertex"] = None,
**kwargs,
) -> Any:
@@ -681,9 +688,14 @@
return await self.get_requester_result(requester)
self._reset()
if self._is_chat_input() and inputs:
inputs = {"input_value": inputs.get(INPUT_FIELD_NAME, "")}
self.update_raw_params(inputs, overwrite=True)
if self._is_chat_input() and (inputs or files):
chat_input = {}
if inputs:
chat_input.update({"input_value": inputs.get(INPUT_FIELD_NAME, "")})
if files:
chat_input.update({"files": files})
self.update_raw_params(chat_input, overwrite=True)
# Run steps
for step in self.steps:
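For ChatInput vertices, the updated `build` now folds both the message text and any uploaded file paths into the raw params before the build steps run. A small sketch of just that merge, assuming `INPUT_FIELD_NAME` resolves to `"input_value"` as in `langflow.schema.schema`:

```python
# Sketch of the chat-input merge above; INPUT_FIELD_NAME is assumed to be
# "input_value", matching the import in this file's diff.
def merge_chat_input(inputs: dict | None, files: list[str] | None) -> dict:
    chat_input: dict = {}
    if inputs:
        chat_input["input_value"] = inputs.get("input_value", "")
    if files:
        chat_input["files"] = files
    return chat_input

assert merge_chat_input({"input_value": "hi"}, ["report.pdf"]) == {
    "input_value": "hi",
    "files": ["report.pdf"],
}
```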

View file

@@ -2,11 +2,11 @@ import json
from typing import AsyncIterator, Dict, Iterator, List
import yaml
from langchain_core.messages import AIMessage
from langchain_core.messages import AIMessage, AIMessageChunk
from loguru import logger
from langflow.graph.schema import CHAT_COMPONENTS, RECORDS_COMPONENTS, InterfaceComponentTypes
from langflow.graph.utils import UnbuiltObject, serialize_field
from langflow.graph.utils import ArtifactType, UnbuiltObject, serialize_field
from langflow.graph.vertex.base import Vertex
from langflow.schema import Record
from langflow.schema.schema import INPUT_FIELD_NAME
@@ -83,10 +83,11 @@ class InterfaceVertex(Vertex):
sender = self.params.get("sender", None)
sender_name = self.params.get("sender_name", None)
message = self.params.get(INPUT_FIELD_NAME, None)
files = [{"path": file} if isinstance(file, str) else file for file in self.params.get("files", [])]
if isinstance(message, str):
message = unescape_string(message)
stream_url = None
if isinstance(self._built_object, AIMessage):
if isinstance(self._built_object, (AIMessage, AIMessageChunk)):
artifacts = ChatOutputResponse.from_message(
self._built_object,
sender=sender,
@@ -108,12 +109,14 @@
# it means that it is a stream of messages
else:
message = self._built_object
artifact_type = ArtifactType.STREAM if stream_url is not None else ArtifactType.OBJECT
artifacts = ChatOutputResponse(
message=message,
sender=sender,
sender_name=sender_name,
stream_url=stream_url,
files=files,
type=artifact_type,
)
self.will_stream = stream_url is not None
@@ -195,6 +198,8 @@
message=complete_message,
sender=self.params.get("sender", ""),
sender_name=self.params.get("sender_name", ""),
files=[{"path": file} if isinstance(file, str) else file for file in self.params.get("files", [])],
type=ArtifactType.OBJECT.value,
).model_dump()
self.params[INPUT_FIELD_NAME] = complete_message
self._built_object = Record(text=complete_message, data=self.artifacts)
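Several hunks above normalize `files` the same way: bare path strings are wrapped into `{"path": ...}` dicts so downstream consumers see a single shape. The idiom in isolation:

```python
# The file-normalization idiom repeated across the hunks above, extracted for clarity:
# strings become {"path": ...} dicts, dicts pass through untouched.
def normalize_files(files: list) -> list[dict]:
    return [{"path": f} if isinstance(f, str) else f for f in files]

assert normalize_files(["a.png", {"path": "b.pdf", "name": "b"}]) == [
    {"path": "a.png"},
    {"path": "b.pdf", "name": "b"},
]
```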

View file

@@ -1,9 +1,5 @@
from typing import TYPE_CHECKING
from loguru import logger
from langflow.services.deps import get_monitor_service
if TYPE_CHECKING:
from langflow.graph.vertex.base import Vertex
@@ -21,34 +17,3 @@ def build_clean_params(target: "Vertex") -> dict:
if isinstance(value, list):
params[key] = [item for item in value if isinstance(item, (str, int, bool, float, list, dict))]
return params
def log_transaction(source: "Vertex", target: "Vertex", flow_id, status, error=None):
"""
Logs a transaction between two vertices.
Args:
source (Vertex): The source vertex of the transaction.
target (Vertex): The target vertex of the transaction.
status: The status of the transaction.
error (Optional): Any error associated with the transaction.
Raises:
Exception: If there is an error while logging the transaction.
"""
try:
monitor_service = get_monitor_service()
clean_params = build_clean_params(target)
data = {
"source": source.vertex_type,
"target": target.vertex_type,
"target_args": clean_params,
"timestamp": monitor_service.get_timestamp(),
"status": status,
"error": error,
"flow_id": flow_id,
}
monitor_service.add_row(table_name="transactions", data=data)
except Exception as e:
logger.error(f"Error logging transaction: {e}")
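This removal pairs with the import change in `vertex/base.py` above: `log_transaction` now comes from `langflow.services.monitor.utils` and takes the vertex itself rather than a separate `flow_id`. A hedged stub of the new signature, inferred only from the updated call sites (the real implementation is not part of this section):

```python
# Stub inferred from the call sites log_transaction(vertex=..., target=..., status=...);
# the real monitor-service version also records params and timestamps.
from typing import Any, Optional

def log_transaction(vertex: Any, target: Any, status: str, error: Optional[str] = None) -> None:
    # The flow id can now be derived from the vertex's own graph.
    flow_id = getattr(getattr(vertex, "graph", None), "flow_id", None)
    print(f"transaction {vertex} -> {target} [{status}] flow={flow_id} error={error}")
```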

View file

@@ -90,7 +90,9 @@ async def run_flow(
fallback_to_env_vars = get_settings_service().settings.fallback_to_env_var
return await graph.arun(inputs_list, inputs_components=inputs_components, types=types, fallback_to_env_vars=fallback_to_env_vars)
return await graph.arun(
inputs_list, inputs_components=inputs_components, types=types, fallback_to_env_vars=fallback_to_env_vars
)
def generate_function_for_flow(
@@ -257,3 +259,24 @@ def get_flow_by_id_or_endpoint_name(
raise HTTPException(status_code=404, detail=f"Flow identifier {flow_id_or_name} not found")
return flow
def generate_unique_flow_name(flow_name, user_id, session):
original_name = flow_name
n = 1
while True:
# Check if a flow with the given name exists
existing_flow = session.exec(
select(Flow).where(
Flow.name == flow_name,
Flow.user_id == user_id,
)
).first()
# If no flow with the given name exists, return the name
if not existing_flow:
return flow_name
# If a flow with the name already exists, append (n) to the name and increment n
flow_name = f"{original_name} ({n})"
n += 1
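`generate_unique_flow_name` (and the folder variant in the next file) keeps appending ` (n)` until the database query finds no clash. The same loop without the database, to make the suffixing behavior concrete:

```python
# Database-free analogue of the naming loop above; a set stands in for the
# per-user Flow/Folder existence query.
def unique_name(name: str, taken: set[str]) -> str:
    candidate, n = name, 1
    while candidate in taken:
        candidate = f"{name} ({n})"
        n += 1
    return candidate

assert unique_name("Flow", {"Flow", "Flow (1)"}) == "Flow (2)"
```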

View file

@@ -0,0 +1,23 @@
from langflow.services.database.models.folder.model import Folder
from sqlalchemy import select
def generate_unique_folder_name(folder_name, user_id, session):
original_name = folder_name
n = 1
while True:
# Check if a folder with the given name exists
existing_folder = session.exec(
select(Folder).where(
Folder.name == folder_name,
Folder.user_id == user_id,
)
).first()
# If no folder with the given name exists, return the name
if not existing_folder:
return folder_name
# If a folder with the name already exists, append (n) to the name and increment n
folder_name = f"{original_name} ({n})"
n += 1

View file

@@ -20,7 +20,7 @@ from langflow.services.database.models.user.crud import get_user_by_username
from langflow.services.deps import get_settings_service, session_scope
from langflow.services.database.models.folder.utils import create_default_folder_if_it_doesnt_exist
from langflow.services.deps import get_settings_service, session_scope, get_variable_service
from langflow.services.deps import get_variable_service
STARTER_FOLDER_NAME = "Starter Projects"
@@ -221,6 +221,7 @@ def _is_valid_uuid(val):
return False
return str(uuid_obj) == val
def load_flows_from_directory():
settings_service = get_settings_service()
flows_path = settings_service.settings.load_flows_path
@@ -262,6 +263,7 @@ def load_flows_from_directory():
session.add(flow)
session.commit()
def find_existing_flow(session, flow_id, flow_endpoint_name):
if flow_endpoint_name:
stmt = select(Flow).where(Flow.endpoint_name == flow_endpoint_name)
@@ -271,6 +273,8 @@ def find_existing_flow(session, flow_id, flow_endpoint_name):
if existing := session.exec(stmt).first():
return existing
return None
def create_or_update_starter_projects():
components_paths = get_settings_service().settings.components_path
try:

View file

@@ -20,7 +20,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -140,7 +140,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@@ -149,7 +149,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -392,7 +392,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n files: Optional[list[str]] = None,\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n files=files,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -571,7 +571,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build_no_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n files: Optional[list[str]] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build_no_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n files=files,\n session_id=session_id,\n return_record=return_record,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,

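The starter-project JSON updates above (and the similar ones below) all embed the same three component changes: `PromptComponent.build` becomes async and returns a `Record` wrapping a serialized `ChatPromptTemplate`, the chat components accept `files`, and downstream `input_types` gain `"Record"`. A minimal sketch of what the new prompt build produces, assuming only `langchain_core` is installed (the `dict_values_to_string` helper, awaited in the real component, is skipped here):

```python
# Sketch of the updated PromptComponent's output shape; kwargs values are
# appended to the template as extra messages, mirroring the embedded code.
import asyncio

from langchain_core.prompts import ChatPromptTemplate

async def build_prompt(template: str, **kwargs) -> dict:
    prompt_template = ChatPromptTemplate.from_template(template)
    prompt = prompt_template + list(kwargs.values())
    return {"prompt": prompt.to_json()}

data = asyncio.run(build_prompt("Answer a question about {topic}", context="assumed context text"))
print(data["prompt"]["id"])  # serialized ChatPromptTemplate identifier
```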
View file

@@ -20,7 +20,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -260,7 +260,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n files: Optional[list[str]] = None,\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n files=files,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -444,7 +444,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@@ -453,7 +453,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,

View file

@@ -20,7 +20,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -262,7 +262,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build_no_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n files: Optional[list[str]] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build_no_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n files=files,\n session_id=session_id,\n return_record=return_record,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -421,7 +421,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n files: Optional[list[str]] = None,\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n files=files,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -589,7 +589,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@@ -598,7 +598,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,

View file

@@ -22,7 +22,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build_no_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n files: Optional[list[str]] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build_no_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n files=files,\n session_id=session_id,\n return_record=return_record,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -182,7 +182,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n files: Optional[list[str]] = None,\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n files=files,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -524,7 +524,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -670,7 +670,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@@ -679,7 +679,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,

View file

@@ -20,7 +20,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -130,7 +130,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -236,7 +236,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n files: Optional[list[str]] = None,\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n files=files,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -411,7 +411,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n files: Optional[list[str]] = None,\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n files=files,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -789,7 +789,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@ -798,7 +798,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -1146,7 +1146,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@ -1155,7 +1155,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,

View file

@ -20,7 +20,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build_no_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n files: Optional[list[str]] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build_no_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n files=files,\n session_id=session_id,\n return_record=return_record,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -784,7 +784,7 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": ["Text", "Record"]
},
"code": {
"type": "code",
@ -793,7 +793,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -1034,7 +1034,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n",
"value": "from langchain_core.prompts import ChatPromptTemplate\n\nfrom langflow.base.prompts.utils import dict_values_to_string\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\nfrom langflow.schema.schema import Record\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Record:\n prompt_template = ChatPromptTemplate.from_template(Text(template))\n kwargs = await dict_values_to_string(kwargs)\n messages = list(kwargs.values())\n prompt = prompt_template + messages\n self.status = f'Prompt:\\n\"{template}\"'\n return Record(data={\"prompt\": prompt.to_json()})\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -1170,7 +1170,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n files: Optional[list[str]] = None,\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n files=files,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,

View file

@ -7,6 +7,7 @@ import orjson
from loguru import logger
from langflow.custom.eval import eval_custom_component_code
from langflow.graph.utils import get_artifact_type, post_process_raw
from langflow.schema.schema import Record
if TYPE_CHECKING:
@ -124,4 +125,14 @@ async def instantiate_custom_component(params, user_id, vertex, fallback_to_env_
custom_repr = build_result
if not isinstance(custom_repr, str):
custom_repr = str(custom_repr)
return custom_component, build_result, {"repr": custom_repr}
raw = custom_component.repr_value
if hasattr(raw, "data"):
raw = raw.data
elif hasattr(raw, "model_dump"):
raw = raw.model_dump()
artifact_type = get_artifact_type(custom_component, build_result)
raw = post_process_raw(raw, artifact_type)
artifact = {"repr": custom_repr, "raw": raw, "type": artifact_type}
return custom_component, build_result, artifact
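`instantiate_custom_component` now returns a richer artifact alongside the build result: the repr plus a raw payload normalized through a `data`/`model_dump` fallback and typed via `get_artifact_type`. A self-contained sketch of just the fallback chain (`_Result` stands in for a langflow Record; the real code reads `custom_component.repr_value`):

```python
class _Result:
    data = {"text": "hello"}

build_result = _Result()
custom_repr = str(build_result)

raw = build_result
if hasattr(raw, "data"):            # Record-like objects expose .data
    raw = raw.data
elif hasattr(raw, "model_dump"):    # pydantic models expose model_dump()
    raw = raw.model_dump()

artifact = {"repr": custom_repr, "raw": raw, "type": "object"}
print(artifact["raw"])  # {'text': 'hello'}
```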

View file

@ -59,7 +59,7 @@ async def run_graph_internal(
outputs or [],
stream=stream,
session_id=session_id_str or "",
fallback_to_env_vars=fallback_to_env_vars
fallback_to_env_vars=fallback_to_env_vars,
)
if session_id_str and session_service:
await session_service.update_session(session_id_str, (graph, artifacts))

View file

@ -1,10 +1,12 @@
import copy
import json
from typing import Literal, Optional, cast
from typing_extensions import TypedDict
from langchain_core.documents import Document
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from pydantic import BaseModel, model_validator
from langchain_core.prompts.image import ImagePromptTemplate
from pydantic import BaseModel, model_serializer, model_validator
class Record(BaseModel):
@ -29,6 +31,11 @@ class Record(BaseModel):
values["data"][key] = values[key]
return values
@model_serializer(mode="plain", when_used="json")
def serialize_model(self):
data = {k: v.to_json() if hasattr(v, "to_json") else v for k, v in self.data.items()}
return data
def get_text(self):
"""
Retrieves the text value from the data dictionary.
@ -102,7 +109,9 @@ class Record(BaseModel):
text = self.data.pop(self.text_key, self.default_value)
return Document(page_content=text, metadata=self.data)
def to_lc_message(self) -> BaseMessage:
def to_lc_message(
self,
) -> BaseMessage:
"""
Converts the Record to a BaseMessage.
@ -118,8 +127,22 @@ class Record(BaseModel):
raise ValueError(f"Missing required keys ('text', 'sender') in Record: {self.data}")
sender = self.data.get("sender", "Machine")
text = self.data.get("text", "")
files = self.data.get("files", [])
if sender == "User":
return HumanMessage(content=text)
if files:
contents = [{"type": "text", "text": text}]
for file_path in files:
image_template = ImagePromptTemplate()
image_prompt_value = image_template.invoke(input={"path": file_path})
contents.append({"type": "image_url", "image_url": image_prompt_value.image_url})
human_message = HumanMessage(content=contents)
else:
human_message = HumanMessage(
content=[{"type": "text", "text": text}],
)
return human_message
return AIMessage(content=text)
def __getattr__(self, key):
@ -169,11 +192,26 @@ class Record(BaseModel):
def __str__(self) -> str:
# return a JSON string representation of the Record attributes
try:
data = {k: v.to_json() if hasattr(v, "to_json") else v for k, v in self.data.items()}
return json.dumps(data, indent=4)
except Exception:
return str(self.data)
return json.dumps(self.data)
def __contains__(self, key):
return key in self.data
INPUT_FIELD_NAME = "input_value"
InputType = Literal["chat", "text", "any"]
OutputType = Literal["chat", "text", "any", "debug"]
class StreamURL(TypedDict):
location: str
class Log(TypedDict):
message: str | dict | StreamURL
type: str
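The headline change in this file is `to_lc_message` going multimodal: when a User record carries file paths, the HumanMessage content becomes a list of text and image_url parts instead of a plain string. A self-contained sketch of the resulting message shapes (the upload path is hypothetical; the real code resolves paths through ImagePromptTemplate):

```python
from langchain_core.messages import HumanMessage

text = "What is in this picture?"
files = ["uploads/cat.png"]  # hypothetical upload path

if files:
    contents = [{"type": "text", "text": text}]
    for file_path in files:
        # simplified: the diff builds image_url via ImagePromptTemplate.invoke()
        contents.append({"type": "image_url", "image_url": {"url": file_path}})
    message = HumanMessage(content=contents)
else:
    message = HumanMessage(content=[{"type": "text", "text": text}])

print(message.content)
```

The new `model_serializer` and the `__str__` change apply the same `to_json()` coercion to nested values, so Records serialize consistently whether dumped as JSON or stringified.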

View file

@ -215,10 +215,7 @@ def create_user_longterm_token(db: Session = Depends(get_session)) -> tuple[UUID
username = settings_service.auth_settings.SUPERUSER
super_user = get_user_by_username(db, username)
if not super_user:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Super user hasn't been created"
)
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Super user hasn't been created")
access_token_expires_longterm = timedelta(days=365)
access_token = create_token(
data={"sub": str(super_user.id)},

View file

@ -23,6 +23,9 @@ class CacheMiss:
def __repr__(self):
return "<CACHE_MISS>"
def __bool__(self):
return False
def create_cache_folder(func):
def wrapper(*args, **kwargs):
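Giving `CacheMiss` a falsy `__bool__` lets call sites truth-test a lookup instead of comparing against the sentinel; note that a legitimately falsy cached value is indistinguishable this way, so identity checks remain the unambiguous test. A self-contained sketch:

```python
class CacheMiss:
    def __repr__(self):
        return "<CACHE_MISS>"

    def __bool__(self):
        return False

CACHE_MISS = CacheMiss()

def get(cache: dict, key: str):
    return cache.get(key, CACHE_MISS)

result = get({}, "flow-1")
if not result:                   # works now that misses are falsy
    print("miss:", result)       # miss: <CACHE_MISS>
assert result is CACHE_MISS      # identity check stays unambiguous
```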

View file

@ -55,6 +55,7 @@ class ApiKeyRead(ApiKeyBase):
id: UUID
api_key: str = Field(schema_extra={"validate_default": True})
user_id: UUID = Field()
created_at: datetime = Field()
@field_validator("api_key")
@classmethod

View file

@ -29,6 +29,7 @@ class FlowBase(SQLModel):
is_component: Optional[bool] = Field(default=False, nullable=True)
updated_at: Optional[datetime] = Field(default_factory=lambda: datetime.now(timezone.utc), nullable=True)
webhook: Optional[bool] = Field(default=False, nullable=True, description="Can be used on the webhook endpoint")
folder_id: Optional[UUID] = Field(default=None, nullable=True)
endpoint_name: Optional[str] = Field(default=None, nullable=True, index=True)
@field_validator("endpoint_name")

View file

@ -11,10 +11,10 @@ if TYPE_CHECKING:
class TransactionModel(BaseModel):
index: Optional[int] = Field(default=None)
timestamp: Optional[datetime] = Field(default_factory=datetime.now, alias="timestamp")
flow_id: str
source: str
target: str
target_args: dict
vertex_id: str
target_id: str | None = None
inputs: dict
outputs: dict
status: str
error: Optional[str] = None
@ -23,13 +23,13 @@ class TransactionModel(BaseModel):
populate_by_name = True
# validate target_args in case it is a JSON
@field_validator("target_args", mode="before")
@field_validator("outputs", "inputs", mode="before")
def validate_target_args(cls, v):
if isinstance(v, str):
return json.loads(v)
return v
@field_serializer("target_args")
@field_serializer("outputs", "inputs")
def serialize_target_args(v):
if isinstance(v, dict):
return json.dumps(v)
@ -39,10 +39,9 @@ class TransactionModel(BaseModel):
class TransactionModelResponse(BaseModel):
index: Optional[int] = Field(default=None)
timestamp: Optional[datetime] = Field(default_factory=datetime.now, alias="timestamp")
flow_id: str
source: str
target: str
target_args: dict
vertex_id: str
inputs: dict
outputs: dict
status: str
error: Optional[str] = None
@ -51,7 +50,7 @@ class TransactionModelResponse(BaseModel):
populate_by_name = True
# validate target_args in case it is a JSON
@field_validator("target_args", mode="before")
@field_validator("outputs", "inputs", mode="before")
def validate_target_args(cls, v):
if isinstance(v, str):
return json.loads(v)
@ -75,14 +74,14 @@ class MessageModel(BaseModel):
sender_name: str
session_id: str
message: str
artifacts: dict
files: list[str] = []
class Config:
from_attributes = True
populate_by_name = True
@field_validator("artifacts", mode="before")
def validate_target_args(cls, v):
@field_validator("files", mode="before")
def validate_files(cls, v):
if isinstance(v, str):
return json.loads(v)
return v
@ -97,6 +96,7 @@ class MessageModel(BaseModel):
sender_name=record.sender_name,
message=record.text,
session_id=record.session_id,
files=record.files or [],
artifacts=record.artifacts or {},
timestamp=record.timestamp,
flow_id=flow_id,
@ -106,12 +106,6 @@ class MessageModel(BaseModel):
class MessageModelResponse(MessageModel):
index: Optional[int] = Field(default=None)
@field_validator("artifacts", mode="before")
def serialize_artifacts(v):
if isinstance(v, str):
return json.loads(v)
return v
@field_validator("index", mode="before")
def validate_id(cls, v):
if isinstance(v, float):
@ -122,6 +116,13 @@ class MessageModelResponse(MessageModel):
return v
class MessageModelRequest(MessageModel):
message: str = Field(default="")
sender: str = Field(default="")
sender_name: str = Field(default="")
session_id: str = Field(default="")
class VertexBuildModel(BaseModel):
index: Optional[int] = Field(default=None, alias="index", exclude=True)
id: Optional[str] = Field(default=None, alias="id")
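TransactionModel drops `source`/`target`/`target_args` in favor of `vertex_id`/`target_id`/`inputs`/`outputs`, and the JSON-coercion validator and serializer are repointed at the new fields; MessageModel swaps `artifacts` for a `files` list with the same string-to-JSON handling. A self-contained sketch of that round-trip under pydantic v2:

```python
import json
from pydantic import BaseModel, field_serializer, field_validator

class Txn(BaseModel):
    inputs: dict
    outputs: dict

    @field_validator("inputs", "outputs", mode="before")
    @classmethod
    def parse_json(cls, v):
        # accept JSON strings coming back from storage
        return json.loads(v) if isinstance(v, str) else v

    @field_serializer("inputs", "outputs")
    def dump_json(self, v):
        # store dicts back out as JSON strings
        return json.dumps(v) if isinstance(v, dict) else v

t = Txn(inputs='{"a": 1}', outputs={"b": 2})
print(t.inputs)                   # {'a': 1}
print(t.model_dump()["outputs"])  # '{"b": 2}' as a JSON string
```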

View file

@ -32,6 +32,10 @@ class MonitorService(Service):
except Exception as e:
logger.exception(f"Error initializing monitor service: {e}")
def exec_query(self, query: str):
with duckdb.connect(str(self.db_path)) as conn:
return conn.execute(query).df()
def to_df(self, table_name):
return self.load_table_as_dataframe(table_name)
@ -69,7 +73,7 @@ class MonitorService(Service):
valid: Optional[bool] = None,
order_by: Optional[str] = "timestamp",
):
query = "SELECT index,flow_id, valid, params, data, artifacts, timestamp FROM vertex_builds"
query = "SELECT id, index,flow_id, valid, params, data, artifacts, timestamp FROM vertex_builds"
conditions = []
if flow_id:
conditions.append(f"flow_id = '{flow_id}'")
@ -88,6 +92,8 @@ class MonitorService(Service):
with duckdb.connect(str(self.db_path)) as conn:
df = conn.execute(query).df()
print(query)
return df.to_dict(orient="records")
def delete_vertex_builds(self, flow_id: Optional[str] = None):
@ -98,11 +104,22 @@ class MonitorService(Service):
with duckdb.connect(str(self.db_path)) as conn:
conn.execute(query)
def delete_messages(self, session_id: str):
def delete_messages_session(self, session_id: str):
query = f"DELETE FROM messages WHERE session_id = '{session_id}'"
with duckdb.connect(str(self.db_path)) as conn:
conn.execute(query)
return self.exec_query(query)
def delete_messages(self, message_ids: list[int]):
query = f"DELETE FROM messages WHERE index IN ({','.join(map(str, message_ids))})"
return self.exec_query(query)
def update_message(self, message_id: int, **kwargs):
query = (
f"""UPDATE messages SET {', '.join(f"{k} = '{v}'" for k, v in kwargs.items())} WHERE index = {message_id}"""
)
return self.exec_query(query)
def add_message(self, message: MessageModel):
self.add_row("messages", message)
@ -117,7 +134,7 @@ class MonitorService(Service):
order: Optional[str] = "DESC",
limit: Optional[int] = None,
):
query = "SELECT index, flow_id, sender_name, sender, session_id, message, artifacts, timestamp FROM messages"
query = "SELECT index, flow_id, sender_name, sender, session_id, message, timestamp FROM messages"
conditions = []
if sender:
conditions.append(f"sender = '{sender}'")
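MonitorService grows a shared `exec_query` helper plus per-id message deletion and in-place updates; note that the new update/delete helpers interpolate values directly into the SQL strings, so callers must pass trusted input. A standalone sketch of the helper pattern (the database path and table are hypothetical; `.df()` requires pandas):

```python
import duckdb

def exec_query(db_path: str, query: str):
    # one short-lived connection per statement, mirroring the new helper
    with duckdb.connect(db_path) as conn:
        return conn.execute(query).df()

db = "monitor_demo.duckdb"  # hypothetical path; the service derives its own
with duckdb.connect(db) as conn:
    conn.execute("CREATE TABLE IF NOT EXISTS messages (idx INTEGER, message VARCHAR)")
    conn.execute("INSERT INTO messages VALUES (1, 'hi')")

print(exec_query(db, "SELECT * FROM messages"))
```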

View file

@ -119,21 +119,16 @@ async def log_message(
sender_name: str,
message: str,
session_id: str,
artifacts: Optional[dict] = None,
files: Optional[list] = None,
flow_id: Optional[str] = None,
):
try:
from langflow.graph.vertex.base import Vertex
if isinstance(session_id, Vertex):
session_id = await session_id.build() # type: ignore
monitor_service = get_monitor_service()
row = {
"sender": sender,
"sender_name": sender_name,
"message": message,
"artifacts": artifacts or {},
"files": files or [],
"session_id": session_id,
"timestamp": monitor_service.get_timestamp(),
"flow_id": flow_id,
@ -183,14 +178,15 @@ def build_clean_params(target: "Vertex") -> dict:
return params
def log_transaction(vertex: "Vertex", status, error=None):
def log_transaction(vertex: "Vertex", status, target: Optional["Vertex"] = None, error=None):
try:
monitor_service = get_monitor_service()
clean_params = build_clean_params(vertex)
data = {
"vertex_id": vertex.id,
"vertex_id": str(vertex.id),
"target_id": str(target.id) if target else None,
"inputs": clean_params,
"output": str(vertex.result),
"outputs": vertex.result.model_dump_json(),
"timestamp": monitor_service.get_timestamp(),
"status": status,
"error": error,

View file

@ -70,7 +70,7 @@ class Settings(BaseSettings):
"""Database URL for Langflow. If not provided, Langflow will use a SQLite database."""
pool_size: int = 10
"""The number of connections to keep open in the connection pool. If not provided, the default is 10."""
max_overflow: int = 10
max_overflow: int = 20
"""The number of connections to allow that can be opened beyond the pool size. If not provided, the default is 10."""
cache_type: str = "async"
remove_api_keys: bool = False
@ -78,7 +78,6 @@ class Settings(BaseSettings):
langchain_cache: str = "InMemoryCache"
load_flows_path: Optional[str] = None
# Redis
redis_host: str = "localhost"
redis_port: int = 6379
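`pool_size` stays at 10 while `max_overflow` doubles to 20. Settings like these typically feed straight into SQLAlchemy engine creation (an assumption about the consuming code); a sketch with a hypothetical DSN:

```python
from sqlalchemy import create_engine
from sqlalchemy.pool import QueuePool

engine = create_engine(
    "sqlite:///langflow.db",  # hypothetical DSN for the sketch
    poolclass=QueuePool,
    pool_size=10,     # connections kept open in the pool
    max_overflow=20,  # extra connections allowed beyond pool_size
)
print(engine.pool.status())
```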

View file

@ -1,5 +1,4 @@
import os
from typing import Optional
import yaml
from loguru import logger
@ -8,6 +7,7 @@ from langflow.services.base import Service
from langflow.services.settings.auth import AuthSettings
from langflow.services.settings.base import Settings
class SettingsService(Service):
name = "settings_service"

View file

@ -2,7 +2,18 @@ import enum
from typing import Dict, List, Optional, Union
from langchain_core.messages import BaseMessage
from pydantic import BaseModel, model_validator
from pydantic import BaseModel, field_validator, model_validator
from typing_extensions import TypedDict
from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES
class File(TypedDict):
"""File schema."""
path: str
name: str
type: str
class ChatOutputResponse(BaseModel):
@ -14,6 +25,47 @@ class ChatOutputResponse(BaseModel):
session_id: Optional[str] = None
stream_url: Optional[str] = None
component_id: Optional[str] = None
files: List[File] = []
type: str
@field_validator("files", mode="before")
def validate_files(cls, files):
"""Validate files."""
if not files:
return files
for file in files:
if not isinstance(file, dict):
raise ValueError("Files must be a list of dictionaries.")
if not all(key in file for key in ["path", "name", "type"]):
# If any of the keys are missing, we should extract the
# values from the file path
path = file.get("path")
if not path:
raise ValueError("File path is required.")
name = file.get("name")
if not name:
name = path.split("/")[-1]
file["name"] = name
_type = file.get("type")
if not _type:
# get the file type from the path
extension = path.split(".")[-1]
file_types = set(TEXT_FILE_TYPES + IMG_FILE_TYPES)
if extension and extension in file_types:
_type = extension
else:
for file_type in file_types:
if file_type in path:
_type = file_type
break
if not _type:
raise ValueError("File type is required.")
file["type"] = _type
return files
@classmethod
def from_message(
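The new `files` validator back-fills `name` and `type` from the path whenever the caller omits them. A standalone, simplified sketch of just that inference (the extension lists below stand in for IMG_FILE_TYPES/TEXT_FILE_TYPES; the real validator also falls back to scanning the path for a known type):

```python
IMG_FILE_TYPES = ["png", "jpg", "jpeg", "gif"]
TEXT_FILE_TYPES = ["txt", "md", "pdf", "csv"]

def infer_file(path: str) -> dict:
    name = path.split("/")[-1]
    extension = path.split(".")[-1]
    file_types = set(TEXT_FILE_TYPES + IMG_FILE_TYPES)
    _type = extension if extension in file_types else None
    if _type is None:
        raise ValueError("File type is required.")
    return {"path": path, "name": name, "type": _type}

print(infer_file("uploads/cat.png"))
# {'path': 'uploads/cat.png', 'name': 'cat.png', 'type': 'png'}
```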

View file

@ -1296,13 +1296,13 @@ types-requests = ">=2.31.0.2,<3.0.0.0"
[[package]]
name = "langsmith"
version = "0.1.72"
version = "0.1.75"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langsmith-0.1.72-py3-none-any.whl", hash = "sha256:a4456707669521bd75b7431b9205a6b99579fb9ff01bd338f52d29df11a7662d"},
{file = "langsmith-0.1.72.tar.gz", hash = "sha256:262ae9e8aceaba50f3a0f5b6eb559d6110886f0afc6b0ed5270e7d3d3f1fd8d6"},
{file = "langsmith-0.1.75-py3-none-any.whl", hash = "sha256:d08b08dd6b3fa4da170377f95123d77122ef4c52999d10fff4ae08ff70d07aed"},
{file = "langsmith-0.1.75.tar.gz", hash = "sha256:61274e144ea94c297dd78ce03e6dfae18459fe9bd8ab5094d61a0c4816561279"},
]
[package.dependencies]
@ -1600,13 +1600,13 @@ files = [
[[package]]
name = "marshmallow"
version = "3.21.2"
version = "3.21.3"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
optional = false
python-versions = ">=3.8"
files = [
{file = "marshmallow-3.21.2-py3-none-any.whl", hash = "sha256:70b54a6282f4704d12c0a41599682c5c5450e843b9ec406308653b47c59648a1"},
{file = "marshmallow-3.21.2.tar.gz", hash = "sha256:82408deadd8b33d56338d2182d455db632c6313aa2af61916672146bb32edc56"},
{file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"},
{file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"},
]
[package.dependencies]

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow-base"
version = "0.0.57"
version = "0.0.59"
description = "A Python package with a built-in web application"
authors = ["Langflow <contact@langflow.org>"]
maintainers = [

src/frontend/package-lock.json (generated, 13972 lines): file diff suppressed because it is too large.

View file

@ -22,6 +22,7 @@
"@radix-ui/react-slot": "^1.0.2",
"@radix-ui/react-switch": "^1.0.3",
"@radix-ui/react-tabs": "^1.0.4",
"@radix-ui/react-toggle": "^1.0.3",
"@radix-ui/react-tooltip": "^1.0.6",
"@tabler/icons-react": "^2.32.0",
"@tailwindcss/forms": "^0.5.6",
@ -36,9 +37,9 @@
"class-variance-authority": "^0.6.1",
"clsx": "^1.2.1",
"cmdk": "^1.0.0",
"debounce-promise": "^3.1.2",
"dompurify": "^3.0.5",
"dotenv": "^16.4.5",
"emoji-regex": "^10.3.0",
"esbuild": "^0.17.19",
"file-saver": "^2.0.5",
"framer-motion": "^11.0.6",
@ -47,6 +48,7 @@
"million": "^3.0.6",
"moment": "^2.29.4",
"openseadragon": "^4.1.1",
"p-debounce": "^4.0.0",
"playwright": "^1.42.0",
"react": "^18.2.21",
"react-ace": "^10.1.0",

View file

@ -45,6 +45,9 @@ export default defineConfig({
name: "chromium",
use: {
...devices["Desktop Chrome"],
launchOptions: {
// headless: false,
},
contextOptions: {
// chromium-specific permissions
permissions: ["clipboard-read", "clipboard-write"],
@ -57,6 +60,7 @@ export default defineConfig({
// use: {
// ...devices["Desktop Firefox"],
// launchOptions: {
// headless: false,
// firefoxUserPrefs: {
// "dom.events.asyncClipboard.readText": true,
// "dom.events.testing.asyncClipboard": true,

View file

@ -164,3 +164,13 @@ body {
.ag-body-vertical-scroll-viewport::-webkit-scrollbar-thumb:hover {
background-color: #bbb;
}
/* This CSS is to not apply the border for the column having 'no-border' class */
.no-border.ag-cell:focus {
border: none !important;
outline: none;
}
.no-border.ag-cell {
border: none !important;
outline: none;
}

View file

@ -1,4 +1,3 @@
import axios from "axios";
import { useContext, useEffect, useState } from "react";
import { ErrorBoundary } from "react-error-boundary";
import { useNavigate } from "react-router-dom";

View file

@ -0,0 +1,12 @@
import { Textarea } from "../../../../../../../components/ui/textarea";
export default function ErrorOutput({ value }: { value: string }) {
return (
<Textarea
className={`h-full w-full text-destructive custom-scroll`}
placeholder={"Empty"}
value={value}
readOnly
/>
);
}

View file

@ -0,0 +1,4 @@
export const convertToTableRows = (obj: Object) => {
const tokensArray = [Object.values(obj)[0]];
return tokensArray;
};

View file

@ -0,0 +1,90 @@
import ForwardedIconComponent from "../../../../../../components/genericIconComponent";
import RecordsOutputComponent from "../../../../../../components/recordsOutputComponent";
import {
Alert,
AlertDescription,
AlertTitle,
} from "../../../../../../components/ui/alert";
import { Case } from "../../../../../../shared/components/caseComponent";
import TextOutputView from "../../../../../../shared/components/textOutputView";
import useFlowStore from "../../../../../../stores/flowStore";
import ErrorOutput from "./components";
export default function SwitchOutputView(nodeId): JSX.Element {
const nodeIdentity = nodeId.nodeId;
const nodes = useFlowStore((state) => state.nodes);
const flowPool = useFlowStore((state) => state.flowPool);
const node = nodes.find((node) => node?.id === nodeIdentity);
const flowPoolNode = (flowPool[nodeIdentity] ?? [])[
(flowPool[nodeIdentity]?.length ?? 1) - 1
];
const results = flowPoolNode?.data?.logs?.[0] ?? "";
const resultType = results?.type;
let resultMessage = results?.message;
if (resultMessage?.raw) {
resultMessage = resultMessage.raw;
}
console.log("resultType", results);
return (
<>
<Case condition={!resultType || resultType === "unknown"}>
<div>NO OUTPUT</div>
</Case>
<Case condition={resultType === "ValueError"}>
<ErrorOutput value={resultMessage}></ErrorOutput>
</Case>
<Case condition={node && resultType === "text"}>
<TextOutputView left={false} value={resultMessage} />
</Case>
<Case condition={resultType === "record"}>
<RecordsOutputComponent
rows={[resultMessage] ?? []}
pagination={true}
columnMode="union"
/>
</Case>
<Case condition={resultType === "object"}>
<RecordsOutputComponent
rows={[resultMessage]}
pagination={true}
columnMode="union"
/>
</Case>
{Array.isArray(resultMessage) && (
<Case condition={resultType === "array"}>
<RecordsOutputComponent
rows={
(resultMessage as Array<any>).every((item) => item.data)
? (resultMessage as Array<any>).map((item) => item.data)
: resultMessage
}
pagination={true}
columnMode="union"
/>
</Case>
)}
<Case condition={resultType === "stream"}>
<div className="flex h-full w-full items-center justify-center align-middle">
<Alert variant={"default"} className="w-fit">
<ForwardedIconComponent
name="AlertCircle"
className="h-5 w-5 text-primary"
/>
<AlertTitle>{"Streaming is not supported"}</AlertTitle>
<AlertDescription>
{
"Use the playground to interact with components that stream data"
}
</AlertDescription>
</Alert>
</div>
</Case>
</>
);
}
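SwitchOutputView branches on the `type` of the latest log entry, which the backend emits in the shape of the Log/StreamURL TypedDicts added to schema.py earlier in this commit. A sketch of those payloads (the concrete values are hypothetical):

```python
from typing import TypedDict

class StreamURL(TypedDict):
    location: str

class Log(TypedDict):
    message: str | dict | StreamURL
    type: str

logs: list[Log] = [
    {"message": "final text", "type": "text"},
    {"message": {"text": "a row"}, "type": "record"},
    {"message": {"location": "/api/v1/streams/123"}, "type": "stream"},
]
for log in logs:
    print(log["type"], "->", log["message"])
```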

View file

@ -0,0 +1,25 @@
import { Button } from "../../../../components/ui/button";
import BaseModal from "../../../../modals/baseModal";
import SwitchOutputView from "./components/switchOutputView";
export default function OutputModal({ open, setOpen, nodeId }): JSX.Element {
return (
<BaseModal open={open} setOpen={setOpen} size="medium">
<BaseModal.Header description="Inspect the output of the component below.">
<div className="flex items-center">
<span className="pr-2">Component Output</span>
</div>
</BaseModal.Header>
<BaseModal.Content>
<SwitchOutputView nodeId={nodeId} />
</BaseModal.Content>
<BaseModal.Footer>
<div className="flex w-full justify-end pt-2">
<Button className="flex gap-2 px-3" onClick={() => setOpen(false)}>
Close
</Button>
</div>
</BaseModal.Footer>
</BaseModal>
);
}

View file

@ -22,7 +22,6 @@ import {
TOOLTIP_EMPTY,
} from "../../../../constants/constants";
import { Case } from "../../../../shared/components/caseComponent";
import useAlertStore from "../../../../stores/alertStore";
import useFlowStore from "../../../../stores/flowStore";
import useFlowsManagerStore from "../../../../stores/flowsManagerStore";
import { useTypesStore } from "../../../../stores/typesStore";
@ -45,6 +44,7 @@ import useFetchDataOnMount from "../../../hooks/use-fetch-data-on-mount";
import useHandleOnNewValue from "../../../hooks/use-handle-new-value";
import useHandleNodeClass from "../../../hooks/use-handle-node-class";
import useHandleRefreshButtonPress from "../../../hooks/use-handle-refresh-buttons";
import OutputModal from "../outputModal";
import TooltipRenderComponent from "../tooltipRenderComponent";
import { TEXT_FIELD_TYPES } from "./constants";
@ -67,7 +67,6 @@ export default function ParameterComponent({
const ref = useRef<HTMLDivElement>(null);
const refHtml = useRef<HTMLDivElement & ReactNode>(null);
const infoHtml = useRef<HTMLDivElement & ReactNode>(null);
const setErrorData = useAlertStore((state) => state.setErrorData);
const currentFlow = useFlowsManagerStore((state) => state.currentFlow);
const nodes = useFlowStore((state) => state.nodes);
const edges = useFlowStore((state) => state.edges);
@ -80,6 +79,16 @@ export default function ParameterComponent({
const flow = currentFlow?.data?.nodes ?? null;
const groupedEdge = useRef(null);
const setFilterEdge = useFlowStore((state) => state.setFilterEdge);
const [openOutputModal, setOpenOutputModal] = useState(false);
const flowPool = useFlowStore((state) => state.flowPool);
const displayOutputPreview = !!flowPool[data.id];
const unknownOutput = !!(
flowPool[data.id] &&
flowPool[data.id][flowPool[data.id].length - 1]?.data?.logs[0]?.type ===
"unknown"
);
const { handleOnNewValue: handleOnNewValueHook } = useHandleOnNewValue(
data,
@ -251,9 +260,38 @@ export default function ParameterComponent({
</span>
</ShadTooltip>
) : (
<span className={!left && data.node?.frozen ? " text-ice" : ""}>
{title}
</span>
<div className="flex gap-2">
<span className={!left && data.node?.frozen ? " text-ice" : ""}>
{title}
</span>
{!left && (
<ShadTooltip
content={
displayOutputPreview
? unknownOutput
? "Output can't be displayed"
: "Inspect Output"
: "Please build the component first"
}
>
<button
disabled={!displayOutputPreview || unknownOutput}
onClick={() => setOpenOutputModal(true)}
data-testid={`output-inspection-${title.toLowerCase()}`}
>
<IconComponent
className={classNames(
"h-5 w-5 rounded-md",
displayOutputPreview && !unknownOutput
? " hover:bg-secondary-foreground/5 hover:text-medium-indigo"
: " cursor-not-allowed text-muted-foreground",
)}
name={"ScanEye"}
/>
</button>
</ShadTooltip>
)}
</div>
)}
<span className={(required ? "ml-2 " : "") + "text-status-red"}>
{required ? "*" : ""}
@ -392,7 +430,7 @@ export default function ParameterComponent({
});
}}
name={name}
data={data}
data={data.node?.template[name]!}
/>
</div>
{data.node?.template[name]?.refresh_button && (
@ -448,8 +486,8 @@ export default function ParameterComponent({
data.node?.template[name]?.real_time_refresh)
}
>
<div className="mt-2 flex w-full items-center">
<div className="w-5/6 flex-grow">
<div className="mt-2 flex w-full items-center gap-2">
<div className="flex-1">
<Dropdown
disabled={disabled}
isLoading={isLoading}
@ -467,7 +505,6 @@ export default function ParameterComponent({
name={name}
data={data}
button_text={data.node?.template[name]?.refresh_button_text}
className="extra-side-bar-buttons ml-2 mt-1"
handleUpdateValues={handleRefreshButtonPress}
id={"refresh-button-" + name}
/>
@ -580,6 +617,13 @@ export default function ParameterComponent({
/>
</div>
</Case>
{openOutputModal && (
<OutputModal
open={openOutputModal}
nodeId={data.id}
setOpen={setOpenOutputModal}
/>
)}
</>
</div>
);

View file

@ -1,32 +1,36 @@
import { cloneDeep } from "lodash";
import { useCallback, useEffect, useMemo, useState } from "react";
import emojiRegex from "emoji-regex";
import { useEffect, useMemo, useState } from "react";
import { NodeToolbar, useUpdateNodeInternals } from "reactflow";
import IconComponent from "../../components/genericIconComponent";
import InputComponent from "../../components/inputComponent";
import ShadTooltip from "../../components/shadTooltipComponent";
import { Button } from "../../components/ui/button";
import Checkmark from "../../components/ui/checkmark";
import Loading from "../../components/ui/loading";
import { Textarea } from "../../components/ui/textarea";
import Xmark from "../../components/ui/xmark";
import {
RUN_TIMESTAMP_PREFIX,
STATUS_BUILD,
STATUS_BUILDING,
} from "../../constants/constants";
import { BuildStatus } from "../../constants/enums";
import { countHandlesFn } from "../helpers/count-handles";
import { getSpecificClassFromBuildStatus } from "../helpers/get-class-from-build-status";
import NodeToolbarComponent from "../../pages/FlowPage/components/nodeToolbarComponent";
import useAlertStore from "../../stores/alertStore";
import { useDarkStore } from "../../stores/darkStore";
import useFlowStore from "../../stores/flowStore";
import useFlowsManagerStore from "../../stores/flowsManagerStore";
import { useTypesStore } from "../../stores/typesStore";
import { APIClassType } from "../../types/api";
import { validationStatusType } from "../../types/components";
import { VertexBuildTypeAPI } from "../../types/api";
import { NodeDataType } from "../../types/flow";
import { handleKeyDown, scapedJSONStringfy } from "../../utils/reactflowUtils";
import { nodeColors, nodeIconsLucide } from "../../utils/styleUtils";
import { classNames, cn } from "../../utils/utils";
import useCheckCodeValidity from "../hooks/use-check-code-validity";
import useIconNodeRender from "../hooks/use-icon-render";
import useIconStatus from "../hooks/use-icons-status";
import useUpdateNodeCode from "../hooks/use-update-node-code";
import useUpdateValidationStatus from "../hooks/use-update-validation-status";
import useValidationStatusString from "../hooks/use-validation-status-string";
import getFieldTitle from "../utils/get-field-title";
import sortFields from "../utils/sort-fields";
import isWrappedWithClass from "../../pages/FlowPage/components/PageComponent/utils/is-wrapped-with-class";
@ -34,14 +38,13 @@ import ParameterComponent from "./components/parameterComponent";
export default function GenericNode({
data,
xPos,
yPos,
selected,
}: {
data: NodeDataType;
selected: boolean;
xPos: number;
yPos: number;
xPos?: number;
yPos?: number;
}): JSX.Element {
const types = useTypesStore((state) => state.types);
const templates = useTypesStore((state) => state.templates);
@ -51,7 +54,15 @@ export default function GenericNode({
const setNode = useFlowStore((state) => state.setNode);
const updateNodeInternals = useUpdateNodeInternals();
const setErrorData = useAlertStore((state) => state.setErrorData);
const name = nodeIconsLucide[data.type] ? data.type : types[data.type];
const isDark = useDarkStore((state) => state.dark);
const buildStatus = useFlowStore(
(state) => state.flowBuildStatus[data.id]?.status,
);
const lastRunTime = useFlowStore(
(state) => state.flowBuildStatus[data.id]?.timestamp,
);
const takeSnapshot = useFlowsManagerStore((state) => state.takeSnapshot);
const [inputName, setInputName] = useState(false);
const [nodeName, setNodeName] = useState(data.node!.display_name);
const [inputDescription, setInputDescription] = useState(false);
@ -59,185 +70,25 @@ export default function GenericNode({
data.node?.description!,
);
const [isOutdated, setIsOutdated] = useState(false);
const buildStatus = useFlowStore(
(state) => state.flowBuildStatus[data.id]?.status,
);
const lastRunTime = useFlowStore(
(state) => state.flowBuildStatus[data.id]?.timestamp,
);
const [validationStatus, setValidationStatus] =
useState<validationStatusType | null>(null);
useState<VertexBuildTypeAPI | null>(null);
const [handles, setHandles] = useState<number>(0);
const [validationString, setValidationString] = useState<string>("");
const takeSnapshot = useFlowsManagerStore((state) => state.takeSnapshot);
useEffect(() => {
// This one should run only once
// first check if data.type in NATIVE_CATEGORIES
// if not return
if (!data.node?.template?.code?.value) return;
const thisNodeTemplate = templates[data.type]?.template;
// if the template does not have a code key
// return
if (!thisNodeTemplate?.code) return;
const currentCode = thisNodeTemplate.code?.value;
const thisNodesCode = data.node!.template?.code?.value;
const componentsToIgnore = ["Custom Component"];
if (
currentCode !== thisNodesCode &&
!componentsToIgnore.includes(data.node!.display_name)
) {
setIsOutdated(true);
} else {
setIsOutdated(false);
}
// template.code can be undefined
}, [data.node?.template?.code?.value]);
const updateNodeCode = useCallback(
(newNodeClass: APIClassType, code: string, name: string) => {
setNode(data.id, (oldNode) => {
let newNode = cloneDeep(oldNode);
newNode.data = {
...newNode.data,
node: newNodeClass,
description: newNodeClass.description ?? data.node!.description,
display_name: newNodeClass.display_name ?? data.node!.display_name,
};
newNode.data.node.template[name].value = code;
setIsOutdated(false);
return newNode;
});
updateNodeInternals(data.id);
},
[data.id, data.node, setNode, setIsOutdated],
);
if (!data.node!.template) {
setErrorData({
title: `Error in component ${data.node!.display_name}`,
list: [
`The component ${data.node!.display_name} has no template.`,
`Please contact the developer of the component to fix this issue.`,
],
});
takeSnapshot();
deleteNode(data.id);
}
function countHandles(): void {
let count = Object.keys(data.node!.template)
.filter((templateField) => templateField.charAt(0) !== "_")
.map((templateCamp) => {
const { template } = data.node!;
if (template[templateCamp].input_types) return true;
if (!template[templateCamp].show) return false;
switch (template[templateCamp].type) {
case "str":
case "bool":
case "float":
case "code":
case "prompt":
case "file":
case "int":
return false;
default:
return true;
}
})
.reduce((total, value) => total + (value ? 1 : 0), 0);
setHandles(count);
}
useEffect(() => {
countHandles();
}, [data, data.node]);
useEffect(() => {
if (!selected) {
setInputName(false);
setInputDescription(false);
}
}, [selected]);
const iconStatus = useIconStatus(buildStatus, validationStatus);
const [showNode, setShowNode] = useState(data.showNode ?? true);
// State for outline color
const isBuilding = useFlowStore((state) => state.isBuilding);
// should be empty string if no duration
// else should be `Duration: ${duration}`
const getDurationString = (duration: number | undefined): string => {
if (duration === undefined) {
return "";
} else {
return `${duration}`;
}
};
const durationString = getDurationString(validationStatus?.data.duration);
const updateNodeCode = useUpdateNodeCode(
data?.id,
data.node!,
setNode,
setIsOutdated,
updateNodeInternals,
);
useEffect(() => {
setNodeDescription(data.node!.description);
}, [data.node!.description]);
useEffect(() => {
setNodeName(data.node!.display_name);
}, [data.node!.display_name]);
useEffect(() => {
const relevantData =
flowPool[data.id] && flowPool[data.id]?.length > 0
? flowPool[data.id][flowPool[data.id].length - 1]
: null;
if (relevantData) {
// Extract validation information from relevantData and update the validationStatus state
setValidationStatus(relevantData);
} else {
setValidationStatus(null);
}
}, [flowPool[data.id], data.id]);
useEffect(() => {
if (validationStatus?.params) {
// if it is not a string turn it into a string
let newValidationString = validationStatus.params;
if (typeof newValidationString !== "string") {
newValidationString = JSON.stringify(validationStatus.params);
}
setValidationString(newValidationString);
}
}, [validationStatus, validationStatus?.params]);
const [showNode, setShowNode] = useState(data.showNode ?? true);
useEffect(() => {
setShowNode(data.showNode ?? true);
}, [data.showNode]);
const nameEditable = true;
const emojiRegex = /\p{Emoji}/u;
const isEmoji = emojiRegex.test(data?.node?.icon!);
const iconNodeRender = useCallback(() => {
const iconElement = data?.node?.icon;
const iconColor = nodeColors[types[data.type]];
const iconName =
iconElement || (data.node?.flow ? "group_components" : name);
const iconClassName = `generic-node-icon ${
!showNode ? " absolute inset-x-6 h-12 w-12 " : ""
}`;
if (iconElement && isEmoji) {
return nodeIconFragment(iconElement);
} else {
return checkNodeIconFragment(iconColor, iconName, iconClassName);
}
}, [data, isEmoji, name, showNode]);
const name = nodeIconsLucide[data.type] ? data.type : types[data.type];
const nodeIconFragment = (icon) => {
return <span className="text-lg">{icon}</span>;
@ -253,79 +104,24 @@ export default function GenericNode({
);
};
const isDark = useDarkStore((state) => state.dark);
const renderIconStatus = (
buildStatus: BuildStatus | undefined,
validationStatus: validationStatusType | null,
) => {
if (buildStatus === BuildStatus.BUILDING) {
return <Loading className="text-medium-indigo" />;
} else {
return (
<>
<IconComponent
name="Play"
className="absolute ml-0.5 h-5 fill-current stroke-2 text-medium-indigo opacity-0 transition-all group-hover:opacity-100"
/>
{validationStatus && validationStatus.valid ? (
<Checkmark
className="absolute ml-0.5 h-5 stroke-2 text-status-green opacity-100 transition-all group-hover:opacity-0"
isVisible={true}
/>
) : validationStatus &&
!validationStatus.valid &&
buildStatus === BuildStatus.INACTIVE ? (
<IconComponent
name="Play"
className="absolute ml-0.5 h-5 fill-current stroke-2 text-status-green opacity-30 transition-all group-hover:opacity-0"
/>
) : buildStatus === BuildStatus.ERROR ||
(validationStatus && !validationStatus.valid) ? (
<Xmark
isVisible={true}
className="absolute ml-0.5 h-5 fill-current stroke-2 text-status-red opacity-100 transition-all group-hover:opacity-0"
/>
) : (
<IconComponent
name="Play"
className="absolute ml-0.5 h-5 fill-current stroke-2 text-muted-foreground opacity-100 transition-all group-hover:opacity-0"
/>
)}
</>
);
}
};
const getSpecificClassFromBuildStatus = (
buildStatus: BuildStatus | undefined,
validationStatus: validationStatusType | null,
) => {
let isInvalid = validationStatus && !validationStatus.valid;
if (buildStatus === BuildStatus.INACTIVE) {
// INACTIVE should have its own class
return "inactive-status";
}
if (
(buildStatus === BuildStatus.BUILT && isInvalid) ||
buildStatus === BuildStatus.ERROR
) {
return isDark ? "built-invalid-status-dark" : "built-invalid-status";
} else if (buildStatus === BuildStatus.BUILDING) {
return "building-status";
} else {
return "";
}
const renderIconStatus = () => {
return (
<div className="generic-node-status-position flex items-center justify-center">
{iconStatus}
</div>
);
};
const getNodeBorderClassName = (
selected: boolean,
showNode: boolean,
buildStatus: BuildStatus | undefined,
validationStatus: validationStatusType | null,
validationStatus: VertexBuildTypeAPI | null,
) => {
const specificClassFromBuildStatus = getSpecificClassFromBuildStatus(
buildStatus,
validationStatus,
isDark,
);
const baseBorderClass = getBaseBorderClass(selected);
@ -333,7 +129,7 @@ export default function GenericNode({
const names = classNames(
baseBorderClass,
nodeSizeClass,
"generic-node-div",
"generic-node-div group/node",
specificClassFromBuildStatus,
);
return names;
@ -350,6 +146,64 @@ export default function GenericNode({
const getNodeSizeClass = (showNode) =>
showNode ? "w-96 rounded-lg" : "w-26 h-26 rounded-full";
const nameEditable = true;
const isEmoji = emojiRegex().test(data?.node?.icon!);
if (!data.node!.template) {
setErrorData({
title: `Error in component ${data.node!.display_name}`,
list: [
`The component ${data.node!.display_name} has no template.`,
`Please contact the developer of the component to fix this issue.`,
],
});
takeSnapshot();
deleteNode(data.id);
}
useCheckCodeValidity(data, templates, setIsOutdated, types);
useValidationStatusString(validationStatus, setValidationString);
useUpdateValidationStatus(data?.id, flowPool, setValidationStatus);
const iconNodeRender = useIconNodeRender(
data,
types,
nodeColors,
name,
showNode,
isEmoji,
nodeIconFragment,
checkNodeIconFragment,
);
function countHandles(): void {
const count = countHandlesFn(data);
setHandles(count);
}
useEffect(() => {
countHandles();
}, [data, data.node]);
useEffect(() => {
if (!selected) {
setInputName(false);
setInputDescription(false);
}
}, [selected]);
useEffect(() => {
setNodeDescription(data.node!.description);
}, [data.node!.description]);
useEffect(() => {
setNodeName(data.node!.display_name);
}, [data.node!.display_name]);
useEffect(() => {
setShowNode(data.showNode ?? true);
}, [data.showNode]);
const memoizedNodeToolbarComponent = useMemo(() => {
return (
<NodeToolbar>
@ -593,67 +447,56 @@ export default function GenericNode({
)}
</div>
{showNode && (
<ShadTooltip
content={
buildStatus === BuildStatus.BUILDING ? (
<span> {STATUS_BUILDING} </span>
) : !validationStatus ? (
<span className="flex">{STATUS_BUILD}</span>
) : (
<div className="max-h-100 p-2">
<div>
{lastRunTime && (
<div className="justify-left flex font-normal text-muted-foreground">
<div>{RUN_TIMESTAMP_PREFIX}</div>
<div className="ml-1 text-status-blue">
{lastRunTime}
<>
<ShadTooltip
content={
buildStatus === BuildStatus.BUILDING ? (
<span> {STATUS_BUILDING} </span>
) : !validationStatus ? (
<span className="flex">{STATUS_BUILD}</span>
) : (
<div className="max-h-100 p-2">
<div>
{lastRunTime && (
<div className="justify-left flex font-normal text-muted-foreground">
<div>{RUN_TIMESTAMP_PREFIX}</div>
<div className="ml-1 text-status-blue">
{lastRunTime}
</div>
</div>
)}
</div>
<div className="justify-left flex font-normal text-muted-foreground">
<div>Duration:</div>
<div className="ml-1 text-status-blue">
{validationStatus?.data.duration}
</div>
)}
</div>
<div className="justify-left flex font-normal text-muted-foreground">
<div>Duration:</div>
<div className="mb-3 ml-1 text-status-blue">
{validationStatus?.data.duration}
</div>
</div>
<hr />
<span className="mb-2 mt-2 flex justify-center font-semibold text-muted-foreground">
Output
</span>
<div className="max-h-96 overflow-auto font-normal custom-scroll">
{validationString.split("\n").map((line, index) => (
<div className="font-normal" key={index}>
{line}
</div>
))}
</div>
</div>
)
}
side="bottom"
>
<Button
onClick={() => {
if (buildStatus === BuildStatus.BUILDING || isBuilding)
return;
setValidationStatus(null);
buildFlow({ stopNodeId: data.id });
}}
variant="secondary"
className={"group h-9 px-1.5"}
)
}
side="bottom"
>
<div
data-testid={
`button_run_` + data?.node?.display_name.toLowerCase()
}
<Button
onClick={() => {
if (buildStatus === BuildStatus.BUILDING || isBuilding)
return;
setValidationStatus(null);
buildFlow({ stopNodeId: data.id });
}}
variant="secondary"
className={"group h-9 px-1.5"}
>
<div className="generic-node-status-position flex items-center justify-center">
{renderIconStatus(buildStatus, validationStatus)}
<div
data-testid={
`button_run_` + data?.node?.display_name.toLowerCase()
}
>
{renderIconStatus()}
</div>
</div>
</Button>
</ShadTooltip>
</Button>
</ShadTooltip>
</>
)}
</div>
</div>
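
One behavioral detail worth calling out in the refactor above: emoji detection for node icons moved from a hand-rolled `/\p{Emoji}/u` test to the `emoji-regex` package. A minimal sketch of why, assuming `emoji-regex` is installed (outputs are what the package should produce, not taken from the diff):

```tsx
import emojiRegex from "emoji-regex";

// The native Emoji character property also covers plain digits and symbols,
// so an icon of "1" or "#" used to be treated as an emoji.
/\p{Emoji}/u.test("1"); // true  -- U+0031 carries the Emoji property
/\p{Emoji}/u.test("🤖"); // true

// emoji-regex targets real emoji sequences only. Note it is compiled with
// the global flag, so calling emojiRegex() fresh per test (as the diff does)
// avoids stateful lastIndex surprises.
emojiRegex().test("1"); // false -- a bare digit should no longer count
emojiRegex().test("🤖"); // true
```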

View file

@ -0,0 +1,26 @@
import { NodeDataType } from "../../types/flow";
export function countHandlesFn(data: NodeDataType): number {
let count = Object.keys(data.node!.template)
.filter((templateField) => templateField.charAt(0) !== "_")
.map((templateCamp) => {
const { template } = data.node!;
if (template[templateCamp].input_types) return true;
if (!template[templateCamp].show) return false;
switch (template[templateCamp].type) {
case "str":
case "bool":
case "float":
case "code":
case "prompt":
case "file":
case "int":
return false;
default:
return true;
}
})
.reduce((total, value) => total + (value ? 1 : 0), 0);
return count;
}
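
A quick sketch of how the extracted helper tallies connectable fields; the template object and import path below are illustrative, not a real component template:

```tsx
import { countHandlesFn } from "../helpers/count-handles";
import { NodeDataType } from "../../types/flow";

// Fields starting with "_" are skipped; primitive field types (str, bool,
// float, code, prompt, file, int) get no handle; anything with input_types
// always gets one, even when hidden.
const data = {
  id: "node-1",
  type: "ChatOpenAI",
  node: {
    template: {
      _type: { type: "str", show: true }, // skipped: leading underscore
      model_name: { type: "str", show: true }, // primitive -> no handle
      llm: { type: "BaseLanguageModel", show: true }, // non-primitive -> handle
      memory: { type: "str", show: false, input_types: ["Memory"] }, // input_types -> handle
    },
  },
} as unknown as NodeDataType;

countHandlesFn(data); // 2
```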

View file

@ -0,0 +1,25 @@
import { BuildStatus } from "../../constants/enums";
import { VertexBuildTypeAPI } from "../../types/api";
export const getSpecificClassFromBuildStatus = (
buildStatus: BuildStatus | undefined,
validationStatus: VertexBuildTypeAPI | null,
isDark: boolean,
) => {
let isInvalid = validationStatus && !validationStatus.valid;
if (buildStatus === BuildStatus.INACTIVE) {
// INACTIVE should have its own class
return "inactive-status";
}
if (
(buildStatus === BuildStatus.BUILT && isInvalid) ||
buildStatus === BuildStatus.ERROR
) {
return isDark ? "built-invalid-status-dark" : "built-invalid-status";
} else if (buildStatus === BuildStatus.BUILDING) {
return "building-status";
} else {
return "";
}
};
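
The class mapping is a pure function, so its behavior is easy to pin down with a few direct calls (illustrative only; the import path is assumed):

```tsx
import { BuildStatus } from "../../constants/enums";
import { getSpecificClassFromBuildStatus } from "../helpers/get-class-from-build-status";

getSpecificClassFromBuildStatus(BuildStatus.INACTIVE, null, false); // "inactive-status"
getSpecificClassFromBuildStatus(BuildStatus.BUILDING, null, false); // "building-status"
getSpecificClassFromBuildStatus(BuildStatus.ERROR, null, true); // "built-invalid-status-dark"
getSpecificClassFromBuildStatus(BuildStatus.BUILT, null, false); // "" -- valid build, no extra class
```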

View file

@ -0,0 +1,39 @@
import { useEffect } from "react";
import { NATIVE_CATEGORIES } from "../../constants/constants";
import { NodeDataType } from "../../types/flow";
const useCheckCodeValidity = (
data: NodeDataType,
templates: { [key: string]: any },
setIsOutdated: (value: boolean) => void,
types,
) => {
useEffect(() => {
// This one should run only once
// first check if data.type in NATIVE_CATEGORIES
// if not return
if (
!NATIVE_CATEGORIES.includes(types[data.type]) ||
!data.node?.template?.code?.value
)
return;
const thisNodeTemplate = templates[data.type].template;
// if the template does not have a code key
// return
if (!thisNodeTemplate.code) return;
const currentCode = thisNodeTemplate.code?.value;
const thisNodesCode = data.node!.template?.code?.value;
const componentsToIgnore = ["Custom Component", "Prompt"];
if (
currentCode !== thisNodesCode &&
!componentsToIgnore.includes(data.node!.display_name)
) {
setIsOutdated(true);
} else {
setIsOutdated(false);
}
// template.code can be undefined
}, [data.node?.template?.code?.value, templates, setIsOutdated]);
};
export default useCheckCodeValidity;
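
A minimal sketch of a consumer other than GenericNode, assuming the same store layout the diff uses (`OutdatedBadge` is hypothetical; the hook call mirrors the one added to GenericNode above):

```tsx
import { useState } from "react";
import { useTypesStore } from "../../stores/typesStore";
import { NodeDataType } from "../../types/flow";
import useCheckCodeValidity from "../hooks/use-check-code-validity";

function OutdatedBadge({ data }: { data: NodeDataType }): JSX.Element | null {
  const templates = useTypesStore((state) => state.templates);
  const types = useTypesStore((state) => state.types);
  const [isOutdated, setIsOutdated] = useState(false);

  // Compares this node's stored code against the current registry template
  // and flags divergence ("Custom Component" and "Prompt" are ignored).
  useCheckCodeValidity(data, templates, setIsOutdated, types);

  return isOutdated ? <span>Component code is outdated</span> : null;
}
```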

View file

@ -0,0 +1,45 @@
import { useCallback } from "react";
import { NodeDataType } from "../../types/flow";
const useIconNodeRender = (
data: NodeDataType,
types: { [key: string]: string },
nodeColors: { [key: string]: string },
name: string,
showNode: boolean,
isEmoji: boolean,
nodeIconFragment: (iconElement: string) => JSX.Element,
checkNodeIconFragment: (
iconColor: string,
iconName: string,
iconClassName: string,
) => JSX.Element,
) => {
const iconNodeRender = useCallback(() => {
const iconElement = data?.node?.icon;
const iconColor = nodeColors[types[data.type]];
const iconName =
iconElement || (data.node?.flow ? "group_components" : name);
const iconClassName = `generic-node-icon ${
!showNode ? " absolute inset-x-6 h-12 w-12 " : ""
}`;
if (iconElement && isEmoji) {
return nodeIconFragment(iconElement);
} else {
return checkNodeIconFragment(iconColor, iconName, iconClassName);
}
}, [
data,
types,
nodeColors,
name,
showNode,
isEmoji,
nodeIconFragment,
checkNodeIconFragment,
]);
return iconNodeRender;
};
export default useIconNodeRender;
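
A sketch of wiring the hook outside GenericNode. The two fragment builders here are simplified stand-ins for the ones GenericNode defines (only `name` and `className` are passed to `IconComponent`, since those are the props the diff itself uses; color handling is omitted):

```tsx
import IconComponent from "../../components/genericIconComponent";
import { NodeDataType } from "../../types/flow";
import { nodeColors } from "../../utils/styleUtils";
import useIconNodeRender from "../hooks/use-icon-render";

const nodeIconFragment = (icon: string) => <span className="text-lg">{icon}</span>;
const checkNodeIconFragment = (
  iconColor: string,
  iconName: string,
  iconClassName: string,
) => <IconComponent name={iconName} className={iconClassName} />;

// NodeIcon is a hypothetical consumer; GenericNode passes the same arguments.
function NodeIcon({
  data,
  types,
  name,
  showNode,
  isEmoji,
}: {
  data: NodeDataType;
  types: { [key: string]: string };
  name: string;
  showNode: boolean;
  isEmoji: boolean;
}): JSX.Element {
  const renderIcon = useIconNodeRender(
    data, types, nodeColors, name, showNode, isEmoji,
    nodeIconFragment, checkNodeIconFragment,
  );
  return <>{renderIcon()}</>;
}
```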

View file

@ -0,0 +1,54 @@
import IconComponent from "../../components/genericIconComponent";
import Checkmark from "../../components/ui/checkmark";
import Loading from "../../components/ui/loading";
import Xmark from "../../components/ui/xmark";
import { BuildStatus } from "../../constants/enums";
import { VertexBuildTypeAPI } from "../../types/api";
const useIconStatus = (
buildStatus: BuildStatus | undefined,
validationStatus: VertexBuildTypeAPI | null,
) => {
const renderIconStatus = () => {
if (buildStatus === BuildStatus.BUILDING) {
return <Loading className="text-medium-indigo" />;
} else {
return (
<>
<IconComponent
name="Play"
className="absolute ml-0.5 h-5 fill-current stroke-2 text-medium-indigo opacity-0 transition-all group-hover:opacity-100"
/>
{validationStatus && validationStatus.valid ? (
<Checkmark
className="absolute ml-0.5 h-5 stroke-2 text-status-green opacity-100 transition-all group-hover:opacity-0"
isVisible={true}
/>
) : validationStatus &&
!validationStatus.valid &&
buildStatus === BuildStatus.INACTIVE ? (
<IconComponent
name="Play"
className="absolute ml-0.5 h-5 fill-current stroke-2 text-status-green opacity-30 transition-all group-hover:opacity-0"
/>
) : buildStatus === BuildStatus.ERROR ||
(validationStatus && !validationStatus.valid) ? (
<Xmark
isVisible={true}
className="absolute ml-0.5 h-5 fill-current stroke-2 text-status-red opacity-100 transition-all group-hover:opacity-0"
/>
) : (
<IconComponent
name="Play"
className="absolute ml-0.5 h-5 fill-current stroke-2 text-muted-foreground opacity-100 transition-all group-hover:opacity-0"
/>
)}
</>
);
}
};
return renderIconStatus();
};
export default useIconStatus;
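
Consuming the hook is a one-liner; a minimal sketch (the wrapper component is hypothetical, but the markup matches how GenericNode drops the hook's output into the run button above):

```tsx
import { BuildStatus } from "../../constants/enums";
import { VertexBuildTypeAPI } from "../../types/api";
import useIconStatus from "../hooks/use-icons-status";

function RunButtonIcon({
  buildStatus,
  validationStatus,
}: {
  buildStatus: BuildStatus | undefined;
  validationStatus: VertexBuildTypeAPI | null;
}): JSX.Element {
  // Returns the spinner while building, otherwise the play/check/x overlay.
  const iconStatus = useIconStatus(buildStatus, validationStatus);
  return (
    <div className="generic-node-status-position flex items-center justify-center">
      {iconStatus}
    </div>
  );
}
```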

Some files were not shown because too many files have changed in this diff.