diff --git a/.gitattributes b/.gitattributes index 4b878819c..d6e351bc3 100644 --- a/.gitattributes +++ b/.gitattributes @@ -11,12 +11,12 @@ *.ts text *.tsx text *.md text -*.mdx text +*.mdx text working-tree-encoding = UTF-8 *.yml text *.yaml text *.xml text *.csv text -*.json text +*.json text working-tree-encoding = UTF-8 *.sh text *.Dockerfile text Dockerfile text @@ -32,3 +32,4 @@ Dockerfile text *.mp4 binary *.svg binary *.csv binary + diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index 180af936e..b5424b367 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -80,7 +80,10 @@ jobs: langflowai/langflow-frontend:1.0-alpha restart-space: + name: Restart HuggingFace Spaces + if: ${{ inputs.release_type == 'main' }} runs-on: ubuntu-latest + needs: docker_build strategy: matrix: python-version: @@ -100,6 +103,4 @@ jobs: - name: Restart HuggingFace Spaces Build run: | - poetry run python ./scripts/factory_restart_space.py - env: - HUGGINGFACE_API_TOKEN: ${{ secrets.HUGGINGFACE_API_TOKEN }} + poetry run python ./scripts/factory_restart_space.py --space "Langflow/Langflow-Preview" --token ${{ secrets.HUGGINGFACE_API_TOKEN }} diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 8cb0bc90e..286a7a921 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -35,6 +35,10 @@ jobs: with: python-version: "3.10" cache: "poetry" + - name: Set up Nodejs 20 + uses: actions/setup-node@v4 + with: + node-version: "20" - name: Check Version id: check-version run: | diff --git a/.vscode/launch.json b/.vscode/launch.json index 40a60f354..82e39fcc9 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -3,7 +3,7 @@ "configurations": [ { "name": "Debug Backend", - "type": "python", + "type": "debugpy", "request": "launch", "module": "uvicorn", "args": [ @@ -26,7 +26,7 @@ }, { "name": "Debug CLI", - "type": "python", + "type": "debugpy", "request": "launch", "module": "langflow", "args": [ @@ -43,7 +43,7 @@ }, { "name": "Python: Remote Attach", - "type": "python", + "type": "debugpy", "request": "attach", "justMyCode": true, "connect": { @@ -65,7 +65,7 @@ }, { "name": "Python: Debug Tests", - "type": "python", + "type": "debugpy", "request": "launch", "program": "${file}", "purpose": ["debug-test"], diff --git a/Makefile b/Makefile index 878eeca80..abf3e67ec 100644 --- a/Makefile +++ b/Makefile @@ -168,6 +168,7 @@ build_and_install: build_frontend: cd src/frontend && CI='' npm run build + rm -rf src/backend/base/langflow/frontend cp -r src/frontend/build src/backend/base/langflow/frontend build: diff --git a/README.PT.md b/README.PT.md new file mode 100644 index 000000000..8d3197dd7 --- /dev/null +++ b/README.PT.md @@ -0,0 +1,171 @@ + + +# [![Langflow](./docs/static/img/hero.png)](https://www.langflow.org) + +

+ Um framework visual para criar apps de agentes autônomos e RAG +

+

+ Open-source, construído em Python, totalmente personalizável, agnóstico em relação a modelos e databases +

+ +

+ Docs - + Junte-se ao nosso Discord - + Siga-nos no X - + Demonstração +

+ +

+ + + + + + +

+ +
+ README em Inglês + README em Chinês Simplificado +
+ +

+ Seu GIF +

+ +# 📝 Conteúdo + +- [📝 Conteúdo](#-conteúdo) +- [📦 Introdução](#-introdução) +- [🎨 Criar Fluxos](#-criar-fluxos) +- [Deploy](#deploy) + - [Deploy usando Google Cloud Platform](#deploy-usando-google-cloud-platform) + - [Deploy on Railway](#deploy-on-railway) + - [Deploy on Render](#deploy-on-render) +- [🖥️ Interface de Linha de Comando (CLI)](#️-interface-de-linha-de-comando-cli) + - [Uso](#uso) + - [Variáveis de Ambiente](#variáveis-de-ambiente) +- [👋 Contribuir](#-contribuir) +- [🌟 Contribuidores](#-contribuidores) +- [📄 Licença](#-licença) + +# 📦 Introdução + +Você pode instalar o Langflow com pip: + +```shell +# Certifique-se de ter >=Python 3.10 instalado no seu sistema. +# Instale a versão pré-lançamento (recomendada para as atualizações mais recentes) +python -m pip install langflow --pre --force-reinstall + +# ou versão estável +python -m pip install langflow -U +``` + +Então, execute o Langflow com: + +```shell +python -m langflow run +``` + +Você também pode visualizar o Langflow no [HuggingFace Spaces](https://huggingface.co/spaces/Langflow/Langflow-Preview). [Clone o Space usando este link](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true) para criar seu próprio workspace do Langflow em minutos. + +# 🎨 Criar Fluxos + +Criar fluxos com Langflow é fácil. Basta arrastar componentes da barra lateral para o canvas e conectá-los para começar a construir sua aplicação. + +Explore editando os parâmetros do prompt, agrupando componentes e construindo seus próprios componentes personalizados (Custom Components). + +Quando terminar, você pode exportar seu fluxo como um arquivo JSON. + +Carregue o fluxo com: + +```python +from langflow.load import run_flow_from_json + +results = run_flow_from_json("path/to/flow.json", input_value="Hello, World!") +``` + +# Deploy + +## Deploy usando Google Cloud Platform + +Siga nosso passo a passo para fazer deploy do Langflow no Google Cloud Platform (GCP) usando o Google Cloud Shell. O guia está disponível no documento [**Langflow on Google Cloud Platform**](https://github.com/langflow-ai/langflow/blob/dev/docs/docs/deployment/gcp-deployment.md). + +Alternativamente, clique no botão **"Open in Cloud Shell"** abaixo para iniciar o Google Cloud Shell, clonar o repositório do Langflow e começar um **tutorial interativo** que o guiará pelo processo de configuração dos recursos necessários e deploy do Langflow no seu projeto GCP. + +[![Open on Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md) + +## Deploy on Railway + +Use este template para implantar o Langflow 1.0 Preview no Railway: + +[![Deploy 1.0 Preview on Railway](https://railway.app/button.svg)](https://railway.app/template/UsJ1uB?referralCode=MnPSdg) + +Ou este para implantar o Langflow 0.6.x: + +[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/JMXEWp?referralCode=MnPSdg) + +## Deploy on Render + + +Deploy to Render + + +# 🖥️ Interface de Linha de Comando (CLI) + +O Langflow fornece uma interface de linha de comando (CLI) para fácil gerenciamento e configuração. + +## Uso + +Você pode executar o Langflow usando o seguinte comando: + +```shell +langflow run [OPTIONS] +``` + +Cada opção é detalhada abaixo: + +- `--help`: Exibe todas as opções disponíveis. +- `--host`: Define o host para vincular o servidor. 
Pode ser configurado usando a variável de ambiente `LANGFLOW_HOST`. O padrão é `127.0.0.1`. +- `--workers`: Define o número de processos. Pode ser configurado usando a variável de ambiente `LANGFLOW_WORKERS`. O padrão é `1`. +- `--timeout`: Define o tempo limite do worker em segundos. O padrão é `60`. +- `--port`: Define a porta para escutar. Pode ser configurado usando a variável de ambiente `LANGFLOW_PORT`. O padrão é `7860`. +- `--env-file`: Especifica o caminho para o arquivo .env contendo variáveis de ambiente. O padrão é `.env`. +- `--log-level`: Define o nível de log. Pode ser configurado usando a variável de ambiente `LANGFLOW_LOG_LEVEL`. O padrão é `critical`. +- `--components-path`: Especifica o caminho para o diretório contendo componentes personalizados. Pode ser configurado usando a variável de ambiente `LANGFLOW_COMPONENTS_PATH`. O padrão é `langflow/components`. +- `--log-file`: Especifica o caminho para o arquivo de log. Pode ser configurado usando a variável de ambiente `LANGFLOW_LOG_FILE`. O padrão é `logs/langflow.log`. +- `--cache`: Seleciona o tipo de cache a ser usado. As opções são `InMemoryCache` e `SQLiteCache`. Pode ser configurado usando a variável de ambiente `LANGFLOW_LANGCHAIN_CACHE`. O padrão é `SQLiteCache`. +- `--dev/--no-dev`: Alterna o modo de desenvolvimento. O padrão é `no-dev`. +- `--path`: Especifica o caminho para o diretório frontend contendo os arquivos de build. Esta opção é apenas para fins de desenvolvimento. Pode ser configurado usando a variável de ambiente `LANGFLOW_FRONTEND_PATH`. +- `--open-browser/--no-open-browser`: Alterna a opção de abrir o navegador após iniciar o servidor. Pode ser configurado usando a variável de ambiente `LANGFLOW_OPEN_BROWSER`. O padrão é `open-browser`. +- `--remove-api-keys/--no-remove-api-keys`: Alterna a opção de remover as chaves de API dos projetos salvos no banco de dados. Pode ser configurado usando a variável de ambiente `LANGFLOW_REMOVE_API_KEYS`. O padrão é `no-remove-api-keys`. +- `--install-completion [bash|zsh|fish|powershell|pwsh]`: Instala a conclusão para o shell especificado. +- `--show-completion [bash|zsh|fish|powershell|pwsh]`: Exibe a conclusão para o shell especificado, permitindo que você copie ou personalize a instalação. +- `--backend-only`: Este parâmetro, com valor padrão `False`, permite executar apenas o servidor backend sem o frontend. Também pode ser configurado usando a variável de ambiente `LANGFLOW_BACKEND_ONLY`. +- `--store`: Este parâmetro, com valor padrão `True`, ativa os recursos da loja, use `--no-store` para desativá-los. Pode ser configurado usando a variável de ambiente `LANGFLOW_STORE`. + +Esses parâmetros são importantes para usuários que precisam personalizar o comportamento do Langflow, especialmente em cenários de desenvolvimento ou deploy especializado. + +### Variáveis de Ambiente + +Você pode configurar muitas das opções de CLI usando variáveis de ambiente. Estas podem ser exportadas no seu sistema operacional ou adicionadas a um arquivo `.env` e carregadas usando a opção `--env-file`. + +Um arquivo de exemplo `.env` chamado `.env.example` está incluído no projeto. Copie este arquivo para um novo arquivo chamado `.env` e substitua os valores de exemplo pelas suas configurações reais. Se você estiver definindo valores tanto no seu sistema operacional quanto no arquivo `.env`, as configurações do `.env` terão precedência. + +# 👋 Contribuir + +Aceitamos contribuições de desenvolvedores de todos os níveis para nosso projeto open-source no GitHub. 
Se você deseja contribuir, por favor, confira nossas [diretrizes de contribuição](./CONTRIBUTING.md) e ajude a tornar o Langflow mais acessível. + +--- + +[![Star History Chart](https://api.star-history.com/svg?repos=langflow-ai/langflow&type=Timeline)](https://star-history.com/#langflow-ai/langflow&Date) + +# 🌟 Contribuidores + +[![langflow contributors](https://contrib.rocks/image?repo=langflow-ai/langflow)](https://github.com/langflow-ai/langflow/graphs/contributors) + +# 📄 Licença + +O Langflow é lançado sob a licença MIT. Veja o arquivo [LICENSE](LICENSE) para detalhes. diff --git a/README.md b/README.md index 649186a00..68c8fde29 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,7 @@
README in English + README in Portuguese README in Simplified Chinese
@@ -36,7 +37,6 @@ # 📝 Content -- [](#) - [📝 Content](#-content) - [📦 Get Started](#-get-started) - [🎨 Create Flows](#-create-flows) diff --git a/README.zh_CN.md b/README.zh_CN.md index 4259286c8..fee764902 100644 --- a/README.zh_CN.md +++ b/README.zh_CN.md @@ -86,11 +86,12 @@ from langflow.load import run_flow_from_json results = run_flow_from_json("path/to/flow.json", input_value="Hello, World!") ``` + # 部署 ## 在Google Cloud Platform上部署Langflow -请按照我们的分步指南使用 Google Cloud Shell 在 Google Cloud Platform (GCP) 上部署 Langflow。该指南在 [**Langflow in Google Cloud Platform**](GCP_DEPLOYMENT.md) 文档中提供。 +请按照我们的分步指南使用 Google Cloud Shell 在 Google Cloud Platform (GCP) 上部署 Langflow。该指南在 [**Langflow in Google Cloud Platform**](GCP_DEPLOYMENT.md) 文档中提供。 或者,点击下面的 "Open in Cloud Shell" 按钮,启动 Google Cloud Shell,克隆 Langflow 仓库,并开始一个互动教程,该教程将指导您设置必要的资源并在 GCP 项目中部署 Langflow。 @@ -168,4 +169,4 @@ langflow run [OPTIONS] # 📄 许可证 -Langflow 以 MIT 许可证发布。有关详细信息,请参阅 [LICENSE](LICENSE) 文件。 +Langflow 以 MIT 许可证发布。有关详细信息,请参阅 [LICENSE](LICENSE) 文件。 diff --git a/docker/build_and_push.Dockerfile b/docker/build_and_push.Dockerfile index cabc1a753..63521d06a 100644 --- a/docker/build_and_push.Dockerfile +++ b/docker/build_and_push.Dockerfile @@ -1,6 +1,7 @@ # syntax=docker/dockerfile:1 # Keep this syntax directive! It's used to enable Docker BuildKit + ################################ # BUILDER-BASE # Used to build deps + create our virtual environment @@ -47,12 +48,10 @@ WORKDIR /app COPY pyproject.toml poetry.lock README.md ./ COPY src/ ./src COPY scripts/ ./scripts - RUN python -m pip install requests --user && cd ./scripts && python update_dependencies.py RUN $POETRY_HOME/bin/poetry lock --no-update \ - && $POETRY_HOME/bin/poetry install --no-interaction --no-ansi -E deploy \ && $POETRY_HOME/bin/poetry build -f wheel \ - && $POETRY_HOME/bin/poetry run pip install dist/*.whl + && $POETRY_HOME/bin/poetry run pip install dist/*.whl --force-reinstall ################################ # RUNTIME diff --git a/docs/docs/administration/api.mdx b/docs/docs/administration/api.mdx index 103c43f81..115cdc666 100644 --- a/docs/docs/administration/api.mdx +++ b/docs/docs/administration/api.mdx @@ -10,8 +10,7 @@ Langflow provides an API key functionality that allows users to access their ind The default user and password are set using the LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD environment variables. -The default values are -langflow and langflow, respectively. +The default values are `langflow` and `langflow`, respectively. diff --git a/docs/docs/administration/cli.mdx b/docs/docs/administration/cli.mdx index a2a41adcd..41bc76de3 100644 --- a/docs/docs/administration/cli.mdx +++ b/docs/docs/administration/cli.mdx @@ -1,62 +1,51 @@ # Command Line Interface (CLI) -## Overview - Langflow's Command Line Interface (CLI) is a powerful tool that allows you to interact with the Langflow server from the command line. The CLI provides a wide range of commands to help you shape Langflow to your needs. -Running the CLI without any arguments will display a list of available commands and options. +The available commands are below. Navigate to their individual sections of this page to see the parameters. + +- [langflow](#overview) +- [langflow api-key](#langflow-api-key) +- [langflow copy-db](#langflow-copy-db) +- [langflow migration](#langflow-migration) +- [langflow run](#langflow-run) +- [langflow superuser](#langflow-superuser) + +## Overview + +Running the CLI without any arguments displays a list of available options and commands. 
```bash -python -m langflow run --help +langflow # or -python -m langflow run +langflow --help +# or +python -m langflow ``` -Each option for `run` command are detailed below: +| Command | Description | +| ----------- | ---------------------------------------------------------------------- | +| `api-key` | Creates an API key for the default superuser if AUTO_LOGIN is enabled. | +| `copy-db` | Copy the database files to the current directory (`which langflow`). | +| `migration` | Run or test migrations. | +| `run` | Run the Langflow. | +| `superuser` | Create a superuser. | -- `--help`: Displays all available options. -- `--host`: Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`. -- `--workers`: Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`. -- `--timeout`: Sets the worker timeout in seconds. The default is `60`. -- `--port`: Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`. -- `--env-file`: Specifies the path to the .env file containing environment variables. The default is `.env`. -- `--log-level`: Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`. -- `--components-path`: Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`. -- `--log-file`: Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`. -- `--cache`: Select the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`. -- `--dev/--no-dev`: Toggles the development mode. The default is `no-dev`. -- `--path`: Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable. -- `--open-browser/--no-open-browser`: Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`. -- `--remove-api-keys/--no-remove-api-keys`: Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`. -- `--install-completion [bash|zsh|fish|powershell|pwsh]`: Installs completion for the specified shell. -- `--show-completion [bash|zsh|fish|powershell|pwsh]`: Shows completion for the specified shell, allowing you to copy it or customize the installation. -- `--backend-only`: This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable. -- `--store`: This parameter, with a default value of `True`, enables the store features, use `--no-store` to deactivate it. It can be configured using the `LANGFLOW_STORE` environment variable. +### Options -These parameters are important for users who need to customize the behavior of Langflow, especially in development or specialized deployment scenarios. 
+| Option | Description | +| ---------------------- | -------------------------------------------------------------------------------- | +| `--install-completion` | Install completion for the current shell. | +| `--show-completion` | Show completion for the current shell, to copy it or customize the installation. | +| `--help` | Show this message and exit. | -### API Key Command +## langflow api-key -The `api-key` command allows you to create an API key for accessing Langflow's API when `LANGFLOW_AUTO_LOGIN` is set to `True`. - -```bash -python -m langflow api-key --help - - Usage: langflow api-key [OPTIONS] - - Creates an API key for the default superuser if AUTO_LOGIN is enabled. - Args: log_level (str, optional): Logging level. Defaults to "error". - Returns: None - -╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ -│ --log-level TEXT Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] │ -│ --help Show this message and exit. │ -╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ -``` - -Once you run the `api-key` command, it will create an API key for the default superuser if `LANGFLOW_AUTO_LOGIN` is set to `True`. +Run the `api-key` command to create an API key for the default superuser if `LANGFLOW_AUTO_LOGIN` is set to `True`. ```bash +langflow api-key +# or python -m langflow api-key ╭─────────────────────────────────────────────────────────────────────╮ │ API Key Created Successfully: │ @@ -67,11 +56,98 @@ python -m langflow api-key │ Make sure to store it in a secure location. │ │ │ │ The API key has been copied to your clipboard. Cmd + V to paste it. │ -╰─────────────────────────────────────────────────────────────────────╯ +╰────────────────────────────── ``` -### Environment Variables +### Options + +| Option | Type | Description | +| ----------- | ---- | ------------------------------------------------------------- | +| --log-level | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] | +| --help | | Show this message and exit. | + +## langflow copy-db + +Run the `copy-db` command to copy the cached `langflow.db` and `langflow-pre.db` database files to the current directory. + +If the files exist in the cache directory, they will be copied to the same directory as `__main__.py`, which can be found with `which langflow`. + +### Options + +None. + +## langflow migration + +Run or test migrations with the [Alembic](https://pypi.org/project/alembic/) database tool. + +```bash +langflow migration +# or +python -m langflow migration +``` + +### Options + +| Option | Description | +| ------------------- | -------------------------------------------------------------------------------------------------------------------------- | +| `--test, --no-test` | Run migrations in test mode. [default: test] | +| `--fix, --no-fix` | Fix migrations. This is a destructive operation, and should only be used if you know what you are doing. [default: no-fix] | +| `--help` | Show this message and exit. | + +## langflow run + +Run Langflow. 
+ +```bash +langflow run +# or +python -m langflow run +``` + +### Options + +| Option | Description | +| ---------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `--help` | Displays all available options. | +| `--host` | Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`. | +| `--workers` | Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`. | +| `--timeout` | Sets the worker timeout in seconds. The default is `60`. | +| `--port` | Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`. | +| `--env-file` | Specifies the path to the .env file containing environment variables. The default is `.env`. | +| `--log-level` | Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`. | +| `--components-path` | Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`. | +| `--log-file` | Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`. | +| `--cache` | Select the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`. | +| `--dev`/`--no-dev` | Toggles the development mode. The default is `no-dev`. | +| `--path` | Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable. | +| `--open-browser`/`--no-open-browser` | Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`. | +| `--remove-api-keys`/`--no-remove-api-keys` | Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`. | +| `--install-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Installs completion for the specified shell. | +| `--show-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Shows completion for the specified shell, allowing you to copy it or customize the installation. | +| `--backend-only` | This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable. For more, see [Backend-only](../deployment/backend-only.md). | +| `--store` | This parameter, with a default value of `True`, enables the store features, use `--no-store` to deactivate it. It can be configured using the `LANGFLOW_STORE` environment variable. | + +#### Environment Variables You can configure many of the CLI options using environment variables. These can be exported in your operating system or added to a `.env` file and loaded using the `--env-file` option. A sample `.env` file named `.env.example` is included with the project. 
Copy this file to a new file named `.env` and replace the example values with your actual settings. If you're setting values in both your OS and the `.env` file, the `.env` settings will take precedence. + +## langflow superuser + +Create a superuser for Langflow. + +```bash +langflow superuser +# or +python -m langflow superuser +``` + +### Options + +| Option | Type | Description | +| ------------- | ---- | ------------------------------------------------------------- | +| `--username` | TEXT | Username for the superuser. [default: None] [required] | +| `--password` | TEXT | Password for the superuser. [default: None] [required] | +| `--log-level` | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] | +| `--help` | | Show this message and exit. | diff --git a/docs/docs/administration/global-env.mdx b/docs/docs/administration/global-env.mdx index ae5ff0fb4..51e5d633e 100644 --- a/docs/docs/administration/global-env.mdx +++ b/docs/docs/administration/global-env.mdx @@ -17,7 +17,6 @@ Global Variables are a useful feature of Langflow, allowing you to define reusab - All Credential Global Variables are encrypted and accessible only by you. - Set _`LANGFLOW_STORE_ENVIRONMENT_VARIABLES`_ to _`true`_ in your `.env` file to add all variables in _`LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT`_ to your user's Global Variables. - ## Creating and Adding a Global Variable To create and add a global variable, click the 🌐 button in a Text field, and then click **+ Add New Variable**. @@ -25,18 +24,20 @@ To create and add a global variable, click the 🌐 button in a Text field, and Text fields are where you write text without opening a Text area, and are identified with the 🌐 icon. For example, to create an environment variable for the **OpenAI** component: - 1. In the **OpenAI API Key** text field, click the 🌐 button, then **Add New Variable**. - 2. Enter `openai_api_key` in the **Variable Name** field. - 3. Paste your OpenAI API Key (`sk-...`) in the **Value** field. - 4. Select **Credential** for the **Type**. - 5. Choose **OpenAI API Key** in the **Apply to Fields** field to apply this variable to all fields named **OpenAI API Key**. - 6. Click **Save Variable**. + +1. In the **OpenAI API Key** text field, click the 🌐 button, then **Add New Variable**. +2. Enter `openai_api_key` in the **Variable Name** field. +3. Paste your OpenAI API Key (`sk-...`) in the **Value** field. +4. Select **Credential** for the **Type**. +5. Choose **OpenAI API Key** in the **Apply to Fields** field to apply this variable to all fields named **OpenAI API Key**. +6. Click **Save Variable**. You now have a `openai_api_key` global environment variable for your Langflow project. Subsequently, clicking the 🌐 button in a Text field will display the new variable in the dropdown. - You can also create global variables in **Settings** > **Variables and Secrets**. + You can also create global variables in **Settings** > **Variables and + Secrets**. - To prevent this behavior, set `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` to `false` in your `.env` file. + To prevent this behavior, set `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` to + `false` in your `.env` file. You can specify variables to get from the environment by listing them in `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT`. 
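For illustration, a minimal `.env` along these lines might look like the sketch below. It uses only variables documented above; the `OPENAI_API_KEY` entry is a placeholder for a variable you might choose to expose to your flows.

```bash
# Server settings read by `langflow run` (see the CLI options above)
LANGFLOW_HOST=127.0.0.1
LANGFLOW_PORT=7860
LANGFLOW_LOG_LEVEL=critical

# Let Langflow import selected OS variables as Global Variables
LANGFLOW_STORE_ENVIRONMENT_VARIABLES=true
LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=OPENAI_API_KEY

# Placeholder value; replace with your real key
OPENAI_API_KEY=sk-...
```

You would then start the server with `langflow run --env-file .env` so these values are picked up.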
diff --git a/docs/docs/components/agents.mdx b/docs/docs/components/agents.mdx index f8917e4e2..cdc49a76d 100644 --- a/docs/docs/components/agents.mdx +++ b/docs/docs/components/agents.mdx @@ -1,11 +1,13 @@ -import Admonition from '@theme/Admonition'; +import Admonition from "@theme/Admonition"; # Agents -

- We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝 -

+

+ We appreciate your understanding as we polish our documentation – it may + contain some rough edges. Share your feedback or report issues to help us + improve! 🛠️📝 +

Agents are components that use reasoning to make decisions and take actions, designed to autonomously perform tasks or provide services with some degree of agency. LLM chains can only perform hardcoded sequences of actions, while agents use LLMs to reason through which actions to take, and in which order. @@ -87,4 +89,4 @@ The `ZeroShotAgent` uses the ReAct framework to decide which tool to use based o **Parameters**: - **Allowed Tools:** The tools accessible to the agent. -- **LLM Chain:** The LLM Chain used by the agent. \ No newline at end of file +- **LLM Chain:** The LLM Chain used by the agent. diff --git a/docs/docs/components/chains.mdx b/docs/docs/components/chains.mdx index fd3b5bd5d..91477644d 100644 --- a/docs/docs/components/chains.mdx +++ b/docs/docs/components/chains.mdx @@ -6,11 +6,11 @@ import Admonition from "@theme/Admonition"; # Chains -

- Thank you for your patience while we enhance our documentation. It may - have some imperfections. Share your feedback or report issues to help us - improve! 🛠️📝 -

+

+ Thank you for your patience while we enhance our documentation. It may have + some imperfections. Share your feedback or report issues to help us improve! + 🛠️📝 +

Chains, in the context of language models, refer to a series of calls made to a language model. This approach allows for using the output of one call as the input for another. Different chain types facilitate varying complexity levels, making them useful for creating pipelines and executing specific scenarios. diff --git a/docs/docs/components/data.mdx b/docs/docs/components/data.mdx index ca81bd225..d7f525d7d 100644 --- a/docs/docs/components/data.mdx +++ b/docs/docs/components/data.mdx @@ -1,4 +1,4 @@ -import Admonition from '@theme/Admonition'; +import Admonition from "@theme/Admonition"; # Data diff --git a/docs/docs/components/embeddings.mdx b/docs/docs/components/embeddings.mdx index 4978ff354..200e0ccf3 100644 --- a/docs/docs/components/embeddings.mdx +++ b/docs/docs/components/embeddings.mdx @@ -4,113 +4,113 @@ Used to load embedding models from [Amazon Bedrock](https://aws.amazon.com/bedrock/). -| **Parameter** | **Type** | **Description** | **Default** | -|-----------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------|-------------| -| `credentials_profile_name` | `str` | Name of the AWS credentials profile in ~/.aws/credentials or ~/.aws/config, which has access keys or role information. | | -| `model_id` | `str` | ID of the model to call, e.g., `amazon.titan-embed-text-v1`. This is equivalent to the `modelId` property in the `list-foundation-models` API. | | -| `endpoint_url` | `str` | URL to set a specific service endpoint other than the default AWS endpoint. | | -| `region_name` | `str` | AWS region to use, e.g., `us-west-2`. Falls back to `AWS_DEFAULT_REGION` environment variable or region specified in ~/.aws/config if not provided. | | +| **Parameter** | **Type** | **Description** | **Default** | +| -------------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | +| `credentials_profile_name` | `str` | Name of the AWS credentials profile in ~/.aws/credentials or ~/.aws/config, which has access keys or role information. | | +| `model_id` | `str` | ID of the model to call, e.g., `amazon.titan-embed-text-v1`. This is equivalent to the `modelId` property in the `list-foundation-models` API. | | +| `endpoint_url` | `str` | URL to set a specific service endpoint other than the default AWS endpoint. | | +| `region_name` | `str` | AWS region to use, e.g., `us-west-2`. Falls back to `AWS_DEFAULT_REGION` environment variable or region specified in ~/.aws/config if not provided. | | ## Cohere Embeddings Used to load embedding models from [Cohere](https://cohere.com/). -| **Parameter** | **Type** | **Description** | **Default** | -|---------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------|-----------------------| -| `cohere_api_key` | `str` | API key required to authenticate with the Cohere service. | | -| `model` | `str` | Language model used for embedding text documents and performing queries. | `embed-english-v2.0` | -| `truncate` | `bool` | Whether to truncate the input text to fit within the model's constraints. 
| `False` | +| **Parameter** | **Type** | **Description** | **Default** | +| ---------------- | -------- | ------------------------------------------------------------------------- | -------------------- | +| `cohere_api_key` | `str` | API key required to authenticate with the Cohere service. | | +| `model` | `str` | Language model used for embedding text documents and performing queries. | `embed-english-v2.0` | +| `truncate` | `bool` | Whether to truncate the input text to fit within the model's constraints. | `False` | ## Azure OpenAI Embeddings Generate embeddings using Azure OpenAI models. -| **Parameter** | **Type** | **Description** | **Default** | -|---------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------|-----------------------| -| `Azure Endpoint` | `str` | Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/` | | -| `Deployment Name` | `str` | The name of the deployment. | | -| `API Version` | `str` | The API version to use, options include various dates. | | -| `API Key` | `str` | The API key to access the Azure OpenAI service. | | +| **Parameter** | **Type** | **Description** | **Default** | +| ----------------- | -------- | -------------------------------------------------------------------------------------------------- | ----------- | +| `Azure Endpoint` | `str` | Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/` | | +| `Deployment Name` | `str` | The name of the deployment. | | +| `API Version` | `str` | The API version to use, options include various dates. | | +| `API Key` | `str` | The API key to access the Azure OpenAI service. | | ## Hugging Face API Embeddings Generate embeddings using Hugging Face Inference API models. -| **Parameter** | **Type** | **Description** | **Default** | -|---------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------|-----------------------| -| `API Key` | `str` | API key for accessing the Hugging Face Inference API. | | -| `API URL` | `str` | URL of the Hugging Face Inference API. | `http://localhost:8080` | -| `Model Name` | `str` | Name of the model to use for embeddings. | `BAAI/bge-large-en-v1.5` | -| `Cache Folder` | `str` | Folder path to cache Hugging Face models. | | -| `Encode Kwargs` | `dict` | Additional arguments for the encoding process. | | -| `Model Kwargs` | `dict` | Additional arguments for the model. | | -| `Multi Process` | `bool` | Whether to use multiple processes. | `False` | +| **Parameter** | **Type** | **Description** | **Default** | +| --------------- | -------- | ----------------------------------------------------- | ------------------------ | +| `API Key` | `str` | API key for accessing the Hugging Face Inference API. | | +| `API URL` | `str` | URL of the Hugging Face Inference API. | `http://localhost:8080` | +| `Model Name` | `str` | Name of the model to use for embeddings. | `BAAI/bge-large-en-v1.5` | +| `Cache Folder` | `str` | Folder path to cache Hugging Face models. | | +| `Encode Kwargs` | `dict` | Additional arguments for the encoding process. | | +| `Model Kwargs` | `dict` | Additional arguments for the model. | | +| `Multi Process` | `bool` | Whether to use multiple processes. | `False` | ## Hugging Face Embeddings Used to load embedding models from [HuggingFace](https://huggingface.co). 
-| **Parameter** | **Type** | **Description** | **Default** | -|---------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------|-----------------------| -| `Cache Folder` | `str` | Folder path to cache HuggingFace models. | | -| `Encode Kwargs` | `dict` | Additional arguments for the encoding process. | | -| `Model Kwargs` | `dict` | Additional arguments for the model. | | -| `Model Name` | `str` | Name of the HuggingFace model to use. | `sentence-transformers/all-mpnet-base-v2` | -| `Multi Process` | `bool` | Whether to use multiple processes. | `False` | +| **Parameter** | **Type** | **Description** | **Default** | +| --------------- | -------- | ---------------------------------------------- | ----------------------------------------- | +| `Cache Folder` | `str` | Folder path to cache HuggingFace models. | | +| `Encode Kwargs` | `dict` | Additional arguments for the encoding process. | | +| `Model Kwargs` | `dict` | Additional arguments for the model. | | +| `Model Name` | `str` | Name of the HuggingFace model to use. | `sentence-transformers/all-mpnet-base-v2` | +| `Multi Process` | `bool` | Whether to use multiple processes. | `False` | ## OpenAI Embeddings Used to load embedding models from [OpenAI](https://openai.com/). -| **Parameter** | **Type** | **Description** | **Default** | -|-----------------------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------| -| `OpenAI API Key` | `str` | The API key to use for accessing the OpenAI API. | | -| `Default Headers` | `Dict[str, str]` | Default headers for the HTTP requests. | | -| `Default Query` | `NestedDict` | Default query parameters for the HTTP requests. | | -| `Allowed Special` | `List[str]` | Special tokens allowed for processing. | `[]` | -| `Disallowed Special` | `List[str]` | Special tokens disallowed for processing. | `["all"]` | -| `Chunk Size` | `int` | Chunk size for processing. | `1000` | -| `Client` | `Any` | HTTP client for making requests. | | -| `Deployment` | `str` | Deployment name for the model. | `text-embedding-3-small` | -| `Embedding Context Length` | `int` | Length of embedding context. | `8191` | -| `Max Retries` | `int` | Maximum number of retries for failed requests. | `6` | -| `Model` | `str` | Name of the model to use. | `text-embedding-3-small` | -| `Model Kwargs` | `NestedDict` | Additional keyword arguments for the model. | | -| `OpenAI API Base` | `str` | Base URL of the OpenAI API. | | -| `OpenAI API Type` | `str` | Type of the OpenAI API. | | -| `OpenAI API Version` | `str` | Version of the OpenAI API. | | -| `OpenAI Organization` | `str` | Organization associated with the API key. | | -| `OpenAI Proxy` | `str` | Proxy server for the requests. | | -| `Request Timeout` | `float` | Timeout for the HTTP requests. | | -| `Show Progress Bar` | `bool` | Whether to show a progress bar for processing. | `False` | -| `Skip Empty` | `bool` | Whether to skip empty inputs. | `False` | -| `TikToken Enable` | `bool` | Whether to enable TikToken. | `True` | -| `TikToken Model Name` | `str` | Name of the TikToken model. 
| | +| **Parameter** | **Type** | **Description** | **Default** | +| -------------------------- | ---------------- | ------------------------------------------------ | ------------------------ | +| `OpenAI API Key` | `str` | The API key to use for accessing the OpenAI API. | | +| `Default Headers` | `Dict[str, str]` | Default headers for the HTTP requests. | | +| `Default Query` | `NestedDict` | Default query parameters for the HTTP requests. | | +| `Allowed Special` | `List[str]` | Special tokens allowed for processing. | `[]` | +| `Disallowed Special` | `List[str]` | Special tokens disallowed for processing. | `["all"]` | +| `Chunk Size` | `int` | Chunk size for processing. | `1000` | +| `Client` | `Any` | HTTP client for making requests. | | +| `Deployment` | `str` | Deployment name for the model. | `text-embedding-3-small` | +| `Embedding Context Length` | `int` | Length of embedding context. | `8191` | +| `Max Retries` | `int` | Maximum number of retries for failed requests. | `6` | +| `Model` | `str` | Name of the model to use. | `text-embedding-3-small` | +| `Model Kwargs` | `NestedDict` | Additional keyword arguments for the model. | | +| `OpenAI API Base` | `str` | Base URL of the OpenAI API. | | +| `OpenAI API Type` | `str` | Type of the OpenAI API. | | +| `OpenAI API Version` | `str` | Version of the OpenAI API. | | +| `OpenAI Organization` | `str` | Organization associated with the API key. | | +| `OpenAI Proxy` | `str` | Proxy server for the requests. | | +| `Request Timeout` | `float` | Timeout for the HTTP requests. | | +| `Show Progress Bar` | `bool` | Whether to show a progress bar for processing. | `False` | +| `Skip Empty` | `bool` | Whether to skip empty inputs. | `False` | +| `TikToken Enable` | `bool` | Whether to enable TikToken. | `True` | +| `TikToken Model Name` | `str` | Name of the TikToken model. | | ## Ollama Embeddings Generate embeddings using Ollama models. -| **Parameter** | **Type** | **Description** | **Default** | -|---------------------|-------------------|--------------------------------------------------------------------------------------------------------------------|---------------------------| -| `Ollama Model` | `str` | Name of the Ollama model to use. | `llama2` | -| `Ollama Base URL` | `str` | Base URL of the Ollama API. | `http://localhost:11434` | -| `Model Temperature` | `float` | Temperature parameter for the model. Adjusts the randomness in the generated embeddings. | | +| **Parameter** | **Type** | **Description** | **Default** | +| ------------------- | -------- | ---------------------------------------------------------------------------------------- | ------------------------ | +| `Ollama Model` | `str` | Name of the Ollama model to use. | `llama2` | +| `Ollama Base URL` | `str` | Base URL of the Ollama API. | `http://localhost:11434` | +| `Model Temperature` | `float` | Temperature parameter for the model. Adjusts the randomness in the generated embeddings. | | ## VertexAI Embeddings Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embeddings API](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings). -| **Parameter** | **Type** | **Description** | **Default** | -|-----------------------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------| -| `credentials` | `Credentials` | The default custom credentials to use. 
| | -| `location` | `str` | The default location to use when making API calls. | `us-central1`| -| `max_output_tokens` | `int` | Token limit determines the maximum amount of text output from one prompt. | `128` | -| `model_name` | `str` | The name of the Vertex AI large language model. | `text-bison`| -| `project` | `str` | The default GCP project to use when making Vertex API calls. | | -| `request_parallelism` | `int` | The amount of parallelism allowed for requests issued to VertexAI models. | `5` | -| `temperature` | `float` | Tunes the degree of randomness in text generations. Should be a non-negative value. | `0` | -| `top_k` | `int` | How the model selects tokens for output, the next token is selected from the top `k` tokens. | `40` | -| `top_p` | `float` | Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value. | `0.95` | -| `tuned_model_name` | `str` | The name of a tuned model. If provided, `model_name` is ignored. | | -| `verbose` | `bool` | This parameter controls the level of detail in the output. When set to `True`, it prints internal states of the chain to help debug. | `False` | +| **Parameter** | **Type** | **Description** | **Default** | +| --------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------------- | +| `credentials` | `Credentials` | The default custom credentials to use. | | +| `location` | `str` | The default location to use when making API calls. | `us-central1` | +| `max_output_tokens` | `int` | Token limit determines the maximum amount of text output from one prompt. | `128` | +| `model_name` | `str` | The name of the Vertex AI large language model. | `text-bison` | +| `project` | `str` | The default GCP project to use when making Vertex API calls. | | +| `request_parallelism` | `int` | The amount of parallelism allowed for requests issued to VertexAI models. | `5` | +| `temperature` | `float` | Tunes the degree of randomness in text generations. Should be a non-negative value. | `0` | +| `top_k` | `int` | How the model selects tokens for output, the next token is selected from the top `k` tokens. | `40` | +| `top_p` | `float` | Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value. | `0.95` | +| `tuned_model_name` | `str` | The name of a tuned model. If provided, `model_name` is ignored. | | +| `verbose` | `bool` | This parameter controls the level of detail in the output. When set to `True`, it prints internal states of the chain to help debug. | `False` | diff --git a/docs/docs/components/experimental.mdx b/docs/docs/components/experimental.mdx index 8e503da06..7664b0406 100644 --- a/docs/docs/components/experimental.mdx +++ b/docs/docs/components/experimental.mdx @@ -1,4 +1,4 @@ -import Admonition from '@theme/Admonition'; +import Admonition from "@theme/Admonition"; # Experimental @@ -29,10 +29,12 @@ This component extracts specified keys from a record. **Parameters** - **Record:** + - **Display Name:** Record - **Info:** The record from which to extract keys. - **Keys:** + - **Display Name:** Keys - **Info:** The keys to be extracted. @@ -54,6 +56,7 @@ This component turns a function running a flow into a Tool. **Parameters** - **Flow Name:** + - **Display Name:** Flow Name - **Info:** Select the flow to run. - **Options:** List of available flows. 
@@ -61,10 +64,12 @@ This component turns a function running a flow into a Tool. - **Refresh Button:** True - **Name:** + - **Display Name:** Name - **Description:** The tool's name. - **Description:** + - **Display Name:** Description - **Description:** Describes the tool. @@ -127,10 +132,12 @@ This component generates a notification. **Parameters** - **Name:** + - **Display Name:** Name - **Info:** The notification's name. - **Record:** + - **Display Name:** Record - **Info:** Optionally, a record to store in the notification. @@ -151,10 +158,12 @@ This component runs a specified flow. **Parameters** - **Input Value:** + - **Display Name:** Input Value - **Multiline:** True - **Flow Name:** + - **Display Name:** Flow Name - **Info:** Select the flow to run. - **Options:** List of available flows. @@ -177,14 +186,17 @@ This component executes a specified runnable. **Parameters** - **Input Key:** + - **Display Name:** Input Key - **Info:** The input key. - **Inputs:** + - **Display Name:** Inputs - **Info:** Inputs for the runnable. - **Runnable:** + - **Display Name:** Runnable - **Info:** The runnable to execute. @@ -205,14 +217,17 @@ This component executes an SQL query. **Parameters** - **Database URL:** + - **Display Name:** Database URL - **Info:** The database's URL. - **Include Columns:** + - **Display Name:** Include Columns - **Info:** Whether to include columns in the result. - **Passthrough:** + - **Display Name:** Passthrough - **Info:** Returns the query instead of raising an exception if an error occurs. @@ -233,10 +248,12 @@ This component dynamically generates a tool from a flow. **Parameters** - **Input Value:** + - **Display Name:** Input Value - **Multiline:** True - **Flow Name:** + - **Display Name:** Flow Name - **Info:** Select the flow to run. - **Options:** List of available flows. diff --git a/docs/docs/components/helpers.mdx b/docs/docs/components/helpers.mdx index ff95eab7e..f95c43b9d 100644 --- a/docs/docs/components/helpers.mdx +++ b/docs/docs/components/helpers.mdx @@ -1,4 +1,4 @@ -import Admonition from '@theme/Admonition'; +import Admonition from "@theme/Admonition"; # Helpers @@ -49,9 +49,10 @@ Use this component as a template to create your custom component. - **Parameter:** Describe the purpose of this parameter. -

- Customize the build_config and build methods according to your requirements. -

+

+ Customize the build_config and build methods + according to your requirements. +

Learn more about creating custom components at [Custom Component](http://docs.langflow.org/components/custom). diff --git a/docs/docs/components/inputs-and-outputs.mdx b/docs/docs/components/inputs-and-outputs.mdx index daabc68eb..2a624221a 100644 --- a/docs/docs/components/inputs-and-outputs.mdx +++ b/docs/docs/components/inputs-and-outputs.mdx @@ -1,4 +1,4 @@ -import Admonition from '@theme/Admonition'; +import Admonition from "@theme/Admonition"; import ZoomableImage from "/src/theme/ZoomableImage.js"; # Inputs and Outputs @@ -29,9 +29,8 @@ This component collects user input from the chat.

- If `As Record` is `true` and the `Message` is a `Record`, the data - of the `Record` will be updated with the `Sender`, `Sender Name`, and - `Session ID`. + If `As Record` is `true` and the `Message` is a `Record`, the data of the + `Record` will be updated with the `Sender`, `Sender Name`, and `Session ID`.

@@ -112,9 +111,10 @@ This component sends a message to the chat. - **Message:** Specifies the text of the message. -

- If `As Record` is `true` and the `Message` is a `Record`, the data in the `Record` is updated with the `Sender`, `Sender Name`, and `Session ID`. -

+

+ If `As Record` is `true` and the `Message` is a `Record`, the data in the + `Record` is updated with the `Sender`, `Sender Name`, and `Session ID`. +

### Text Output @@ -125,7 +125,6 @@ This component displays text data to the user. It is useful when you want to sho - **Value:** Specifies the text data to be displayed. Defaults to an empty string. - The `TextOutput` component provides a simple way to display text data. It allows textual data to be visible in the chat window during your interaction flow. ## Prompts @@ -155,7 +154,8 @@ The `PromptTemplate` component enables users to create prompts and define variab After defining a variable in the prompt template, it acts as its own component - input. See [Prompt Customization](../administration/prompt-customization) for more details. + input. See [Prompt Customization](../administration/prompt-customization) for + more details. -- **template:** The template used to format an individual request. \ No newline at end of file +- **template:** The template used to format an individual request. diff --git a/docs/docs/components/memories.mdx b/docs/docs/components/memories.mdx index f4002844e..a133c6a6a 100644 --- a/docs/docs/components/memories.mdx +++ b/docs/docs/components/memories.mdx @@ -1,11 +1,13 @@ -import Admonition from '@theme/Admonition'; +import Admonition from "@theme/Admonition"; # Memories -

- Thanks for your patience as we improve our documentation—it might have some rough edges. Share your feedback or report issues to help us enhance it! 🛠️📝 -

+

+ Thanks for your patience as we improve our documentation—it might have some + rough edges. Share your feedback or report issues to help us enhance it! + 🛠️📝 +

Memory is a concept in chat-based applications that allows the system to remember previous interactions. This capability helps maintain the context of the conversation and enables the system to understand new messages in light of past messages. @@ -24,9 +26,13 @@ This component retrieves stored messages using various filters such as sender ty - **number_of_messages**: Specifies the number of messages to retrieve. Defaults to `5`. Determines the number of recent messages from the chat history to fetch. -

- The component retrieves messages based on the provided criteria, including the specific file path for stored messages. If no specific criteria are provided, it returns the most recent messages up to the specified limit. This component can be used to review past interactions and analyze conversation flows. -

+

+ The component retrieves messages based on the provided criteria, including + the specific file path for stored messages. If no specific criteria are + provided, it returns the most recent messages up to the specified limit. + This component can be used to review past interactions and analyze + conversation flows. +

### ConversationBufferMemory @@ -84,7 +90,8 @@ The `ConversationKGMemory` utilizes a knowledge graph to enhance memory capabili - **memory_key**: Specifies the prompt variable name where the memory stores and retrieves chat messages. Defaults to `chat_history`. - **output_key**: Identifies the key under which the generated response - is stored, enabling retrieval using this key. +is stored, enabling retrieval using this key. + - **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`. --- @@ -124,4 +131,4 @@ The `VectorRetrieverMemory` retrieves vectors based on queries, facilitating vec - **Retriever**: The tool used to fetch documents. - **input_key**: Identifies where input messages are stored in the memory object, facilitating their retrieval and manipulation. - **memory_key**: Specifies the prompt variable name where the memory stores and retrieves chat messages. Defaults to `chat_history`. -- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`. \ No newline at end of file +- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`. diff --git a/docs/docs/components/model_specs.mdx b/docs/docs/components/model_specs.mdx index 3ed3d60ca..9da89de3f 100644 --- a/docs/docs/components/model_specs.mdx +++ b/docs/docs/components/model_specs.mdx @@ -1,11 +1,13 @@ -import Admonition from '@theme/Admonition'; +import Admonition from "@theme/Admonition"; # Large Language Models (LLMs) -

- Thank you for your patience as we refine our documentation. You might encounter some inconsistencies. Please help us improve by sharing your feedback or reporting any issues! 🛠️📝 -

+

+ Thank you for your patience as we refine our documentation. You might + encounter some inconsistencies. Please help us improve by sharing your + feedback or reporting any issues! 🛠️📝 +

A Large Language Model (LLM) is a foundational component of Langflow. It provides a uniform interface for interacting with LLMs from various providers, including OpenAI, Cohere, and HuggingFace. Langflow extensively uses LLMs across its chains and agents, employing them to generate text based on specific prompts or inputs. @@ -37,7 +39,9 @@ This is a wrapper for Anthropic's large language model designed for chat-based i `CTransformers` provides access to Transformer models implemented in C/C++ using the [GGML](https://github.com/ggerganov/ggml) library. -Ensure the `ctransformers` Python package is installed. Discover more about installation, supported models, and usage [here](https://github.com/marella/ctransformers). + Ensure the `ctransformers` Python package is installed. Discover more about + installation, supported models, and usage + [here](https://github.com/marella/ctransformers). - **config:** This configuration is for the Transformer models. Check the default settings and possible configurations at [config](https://github.com/marella/ctransformers#config). @@ -128,7 +132,8 @@ This component integrates with [Google Vertex AI](https://cloud.google.com/verte - **credentials**: Custom - credentials used for API interactions. +credentials used for API interactions. + - **location**: The default location for API calls, defaulting to `us-central1`. - **max_output_tokens**: Limits the output tokens per prompt, defaulting to `128`. - **model_name**: The name of the Vertex AI model in use, defaulting to `text-bison`. @@ -140,4 +145,4 @@ This component integrates with [Google Vertex AI](https://cloud.google.com/verte - **tuned_model_name**: Specifies a tuned model name, which overrides the default model name if provided. - **verbose**: Controls the output verbosity to assist in debugging and understanding the operational details, defaulting to `False`. ---- \ No newline at end of file +--- diff --git a/docs/docs/components/retrievers.mdx b/docs/docs/components/retrievers.mdx index 825842df7..f86695e37 100644 --- a/docs/docs/components/retrievers.mdx +++ b/docs/docs/components/retrievers.mdx @@ -1,11 +1,13 @@ -import Admonition from '@theme/Admonition'; +import Admonition from "@theme/Admonition"; # Retrievers -

- We appreciate your patience as we enhance our documentation. It may have some imperfections. Please share your feedback or report issues to help us improve. 🛠️📝 -

+

+ We appreciate your patience as we enhance our documentation. It may have + some imperfections. Please share your feedback or report issues to help us + improve. 🛠️📝 +

A retriever is an interface that returns documents in response to an unstructured query. It's broader than a vector store because it doesn't need to store documents; it only needs to retrieve them. diff --git a/docs/docs/components/text-and-record.mdx b/docs/docs/components/text-and-record.mdx index c9fa06de3..24c16e4aa 100644 --- a/docs/docs/components/text-and-record.mdx +++ b/docs/docs/components/text-and-record.mdx @@ -2,7 +2,7 @@ In Langflow 1.0, we added two main input and output types: `Text` and `Record`. -`Text` is a simple string input and output type, while ``Record`` is a structure very similar to a dictionary in Python. It is a key-value pair data structure. +`Text` is a simple string input and output type, while `Record` is a structure very similar to a dictionary in Python. It is a key-value pair data structure. We've created a few components to help you work with these types. Let's see how a few of them work. diff --git a/docs/docs/components/toolkits.mdx b/docs/docs/components/toolkits.mdx index ea6758aee..3ba7ed7c7 100644 --- a/docs/docs/components/toolkits.mdx +++ b/docs/docs/components/toolkits.mdx @@ -1,9 +1,11 @@ -import Admonition from '@theme/Admonition'; +import Admonition from "@theme/Admonition"; # Toolkits -

- We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝 -

-
\ No newline at end of file +

+ We appreciate your understanding as we polish our documentation - it may + contain some rough edges. Share your feedback or report issues to help us + improve! 🛠️📝 +

+ diff --git a/docs/docs/components/tools.mdx b/docs/docs/components/tools.mdx index 940c304eb..25d458360 100644 --- a/docs/docs/components/tools.mdx +++ b/docs/docs/components/tools.mdx @@ -1,11 +1,13 @@ -import Admonition from '@theme/Admonition'; +import Admonition from "@theme/Admonition"; # Tools -

- Thanks for your patience as we refine our documentation. It might have some rough edges currently. Please share your feedback or report issues to help us enhance it! 🛠️📝 -

+

+ Thanks for your patience as we refine our documentation. It might have some + rough edges currently. Please share your feedback or report issues to help + us enhance it! 🛠️📝 +

### SearchApi diff --git a/docs/docs/components/utilities.mdx b/docs/docs/components/utilities.mdx index 5f2a86d4d..8ef1f91c8 100644 --- a/docs/docs/components/utilities.mdx +++ b/docs/docs/components/utilities.mdx @@ -3,9 +3,9 @@ import Admonition from "@theme/Admonition"; # Utilities - We appreciate your understanding as we polish our documentation—it may - contain some rough edges. Share your feedback or report issues to help us - improve! 🛠️📝 + We appreciate your understanding as we polish our documentation—it may contain + some rough edges. Share your feedback or report issues to help us improve! + 🛠️📝 Utilities are a set of actions that can be used to perform common tasks in a flow. They are available in the **Utilities** section in the sidebar. @@ -86,7 +86,11 @@ Generates a unique identifier (UUID) for each instance it is invoked, providing - Returns a unique identifier (UUID) as a string. This UUID is generated using Python's `uuid` module, ensuring that each identifier is unique and can be used as a reliable reference in your application. - The Unique ID Generator is crucial for scenarios requiring distinct identifiers, such as session management, transaction tracking, or any context where different instances or entities must be uniquely identified. The generated UUID is provided as a hexadecimal string, offering a high level of uniqueness and security for identification purposes. + The Unique ID Generator is crucial for scenarios requiring distinct + identifiers, such as session management, transaction tracking, or any context + where different instances or entities must be uniquely identified. The + generated UUID is provided as a hexadecimal string, offering a high level of + uniqueness and security for identification purposes. For additional information and examples, please consult the [Langflow Components Custom Documentation](http://docs.langflow.org/components/custom). diff --git a/docs/docs/contributing/community.md b/docs/docs/contributing/community.md index 604487133..5c95718ec 100644 --- a/docs/docs/contributing/community.md +++ b/docs/docs/contributing/community.md @@ -10,7 +10,7 @@ Langflow [Discord](https://discord.gg/EqksyE2EX9) server. --- -## 🐦 Stay tunned for **Langflow** on Twitter +## 🐦 Stay tuned for **Langflow** on Twitter Follow [@langflow_ai](https://twitter.com/langflow_ai) on **Twitter** to get the latest news about **Langflow**. diff --git a/docs/docs/deployment/backend-only.md b/docs/docs/deployment/backend-only.md new file mode 100644 index 000000000..fb5efdfdb --- /dev/null +++ b/docs/docs/deployment/backend-only.md @@ -0,0 +1,123 @@ +# Backend-only + +You can run Langflow in `--backend-only` mode to expose your Langflow app as an API, without running the frontend UI. + +Start langflow in backend-only mode with `python3 -m langflow run --backend-only`. + +The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`. +Langflow will now serve requests to its API without the frontend running. + +## Prerequisites + +- [Langflow installed](../getting-started/install-langflow.mdx) + +- [OpenAI API key](https://platform.openai.com) + +- [A Langflow flow created](../starter-projects/basic-prompting.mdx) + +## Download your flow's curl call + +1. Click API. +2. Click **curl** > **Copy code** and save the code to your local machine. 
+ It will look something like this: + +```curl +curl -X POST \ + "http://127.0.0.1:7864/api/v1/run/ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef?stream=false" \ + -H 'Content-Type: application/json'\ + -d '{"input_value": "message", + "output_type": "chat", + "input_type": "chat", + "tweaks": { + "Prompt-kvo86": {}, + "OpenAIModel-MilkD": {}, + "ChatOutput-ktwdw": {}, + "ChatInput-xXC4F": {} +}}' +``` + +Note the flow ID of `ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef`. You can find this ID in the UI as well to ensure you're querying the right flow. + +## Start Langflow in backend-only mode + +1. Stop Langflow with Ctrl+C. +2. Start langflow in backend-only mode with `python3 -m langflow run --backend-only`. + The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`. + Langflow will now serve requests to its API. +3. Run the curl code you copied from the UI. + You should get a result like this: + +```bash +{"session_id":"ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880","outputs":[{"inputs":{"input_value":"hi, are you there?"},"outputs":[{"results":{"result":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?"},"artifacts":{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI"},"messages":[{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI","component_id":"ChatOutput-ktwdw"}],"component_display_name":"Chat Output","component_id":"ChatOutput-ktwdw","used_frozen_result":false}]}]}% +``` + +Again, note that the flow ID matches. +Langflow is receiving your POST request, running the flow, and returning the result, all without running the frontend. Cool! + +## Download your flow's Python API call + +Instead of using curl, you can download your flow as a Python API call instead. + +1. Click API. +2. Click **Python API** > **Copy code** and save the code to your local machine. + The code will look something like this: + +```python +import requests +from typing import Optional + +BASE_API_URL = "http://127.0.0.1:7864/api/v1/run" +FLOW_ID = "ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef" +# You can tweak the flow by adding a tweaks dictionary +# e.g {"OpenAI-XXXXX": {"model_name": "gpt-4"}} + +def run_flow(message: str, + flow_id: str, + output_type: str = "chat", + input_type: str = "chat", + tweaks: Optional[dict] = None, + api_key: Optional[str] = None) -> dict: + """ + Run a flow with a given message and optional tweaks. + + :param message: The message to send to the flow + :param flow_id: The ID of the flow to run + :param tweaks: Optional tweaks to customize the flow + :return: The JSON response from the flow + """ + api_url = f"{BASE_API_URL}/{flow_id}" + + payload = { + "input_value": message, + "output_type": output_type, + "input_type": input_type, + } + headers = None + if tweaks: + payload["tweaks"] = tweaks + if api_key: + headers = {"x-api-key": api_key} + response = requests.post(api_url, json=payload, headers=headers) + return response.json() + +# Setup any tweaks you want to apply to the flow +message = "message" + +print(run_flow(message=message, flow_id=FLOW_ID)) +``` + +3. 
Run your Python app: + +```python +python3 app.py +``` + +The result is similar to the curl call: + +```bash +{'session_id': 'ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880', 'outputs': [{'inputs': {'input_value': 'message'}, 'outputs': [{'results': {'result': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!"}, 'artifacts': {'message': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!", 'sender': 'Machine', 'sender_name': 'AI'}, 'messages': [{'message': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!", 'sender': 'Machine', 'sender_name': 'AI', 'component_id': 'ChatOutput-ktwdw'}], 'component_display_name': 'Chat Output', 'component_id': 'ChatOutput-ktwdw', 'used_frozen_result': False}]}]} +``` + +Your Python app POSTs to your Langflow server, and the server runs the flow and returns the result. + +See [API](../administration/api.mdx) for more ways to interact with your headless Langflow server. diff --git a/docs/docs/deployment/docker.md b/docs/docs/deployment/docker.md new file mode 100644 index 000000000..1ebb5746e --- /dev/null +++ b/docs/docs/deployment/docker.md @@ -0,0 +1,65 @@ +# Docker + +This guide will help you get LangFlow up and running using Docker and Docker Compose. + +## Prerequisites + +- Docker +- Docker Compose + +## Steps + +1. Clone the LangFlow repository: + + ```sh + git clone https://github.com/langflow-ai/langflow.git + ``` + +2. Navigate to the `docker_example` directory: + + ```sh + cd langflow/docker_example + ``` + +3. Run the Docker Compose file: + + ```sh + docker compose up + ``` + +LangFlow will now be accessible at [http://localhost:7860/](http://localhost:7860/). + +## Docker Compose Configuration + +The Docker Compose configuration spins up two services: `langflow` and `postgres`. + +### LangFlow Service + +The `langflow` service uses the `langflowai/langflow:latest` Docker image and exposes port 7860. It depends on the `postgres` service. + +Environment variables: + +- `LANGFLOW_DATABASE_URL`: The connection string for the PostgreSQL database. +- `LANGFLOW_CONFIG_DIR`: The directory where LangFlow stores logs, file storage, monitor data, and secret keys. + +Volumes: + +- `langflow-data`: This volume is mapped to `/var/lib/langflow` in the container. + +### PostgreSQL Service + +The `postgres` service uses the `postgres:16` Docker image and exposes port 5432. + +Environment variables: + +- `POSTGRES_USER`: The username for the PostgreSQL database. +- `POSTGRES_PASSWORD`: The password for the PostgreSQL database. +- `POSTGRES_DB`: The name of the PostgreSQL database. + +Volumes: + +- `langflow-postgres`: This volume is mapped to `/var/lib/postgresql/data` in the container. + +## Switching to a Specific LangFlow Version + +If you want to use a specific version of LangFlow, you can modify the `image` field under the `langflow` service in the Docker Compose file. For example, to use version 1.0-alpha, change `langflowai/langflow:latest` to `langflowai/langflow:1.0-alpha`. 
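A minimal sketch of applying the version switch described just above, assuming you are still in the `docker_example` directory and have already edited the `image` field of the `langflow` service (the service and volume names below come from the compose file in that directory):

```sh
# Sketch only: run from langflow/docker_example after editing the image tag
# (for example, langflowai/langflow:latest -> langflowai/langflow:1.0-alpha).
docker compose pull        # fetch the newly pinned image
docker compose up -d       # recreate the stack in the background with the new image

docker compose ps          # confirm the langflow and postgres services are running
docker compose images      # show the exact image tags the containers are using
```

`docker compose up -d` recreates the `langflow` container whenever its image changes, while the named `langflow-data` and `langflow-postgres` volumes are preserved, so flows and database contents should survive the upgrade.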
diff --git a/docs/docs/deployment/jina-deployment.md b/docs/docs/deployment/jina-deployment.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/docs/docs/integrations/notion/add-content-to-page.md b/docs/docs/integrations/notion/add-content-to-page.md index 243c09d81..ace43e103 100644 --- a/docs/docs/integrations/notion/add-content-to-page.md +++ b/docs/docs/integrations/notion/add-content-to-page.md @@ -9,14 +9,11 @@ The `AddContentToPage` component converts markdown text to Notion blocks and app [Notion Reference](https://developers.notion.com/reference/patch-block-children) - - The `AddContentToPage` component enables you to: - Convert markdown text to Notion blocks. - Append the converted blocks to a specified Notion page. - Seamlessly integrate Notion content creation into Langflow workflows. - ## Component Usage @@ -100,8 +97,6 @@ class NotionPageCreator(CustomComponent): ## Example Usage - - Example of using the `AddContentToPage` component in a Langflow flow using Markdown as input: - ## Best Practices When using the `AddContentToPage` component: diff --git a/docs/docs/integrations/notion/list-users.md b/docs/docs/integrations/notion/list-users.md index c22c20ca8..0eb8236f5 100644 --- a/docs/docs/integrations/notion/list-users.md +++ b/docs/docs/integrations/notion/list-users.md @@ -9,13 +9,11 @@ The `NotionUserList` component retrieves users from Notion. It provides a conven [Notion Reference](https://developers.notion.com/reference/get-users) - - The `NotionUserList` component enables you to: +The `NotionUserList` component enables you to: - Retrieve user data from Notion - Access user information such as ID, type, name, and avatar URL - Integrate Notion user data seamlessly into your Langflow workflows - ## Component Usage @@ -95,7 +93,6 @@ class NotionUserList(CustomComponent): ## Example Usage - Here's an example of how you can use the `NotionUserList` component in a Langflow flow and passing the outputs to the Prompt component: - - ## Best Practices When using the `NotionUserList` component, consider the following best practices: diff --git a/docs/sidebars.js b/docs/sidebars.js index b12111797..04d81d475 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -113,7 +113,11 @@ module.exports = { type: "category", label: "Deployment", collapsed: true, - items: ["deployment/gcp-deployment"], + items: [ + "deployment/docker", + "deployment/backend-only", + "deployment/gcp-deployment", + ], }, { type: "category", diff --git a/docs/static/logos/twitter.svg b/docs/static/logos/twitter.svg index 027488d3c..437e2bfdd 100644 --- a/docs/static/logos/twitter.svg +++ b/docs/static/logos/twitter.svg @@ -1,3 +1,3 @@ - - + + diff --git a/poetry.lock b/poetry.lock index 5d85d6b81..67ba34d2b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -261,13 +261,13 @@ extras = ["pyaudio (>=0.2.13)"] [[package]] name = "astrapy" -version = "1.2.0" +version = "1.2.1" description = "AstraPy is a Pythonic SDK for DataStax Astra and its Data API" optional = false python-versions = "<4.0.0,>=3.8.0" files = [ - {file = "astrapy-1.2.0-py3-none-any.whl", hash = "sha256:5d65242771934c38ebe16f330e9e517968c1437846dabdbe7e48470f7b1782e8"}, - {file = "astrapy-1.2.0.tar.gz", hash = "sha256:6ce1b421d1ae21fe73373fa36048d8d56c775367886525504f01c48cbb742842"}, + {file = "astrapy-1.2.1-py3-none-any.whl", hash = "sha256:0d7ca1e6f18a6a4e9a41ffaf2aa4cc585d36de3e983b5c5ce0bbb30a1595e30b"}, + {file = "astrapy-1.2.1.tar.gz", hash = "sha256:c4ba88ef16ac1e990ccba322d376b6ea256513a3004a0894c14bfa2403f1d646"}, ] 
[package.dependencies] @@ -367,13 +367,13 @@ files = [ [[package]] name = "bce-python-sdk" -version = "0.9.11" +version = "0.9.14" description = "BCE SDK for python" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,<4,>=2.7" files = [ - {file = "bce_python_sdk-0.9.11-py3-none-any.whl", hash = "sha256:3afb9717f6c0c5f5fe3104a8bea4c111bf2ab3fe87ae73b05492566bc2b5d11a"}, - {file = "bce_python_sdk-0.9.11.tar.gz", hash = "sha256:d9e977f059fef6466eebdbb34ad1e27b6f76ef90338807ab959693a78a761e7d"}, + {file = "bce_python_sdk-0.9.14-py3-none-any.whl", hash = "sha256:5704aa454151ee608b01ddda7531457433f9b4bb8afbd00706dd368f3b4339a1"}, + {file = "bce_python_sdk-0.9.14.tar.gz", hash = "sha256:7cbd182ec1e21034f10d3cdb812f3171d31908f1a783d6cf643039272942d8e8"}, ] [package.dependencies] @@ -471,17 +471,17 @@ files = [ [[package]] name = "boto3" -version = "1.34.119" +version = "1.34.122" description = "The AWS SDK for Python" optional = false python-versions = ">=3.8" files = [ - {file = "boto3-1.34.119-py3-none-any.whl", hash = "sha256:8f9c43c54b3dfaa36c4a0d7b42c417227a515bc7a2e163e62802780000a5a3e2"}, - {file = "boto3-1.34.119.tar.gz", hash = "sha256:cea2365a25b2b83a97e77f24ac6f922ef62e20636b42f9f6ee9f97188f9c1c03"}, + {file = "boto3-1.34.122-py3-none-any.whl", hash = "sha256:b2d7400ff84fa547e53b3d9acfa3c95d65d45b5886ba1ede1f7df4768d1cc0b1"}, + {file = "boto3-1.34.122.tar.gz", hash = "sha256:56840d8ce91654d182f1c113f0791fa2113c3aa43230c50b4481f235348a6037"}, ] [package.dependencies] -botocore = ">=1.34.119,<1.35.0" +botocore = ">=1.34.122,<1.35.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -490,13 +490,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.34.119" +version = "1.34.122" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.34.119-py3-none-any.whl", hash = "sha256:4bdf7926a1290b2650d62899ceba65073dd2693e61c35f5cdeb3a286a0aaa27b"}, - {file = "botocore-1.34.119.tar.gz", hash = "sha256:b253f15b24b87b070e176af48e8ef146516090429d30a7d8b136a4c079b28008"}, + {file = "botocore-1.34.122-py3-none-any.whl", hash = "sha256:6d75df3af831b62f0c7baa109728d987e0a8d34bfadf0476eb32e2f29a079a36"}, + {file = "botocore-1.34.122.tar.gz", hash = "sha256:9374e16a36f1062c3e27816e8599b53eba99315dfac71cc84fc3aee3f5d3cbe3"}, ] [package.dependencies] @@ -505,7 +505,7 @@ python-dateutil = ">=2.1,<3.0.0" urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""} [package.extras] -crt = ["awscrt (==0.20.9)"] +crt = ["awscrt (==0.20.11)"] [[package]] name = "brotli" @@ -698,13 +698,13 @@ graph = ["gremlinpython (==3.4.6)"] [[package]] name = "cassio" -version = "0.1.7" +version = "0.1.8" description = "A framework-agnostic Python library to seamlessly integrate Apache Cassandra(R) with ML/LLM/genAI workloads." 
optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "cassio-0.1.7-py3-none-any.whl", hash = "sha256:08d1028a20d09bd207de0e17eaf7ae821b3c8e4788555e2d337aa440e0846d87"}, - {file = "cassio-0.1.7.tar.gz", hash = "sha256:44f705dff8a9a1c48527db2c9e968686358c960fa21ba940d9e66de00639ad78"}, + {file = "cassio-0.1.8-py3-none-any.whl", hash = "sha256:c09e7c884ba7227ff5277c86f3b0f31c523672ea407f56d093c7227e69c54d94"}, + {file = "cassio-0.1.8.tar.gz", hash = "sha256:4e09929506cb3dd6fad217e89846d0a1a59069afd24b82c72526ef6f2e9271af"}, ] [package.dependencies] @@ -1450,13 +1450,13 @@ tests = ["pytest"] [[package]] name = "dataclasses-json" -version = "0.6.6" +version = "0.6.7" description = "Easily serialize dataclasses to and from JSON." optional = false python-versions = "<4.0,>=3.7" files = [ - {file = "dataclasses_json-0.6.6-py3-none-any.whl", hash = "sha256:e54c5c87497741ad454070ba0ed411523d46beb5da102e221efb873801b0ba85"}, - {file = "dataclasses_json-0.6.6.tar.gz", hash = "sha256:0c09827d26fffda27f1be2fed7a7a01a29c5ddcd2eb6393ad5ebf9d77e9deae8"}, + {file = "dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a"}, + {file = "dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0"}, ] [package.dependencies] @@ -1590,6 +1590,23 @@ files = [ [package.dependencies] packaging = "*" +[[package]] +name = "dictdiffer" +version = "0.9.0" +description = "Dictdiffer is a library that helps you to diff and patch dictionaries." +optional = false +python-versions = "*" +files = [ + {file = "dictdiffer-0.9.0-py2.py3-none-any.whl", hash = "sha256:442bfc693cfcadaf46674575d2eba1c53b42f5e404218ca2c2ff549f2df56595"}, + {file = "dictdiffer-0.9.0.tar.gz", hash = "sha256:17bacf5fbfe613ccf1b6d512bd766e6b21fb798822a133aa86098b8ac9997578"}, +] + +[package.extras] +all = ["Sphinx (>=3)", "check-manifest (>=0.42)", "mock (>=1.3.0)", "numpy (>=1.13.0)", "numpy (>=1.15.0)", "numpy (>=1.18.0)", "numpy (>=1.20.0)", "pytest (==5.4.3)", "pytest (>=6)", "pytest-cov (>=2.10.1)", "pytest-isort (>=1.2.0)", "pytest-pycodestyle (>=2)", "pytest-pycodestyle (>=2.2.0)", "pytest-pydocstyle (>=2)", "pytest-pydocstyle (>=2.2.0)", "sphinx (>=3)", "sphinx-rtd-theme (>=0.2)", "tox (>=3.7.0)"] +docs = ["Sphinx (>=3)", "sphinx-rtd-theme (>=0.2)"] +numpy = ["numpy (>=1.13.0)", "numpy (>=1.15.0)", "numpy (>=1.18.0)", "numpy (>=1.20.0)"] +tests = ["check-manifest (>=0.42)", "mock (>=1.3.0)", "pytest (==5.4.3)", "pytest (>=6)", "pytest-cov (>=2.10.1)", "pytest-isort (>=1.2.0)", "pytest-pycodestyle (>=2)", "pytest-pycodestyle (>=2.2.0)", "pytest-pydocstyle (>=2)", "pytest-pydocstyle (>=2.2.0)", "sphinx (>=3)", "tox (>=3.7.0)"] + [[package]] name = "dill" version = "0.3.7" @@ -1824,13 +1841,13 @@ develop = ["aiohttp", "furo", "httpx", "mock", "opentelemetry-api", "opentelemet [[package]] name = "elasticsearch" -version = "8.13.2" +version = "8.14.0" description = "Python client for Elasticsearch" optional = false python-versions = ">=3.7" files = [ - {file = "elasticsearch-8.13.2-py3-none-any.whl", hash = "sha256:7412ceae9c0e437a72854ab3123aa1f37110d1635cc645366988b8c0fee98598"}, - {file = "elasticsearch-8.13.2.tar.gz", hash = "sha256:d51c93431a459b2b7c6c919b6e92a2adc8ac712758de9aeeb16cd4997fc148ad"}, + {file = "elasticsearch-8.14.0-py3-none-any.whl", hash = "sha256:cef8ef70a81af027f3da74a4f7d9296b390c636903088439087b8262a468c130"}, + {file = "elasticsearch-8.14.0.tar.gz", hash = 
"sha256:aa2490029dd96f4015b333c1827aa21fd6c0a4d223b00dfb0fe933b8d09a511b"}, ] [package.dependencies] @@ -2392,8 +2409,8 @@ files = [ [package.dependencies] cffi = {version = ">=1.12.2", markers = "platform_python_implementation == \"CPython\" and sys_platform == \"win32\""} greenlet = [ - {version = ">=2.0.0", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.11\""}, {version = ">=3.0rc3", markers = "platform_python_implementation == \"CPython\" and python_version >= \"3.11\""}, + {version = ">=2.0.0", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.11\""}, ] "zope.event" = "*" "zope.interface" = "*" @@ -2520,12 +2537,12 @@ files = [ google-auth = ">=2.14.1,<3.0.dev0" googleapis-common-protos = ">=1.56.2,<2.0.dev0" grpcio = [ - {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, ] grpcio-status = [ - {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, ] proto-plus = ">=1.22.3,<2.0.0dev" protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" @@ -2556,13 +2573,13 @@ uritemplate = ">=3.0.1,<5" [[package]] name = "google-auth" -version = "2.29.0" +version = "2.30.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google-auth-2.29.0.tar.gz", hash = "sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360"}, - {file = "google_auth-2.29.0-py2.py3-none-any.whl", hash = "sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415"}, + {file = "google-auth-2.30.0.tar.gz", hash = "sha256:ab630a1320f6720909ad76a7dbdb6841cdf5c66b328d690027e4867bdfb16688"}, + {file = "google_auth-2.30.0-py2.py3-none-any.whl", hash = "sha256:8df7da660f62757388b8a7f249df13549b3373f24388cb5d2f1dd91cc18180b5"}, ] [package.dependencies] @@ -2594,13 +2611,13 @@ httplib2 = ">=0.19.0" [[package]] name = "google-cloud-aiplatform" -version = "1.53.0" +version = "1.54.1" description = "Vertex AI API client library" optional = false python-versions = ">=3.8" files = [ - {file = "google-cloud-aiplatform-1.53.0.tar.gz", hash = "sha256:574cfad8ac5fa5d57ef717f5335ce05636a5fa9b8aeea0f5c325b46b9448e6b1"}, - {file = "google_cloud_aiplatform-1.53.0-py2.py3-none-any.whl", hash = "sha256:9dfb1f110e6d4795b45afcfab79108fc5c8ed9aa4eaf899e433bc2ca1b76c778"}, + {file = "google-cloud-aiplatform-1.54.1.tar.gz", hash = "sha256:01c231961cc1a1a3b049ea3ef71fb11e77b2d56d632d020ce09e419b27ff77f2"}, + {file = "google_cloud_aiplatform-1.54.1-py2.py3-none-any.whl", hash = "sha256:43f70fcd572f15317d769e5a0e04cfb7c0e259ead3fe581d2fba4f203ace5617"}, ] [package.dependencies] @@ -2621,7 +2638,7 @@ autologging = ["mlflow (>=1.27.0,<=2.1.1)"] cloud-profiler = ["tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "werkzeug (>=2.0.0,<2.1.0dev)"] datasets = ["pyarrow (>=10.0.1)", "pyarrow 
(>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)"] endpoint = ["requests (>=2.28.1)"] -full = ["cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nest-asyncio (>=1.0.0,<1.6.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "starlette (>=0.17.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)"] +full = ["cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nest-asyncio (>=1.0.0,<1.6.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "setuptools (<70.0.0)", "starlette (>=0.17.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)"] langchain = ["langchain (>=0.1.16,<0.2)", "langchain-core (<0.2)", "langchain-google-vertexai (<2)"] langchain-testing = ["absl-py", "cloudpickle (>=2.2.1,<4.0)", "langchain (>=0.1.16,<0.2)", "langchain-core (<0.2)", "langchain-google-vertexai (<2)", "pydantic (>=2.6.3,<3)", "pytest-xdist"] lit = ["explainable-ai-sdk (>=1.0.0)", "lit-nlp (==0.4.0)", "pandas (>=1.0.0)", "tensorflow (>=2.3.0,<3.0.0dev)"] @@ -2631,11 +2648,11 @@ prediction = ["docker (>=5.0.3)", "fastapi (>=0.71.0,<=0.109.1)", "httpx (>=0.23 preview = ["cloudpickle (<3.0)", "google-cloud-logging (<4.0)"] private-endpoints = ["requests (>=2.28.1)", "urllib3 (>=1.21.1,<1.27)"] rapid-evaluation = ["nest-asyncio (>=1.0.0,<1.6.0)", "pandas (>=1.0.0,<2.2.0)"] -ray = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)"] -ray-testing = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pytest-xdist", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "ray[train] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "scikit-learn", "tensorflow", "torch (>=2.0.0,<2.1.0)", "xgboost", "xgboost-ray"] +ray = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "setuptools (<70.0.0)"] +ray-testing = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas 
(>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pytest-xdist", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "ray[train] (==2.9.3)", "scikit-learn", "setuptools (<70.0.0)", "tensorflow", "torch (>=2.0.0,<2.1.0)", "xgboost", "xgboost-ray"] reasoningengine = ["cloudpickle (>=2.2.1,<4.0)", "pydantic (>=2.6.3,<3)"] tensorboard = ["tensorflow (>=2.3.0,<3.0.0dev)"] -testing = ["bigframes", "cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-api-core (>=2.11,<3.0.0)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "grpcio-testing", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "ipython", "kfp (>=2.6.0,<3.0.0)", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nest-asyncio (>=1.0.0,<1.6.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyfakefs", "pytest-asyncio", "pytest-xdist", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "requests-toolbelt (<1.0.0)", "scikit-learn", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (==2.13.0)", "tensorflow (==2.16.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "torch (>=2.0.0,<2.1.0)", "torch (>=2.2.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)", "xgboost"] +testing = ["bigframes", "cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-api-core (>=2.11,<3.0.0)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "grpcio-testing", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "ipython", "kfp (>=2.6.0,<3.0.0)", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nest-asyncio (>=1.0.0,<1.6.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyfakefs", "pytest-asyncio", "pytest-xdist", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "requests-toolbelt (<1.0.0)", "scikit-learn", "setuptools (<70.0.0)", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (==2.13.0)", "tensorflow (==2.16.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "torch (>=2.0.0,<2.1.0)", "torch (>=2.2.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)", "xgboost"] vizier = ["google-vizier (>=0.1.6)"] xai = ["tensorflow (>=2.3.0,<3.0.0dev)"] @@ -2897,13 +2914,13 @@ pydantic = ">=1.10,<3" [[package]] name = "gprof2dot" -version = "2022.7.29" +version = "2024.6.6" description = "Generate a dot graph from the output of several profilers." 
optional = false -python-versions = ">=2.7" +python-versions = ">=3.8" files = [ - {file = "gprof2dot-2022.7.29-py2.py3-none-any.whl", hash = "sha256:f165b3851d3c52ee4915eb1bd6cca571e5759823c2cd0f71a79bda93c2dc85d6"}, - {file = "gprof2dot-2022.7.29.tar.gz", hash = "sha256:45b4d298bd36608fccf9511c3fd88a773f7a1abc04d6cd39445b11ba43133ec5"}, + {file = "gprof2dot-2024.6.6-py2.py3-none-any.whl", hash = "sha256:45b14ad7ce64e299c8f526881007b9eb2c6b75505d5613e96e66ee4d5ab33696"}, + {file = "gprof2dot-2024.6.6.tar.gz", hash = "sha256:fa1420c60025a9eb7734f65225b4da02a10fc6dd741b37fa129bc6b41951e5ab"}, ] [[package]] @@ -3446,100 +3463,105 @@ files = [ [[package]] name = "ijson" -version = "3.2.3" +version = "3.3.0" description = "Iterative JSON parser with standard Python iterator interfaces" optional = false python-versions = "*" files = [ - {file = "ijson-3.2.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0a4ae076bf97b0430e4e16c9cb635a6b773904aec45ed8dcbc9b17211b8569ba"}, - {file = "ijson-3.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cfced0a6ec85916eb8c8e22415b7267ae118eaff2a860c42d2cc1261711d0d31"}, - {file = "ijson-3.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0b9d1141cfd1e6d6643aa0b4876730d0d28371815ce846d2e4e84a2d4f471cf3"}, - {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e0a27db6454edd6013d40a956d008361aac5bff375a9c04ab11fc8c214250b5"}, - {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c0d526ccb335c3c13063c273637d8611f32970603dfb182177b232d01f14c23"}, - {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:545a30b3659df2a3481593d30d60491d1594bc8005f99600e1bba647bb44cbb5"}, - {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9680e37a10fedb3eab24a4a7e749d8a73f26f1a4c901430e7aa81b5da15f7307"}, - {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2a80c0bb1053055d1599e44dc1396f713e8b3407000e6390add72d49633ff3bb"}, - {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f05ed49f434ce396ddcf99e9fd98245328e99f991283850c309f5e3182211a79"}, - {file = "ijson-3.2.3-cp310-cp310-win32.whl", hash = "sha256:b4eb2304573c9fdf448d3fa4a4fdcb727b93002b5c5c56c14a5ffbbc39f64ae4"}, - {file = "ijson-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:923131f5153c70936e8bd2dd9dcfcff43c67a3d1c789e9c96724747423c173eb"}, - {file = "ijson-3.2.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:904f77dd3d87736ff668884fe5197a184748eb0c3e302ded61706501d0327465"}, - {file = "ijson-3.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0974444c1f416e19de1e9f567a4560890095e71e81623c509feff642114c1e53"}, - {file = "ijson-3.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1a4b8eb69b6d7b4e94170aa991efad75ba156b05f0de2a6cd84f991def12ff9"}, - {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d052417fd7ce2221114f8d3b58f05a83c1a2b6b99cafe0b86ac9ed5e2fc889df"}, - {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b8064a85ec1b0beda7dd028e887f7112670d574db606f68006c72dd0bb0e0e2"}, - {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaac293853f1342a8d2a45ac1f723c860f700860e7743fb97f7b76356df883a8"}, - {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6c32c18a934c1dc8917455b0ce478fd7a26c50c364bd52c5a4fb0fc6bb516af7"}, - 
{file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:713a919e0220ac44dab12b5fed74f9130f3480e55e90f9d80f58de129ea24f83"}, - {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a3a6a2fbbe7550ffe52d151cf76065e6b89cfb3e9d0463e49a7e322a25d0426"}, - {file = "ijson-3.2.3-cp311-cp311-win32.whl", hash = "sha256:6a4db2f7fb9acfb855c9ae1aae602e4648dd1f88804a0d5cfb78c3639bcf156c"}, - {file = "ijson-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:ccd6be56335cbb845f3d3021b1766299c056c70c4c9165fb2fbe2d62258bae3f"}, - {file = "ijson-3.2.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:055b71bbc37af5c3c5861afe789e15211d2d3d06ac51ee5a647adf4def19c0ea"}, - {file = "ijson-3.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c075a547de32f265a5dd139ab2035900fef6653951628862e5cdce0d101af557"}, - {file = "ijson-3.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:457f8a5fc559478ac6b06b6d37ebacb4811f8c5156e997f0d87d708b0d8ab2ae"}, - {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9788f0c915351f41f0e69ec2618b81ebfcf9f13d9d67c6d404c7f5afda3e4afb"}, - {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa234ab7a6a33ed51494d9d2197fb96296f9217ecae57f5551a55589091e7853"}, - {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdd0dc5da4f9dc6d12ab6e8e0c57d8b41d3c8f9ceed31a99dae7b2baf9ea769a"}, - {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c6beb80df19713e39e68dc5c337b5c76d36ccf69c30b79034634e5e4c14d6904"}, - {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a2973ce57afb142d96f35a14e9cfec08308ef178a2c76b8b5e1e98f3960438bf"}, - {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:105c314fd624e81ed20f925271ec506523b8dd236589ab6c0208b8707d652a0e"}, - {file = "ijson-3.2.3-cp312-cp312-win32.whl", hash = "sha256:ac44781de5e901ce8339352bb5594fcb3b94ced315a34dbe840b4cff3450e23b"}, - {file = "ijson-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:0567e8c833825b119e74e10a7c29761dc65fcd155f5d4cb10f9d3b8916ef9912"}, - {file = "ijson-3.2.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:eeb286639649fb6bed37997a5e30eefcacddac79476d24128348ec890b2a0ccb"}, - {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:396338a655fb9af4ac59dd09c189885b51fa0eefc84d35408662031023c110d1"}, - {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e0243d166d11a2a47c17c7e885debf3b19ed136be2af1f5d1c34212850236ac"}, - {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85afdb3f3a5d0011584d4fa8e6dccc5936be51c27e84cd2882fe904ca3bd04c5"}, - {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4fc35d569eff3afa76bfecf533f818ecb9390105be257f3f83c03204661ace70"}, - {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:455d7d3b7a6aacfb8ab1ebcaf697eedf5be66e044eac32508fccdc633d995f0e"}, - {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:c63f3d57dbbac56cead05b12b81e8e1e259f14ce7f233a8cbe7fa0996733b628"}, - {file = "ijson-3.2.3-cp36-cp36m-win32.whl", hash = "sha256:a4d7fe3629de3ecb088bff6dfe25f77be3e8261ed53d5e244717e266f8544305"}, - {file = "ijson-3.2.3-cp36-cp36m-win_amd64.whl", hash = "sha256:96190d59f015b5a2af388a98446e411f58ecc6a93934e036daa75f75d02386a0"}, - {file = 
"ijson-3.2.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:35194e0b8a2bda12b4096e2e792efa5d4801a0abb950c48ade351d479cd22ba5"}, - {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1053fb5f0b010ee76ca515e6af36b50d26c1728ad46be12f1f147a835341083"}, - {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:211124cff9d9d139dd0dfced356f1472860352c055d2481459038b8205d7d742"}, - {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92dc4d48e9f6a271292d6079e9fcdce33c83d1acf11e6e12696fb05c5889fe74"}, - {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3dcc33ee56f92a77f48776014ddb47af67c33dda361e84371153c4f1ed4434e1"}, - {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:98c6799925a5d1988da4cd68879b8eeab52c6e029acc45e03abb7921a4715c4b"}, - {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4252e48c95cd8ceefc2caade310559ab61c37d82dfa045928ed05328eb5b5f65"}, - {file = "ijson-3.2.3-cp37-cp37m-win32.whl", hash = "sha256:644f4f03349ff2731fd515afd1c91b9e439e90c9f8c28292251834154edbffca"}, - {file = "ijson-3.2.3-cp37-cp37m-win_amd64.whl", hash = "sha256:ba33c764afa9ecef62801ba7ac0319268a7526f50f7601370d9f8f04e77fc02b"}, - {file = "ijson-3.2.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4b2ec8c2a3f1742cbd5f36b65e192028e541b5fd8c7fd97c1fc0ca6c427c704a"}, - {file = "ijson-3.2.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7dc357da4b4ebd8903e77dbcc3ce0555ee29ebe0747c3c7f56adda423df8ec89"}, - {file = "ijson-3.2.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bcc51c84bb220ac330122468fe526a7777faa6464e3b04c15b476761beea424f"}, - {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8d54b624629f9903005c58d9321a036c72f5c212701bbb93d1a520ecd15e370"}, - {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6ea7c7e3ec44742e867c72fd750c6a1e35b112f88a917615332c4476e718d40"}, - {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:916acdc5e504f8b66c3e287ada5d4b39a3275fc1f2013c4b05d1ab9933671a6c"}, - {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81815b4184b85ce124bfc4c446d5f5e5e643fc119771c5916f035220ada29974"}, - {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b49fd5fe1cd9c1c8caf6c59f82b08117dd6bea2ec45b641594e25948f48f4169"}, - {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:86b3c91fdcb8ffb30556c9669930f02b7642de58ca2987845b04f0d7fe46d9a8"}, - {file = "ijson-3.2.3-cp38-cp38-win32.whl", hash = "sha256:a729b0c8fb935481afe3cf7e0dadd0da3a69cc7f145dbab8502e2f1e01d85a7c"}, - {file = "ijson-3.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:d34e049992d8a46922f96483e96b32ac4c9cffd01a5c33a928e70a283710cd58"}, - {file = "ijson-3.2.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9c2a12dcdb6fa28f333bf10b3a0f80ec70bc45280d8435be7e19696fab2bc706"}, - {file = "ijson-3.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1844c5b57da21466f255a0aeddf89049e730d7f3dfc4d750f0e65c36e6a61a7c"}, - {file = "ijson-3.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ec3e5ff2515f1c40ef6a94983158e172f004cd643b9e4b5302017139b6c96e4"}, - {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46bafb1b9959872a1f946f8dd9c6f1a30a970fc05b7bfae8579da3f1f988e598"}, - {file = 
"ijson-3.2.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab4db9fee0138b60e31b3c02fff8a4c28d7b152040553b6a91b60354aebd4b02"}, - {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4bc87e69d1997c6a55fff5ee2af878720801ff6ab1fb3b7f94adda050651e37"}, - {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e9fd906f0c38e9f0bfd5365e1bed98d649f506721f76bb1a9baa5d7374f26f19"}, - {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e84d27d1acb60d9102728d06b9650e5b7e5cb0631bd6e3dfadba8fb6a80d6c2f"}, - {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2cc04fc0a22bb945cd179f614845c8b5106c0b3939ee0d84ce67c7a61ac1a936"}, - {file = "ijson-3.2.3-cp39-cp39-win32.whl", hash = "sha256:e641814793a037175f7ec1b717ebb68f26d89d82cfd66f36e588f32d7e488d5f"}, - {file = "ijson-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:6bd3e7e91d031f1e8cea7ce53f704ab74e61e505e8072467e092172422728b22"}, - {file = "ijson-3.2.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:06f9707da06a19b01013f8c65bf67db523662a9b4a4ff027e946e66c261f17f0"}, - {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be8495f7c13fa1f622a2c6b64e79ac63965b89caf664cc4e701c335c652d15f2"}, - {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7596b42f38c3dcf9d434dddd50f46aeb28e96f891444c2b4b1266304a19a2c09"}, - {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbac4e9609a1086bbad075beb2ceec486a3b138604e12d2059a33ce2cba93051"}, - {file = "ijson-3.2.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:db2d6341f9cb538253e7fe23311d59252f124f47165221d3c06a7ed667ecd595"}, - {file = "ijson-3.2.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fa8b98be298efbb2588f883f9953113d8a0023ab39abe77fe734b71b46b1220a"}, - {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:674e585361c702fad050ab4c153fd168dc30f5980ef42b64400bc84d194e662d"}, - {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd12e42b9cb9c0166559a3ffa276b4f9fc9d5b4c304e5a13668642d34b48b634"}, - {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d31e0d771d82def80cd4663a66de277c3b44ba82cd48f630526b52f74663c639"}, - {file = "ijson-3.2.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ce4c70c23521179d6da842bb9bc2e36bb9fad1e0187e35423ff0f282890c9ca"}, - {file = "ijson-3.2.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:39f551a6fbeed4433c85269c7c8778e2aaea2501d7ebcb65b38f556030642c17"}, - {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b14d322fec0de7af16f3ef920bf282f0dd747200b69e0b9628117f381b7775b"}, - {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7851a341429b12d4527ca507097c959659baf5106c7074d15c17c387719ffbcd"}, - {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db3bf1b42191b5cc9b6441552fdcb3b583594cb6b19e90d1578b7cbcf80d0fae"}, - {file = "ijson-3.2.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6f662dc44362a53af3084d3765bb01cd7b4734d1f484a6095cad4cb0cbfe5374"}, - {file = "ijson-3.2.3.tar.gz", hash = "sha256:10294e9bf89cb713da05bc4790bdff616610432db561964827074898e174f917"}, + {file = 
"ijson-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7f7a5250599c366369fbf3bc4e176f5daa28eb6bc7d6130d02462ed335361675"}, + {file = "ijson-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f87a7e52f79059f9c58f6886c262061065eb6f7554a587be7ed3aa63e6b71b34"}, + {file = "ijson-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b73b493af9e947caed75d329676b1b801d673b17481962823a3e55fe529c8b8b"}, + {file = "ijson-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5576415f3d76290b160aa093ff968f8bf6de7d681e16e463a0134106b506f49"}, + {file = "ijson-3.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e9ffe358d5fdd6b878a8a364e96e15ca7ca57b92a48f588378cef315a8b019e"}, + {file = "ijson-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8643c255a25824ddd0895c59f2319c019e13e949dc37162f876c41a283361527"}, + {file = "ijson-3.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:df3ab5e078cab19f7eaeef1d5f063103e1ebf8c26d059767b26a6a0ad8b250a3"}, + {file = "ijson-3.3.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3dc1fb02c6ed0bae1b4bf96971258bf88aea72051b6e4cebae97cff7090c0607"}, + {file = "ijson-3.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e9afd97339fc5a20f0542c971f90f3ca97e73d3050cdc488d540b63fae45329a"}, + {file = "ijson-3.3.0-cp310-cp310-win32.whl", hash = "sha256:844c0d1c04c40fd1b60f148dc829d3f69b2de789d0ba239c35136efe9a386529"}, + {file = "ijson-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:d654d045adafdcc6c100e8e911508a2eedbd2a1b5f93f930ba13ea67d7704ee9"}, + {file = "ijson-3.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:501dce8eaa537e728aa35810656aa00460a2547dcb60937c8139f36ec344d7fc"}, + {file = "ijson-3.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:658ba9cad0374d37b38c9893f4864f284cdcc7d32041f9808fba8c7bcaadf134"}, + {file = "ijson-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2636cb8c0f1023ef16173f4b9a233bcdb1df11c400c603d5f299fac143ca8d70"}, + {file = "ijson-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd174b90db68c3bcca273e9391934a25d76929d727dc75224bf244446b28b03b"}, + {file = "ijson-3.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:97a9aea46e2a8371c4cf5386d881de833ed782901ac9f67ebcb63bb3b7d115af"}, + {file = "ijson-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c594c0abe69d9d6099f4ece17763d53072f65ba60b372d8ba6de8695ce6ee39e"}, + {file = "ijson-3.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8e0ff16c224d9bfe4e9e6bd0395826096cda4a3ef51e6c301e1b61007ee2bd24"}, + {file = "ijson-3.3.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0015354011303175eae7e2ef5136414e91de2298e5a2e9580ed100b728c07e51"}, + {file = "ijson-3.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:034642558afa57351a0ffe6de89e63907c4cf6849070cc10a3b2542dccda1afe"}, + {file = "ijson-3.3.0-cp311-cp311-win32.whl", hash = "sha256:192e4b65495978b0bce0c78e859d14772e841724d3269fc1667dc6d2f53cc0ea"}, + {file = "ijson-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:72e3488453754bdb45c878e31ce557ea87e1eb0f8b4fc610373da35e8074ce42"}, + {file = "ijson-3.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:988e959f2f3d59ebd9c2962ae71b97c0df58323910d0b368cc190ad07429d1bb"}, + {file = "ijson-3.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b2f73f0d0fce5300f23a1383d19b44d103bb113b57a69c36fd95b7c03099b181"}, + {file = 
"ijson-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0ee57a28c6bf523d7cb0513096e4eb4dac16cd935695049de7608ec110c2b751"}, + {file = "ijson-3.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0155a8f079c688c2ccaea05de1ad69877995c547ba3d3612c1c336edc12a3a5"}, + {file = "ijson-3.3.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ab00721304af1ae1afa4313ecfa1bf16b07f55ef91e4a5b93aeaa3e2bd7917c"}, + {file = "ijson-3.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40ee3821ee90be0f0e95dcf9862d786a7439bd1113e370736bfdf197e9765bfb"}, + {file = "ijson-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3b6987a0bc3e6d0f721b42c7a0198ef897ae50579547b0345f7f02486898f5"}, + {file = "ijson-3.3.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:63afea5f2d50d931feb20dcc50954e23cef4127606cc0ecf7a27128ed9f9a9e6"}, + {file = "ijson-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b5c3e285e0735fd8c5a26d177eca8b52512cdd8687ca86ec77a0c66e9c510182"}, + {file = "ijson-3.3.0-cp312-cp312-win32.whl", hash = "sha256:907f3a8674e489abdcb0206723e5560a5cb1fa42470dcc637942d7b10f28b695"}, + {file = "ijson-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:8f890d04ad33262d0c77ead53c85f13abfb82f2c8f078dfbf24b78f59534dfdd"}, + {file = "ijson-3.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b9d85a02e77ee8ea6d9e3fd5d515bcc3d798d9c1ea54817e5feb97a9bc5d52fe"}, + {file = "ijson-3.3.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6576cdc36d5a09b0c1a3d81e13a45d41a6763188f9eaae2da2839e8a4240bce"}, + {file = "ijson-3.3.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5589225c2da4bb732c9c370c5961c39a6db72cf69fb2a28868a5413ed7f39e6"}, + {file = "ijson-3.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad04cf38164d983e85f9cba2804566c0160b47086dcca4cf059f7e26c5ace8ca"}, + {file = "ijson-3.3.0-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:a3b730ef664b2ef0e99dec01b6573b9b085c766400af363833e08ebc1e38eb2f"}, + {file = "ijson-3.3.0-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:4690e3af7b134298055993fcbea161598d23b6d3ede11b12dca6815d82d101d5"}, + {file = "ijson-3.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:aaa6bfc2180c31a45fac35d40e3312a3d09954638ce0b2e9424a88e24d262a13"}, + {file = "ijson-3.3.0-cp36-cp36m-win32.whl", hash = "sha256:44367090a5a876809eb24943f31e470ba372aaa0d7396b92b953dda953a95d14"}, + {file = "ijson-3.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7e2b3e9ca957153557d06c50a26abaf0d0d6c0ddf462271854c968277a6b5372"}, + {file = "ijson-3.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:47c144117e5c0e2babb559bc8f3f76153863b8dd90b2d550c51dab5f4b84a87f"}, + {file = "ijson-3.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29ce02af5fbf9ba6abb70765e66930aedf73311c7d840478f1ccecac53fefbf3"}, + {file = "ijson-3.3.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ac6c3eeed25e3e2cb9b379b48196413e40ac4e2239d910bb33e4e7f6c137745"}, + {file = "ijson-3.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d92e339c69b585e7b1d857308ad3ca1636b899e4557897ccd91bb9e4a56c965b"}, + {file = "ijson-3.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:8c85447569041939111b8c7dbf6f8fa7a0eb5b2c4aebb3c3bec0fb50d7025121"}, + {file = "ijson-3.3.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = 
"sha256:542c1e8fddf082159a5d759ee1412c73e944a9a2412077ed00b303ff796907dc"}, + {file = "ijson-3.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:30cfea40936afb33b57d24ceaf60d0a2e3d5c1f2335ba2623f21d560737cc730"}, + {file = "ijson-3.3.0-cp37-cp37m-win32.whl", hash = "sha256:6b661a959226ad0d255e49b77dba1d13782f028589a42dc3172398dd3814c797"}, + {file = "ijson-3.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:0b003501ee0301dbf07d1597482009295e16d647bb177ce52076c2d5e64113e0"}, + {file = "ijson-3.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3e8d8de44effe2dbd0d8f3eb9840344b2d5b4cc284a14eb8678aec31d1b6bea8"}, + {file = "ijson-3.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9cd5c03c63ae06d4f876b9844c5898d0044c7940ff7460db9f4cd984ac7862b5"}, + {file = "ijson-3.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04366e7e4a4078d410845e58a2987fd9c45e63df70773d7b6e87ceef771b51ee"}, + {file = "ijson-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de7c1ddb80fa7a3ab045266dca169004b93f284756ad198306533b792774f10a"}, + {file = "ijson-3.3.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8851584fb931cffc0caa395f6980525fd5116eab8f73ece9d95e6f9c2c326c4c"}, + {file = "ijson-3.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdcfc88347fd981e53c33d832ce4d3e981a0d696b712fbcb45dcc1a43fe65c65"}, + {file = "ijson-3.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3917b2b3d0dbbe3296505da52b3cb0befbaf76119b2edaff30bd448af20b5400"}, + {file = "ijson-3.3.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:e10c14535abc7ddf3fd024aa36563cd8ab5d2bb6234a5d22c77c30e30fa4fb2b"}, + {file = "ijson-3.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3aba5c4f97f4e2ce854b5591a8b0711ca3b0c64d1b253b04ea7b004b0a197ef6"}, + {file = "ijson-3.3.0-cp38-cp38-win32.whl", hash = "sha256:b325f42e26659df1a0de66fdb5cde8dd48613da9c99c07d04e9fb9e254b7ee1c"}, + {file = "ijson-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:ff835906f84451e143f31c4ce8ad73d83ef4476b944c2a2da91aec8b649570e1"}, + {file = "ijson-3.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3c556f5553368dff690c11d0a1fb435d4ff1f84382d904ccc2dc53beb27ba62e"}, + {file = "ijson-3.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e4396b55a364a03ff7e71a34828c3ed0c506814dd1f50e16ebed3fc447d5188e"}, + {file = "ijson-3.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e6850ae33529d1e43791b30575070670070d5fe007c37f5d06aebc1dd152ab3f"}, + {file = "ijson-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36aa56d68ea8def26778eb21576ae13f27b4a47263a7a2581ab2ef58b8de4451"}, + {file = "ijson-3.3.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7ec759c4a0fc820ad5dc6a58e9c391e7b16edcb618056baedbedbb9ea3b1524"}, + {file = "ijson-3.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b51bab2c4e545dde93cb6d6bb34bf63300b7cd06716f195dd92d9255df728331"}, + {file = "ijson-3.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:92355f95a0e4da96d4c404aa3cff2ff033f9180a9515f813255e1526551298c1"}, + {file = "ijson-3.3.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8795e88adff5aa3c248c1edce932db003d37a623b5787669ccf205c422b91e4a"}, + {file = "ijson-3.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8f83f553f4cde6d3d4eaf58ec11c939c94a0ec545c5b287461cafb184f4b3a14"}, + {file = "ijson-3.3.0-cp39-cp39-win32.whl", hash = 
"sha256:ead50635fb56577c07eff3e557dac39533e0fe603000684eea2af3ed1ad8f941"}, + {file = "ijson-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:c8a9befb0c0369f0cf5c1b94178d0d78f66d9cebb9265b36be6e4f66236076b8"}, + {file = "ijson-3.3.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2af323a8aec8a50fa9effa6d640691a30a9f8c4925bd5364a1ca97f1ac6b9b5c"}, + {file = "ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f64f01795119880023ba3ce43072283a393f0b90f52b66cc0ea1a89aa64a9ccb"}, + {file = "ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a716e05547a39b788deaf22725490855337fc36613288aa8ae1601dc8c525553"}, + {file = "ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:473f5d921fadc135d1ad698e2697025045cd8ed7e5e842258295012d8a3bc702"}, + {file = "ijson-3.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd26b396bc3a1e85f4acebeadbf627fa6117b97f4c10b177d5779577c6607744"}, + {file = "ijson-3.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:25fd49031cdf5fd5f1fd21cb45259a64dad30b67e64f745cc8926af1c8c243d3"}, + {file = "ijson-3.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b72178b1e565d06ab19319965022b36ef41bcea7ea153b32ec31194bec032a2"}, + {file = "ijson-3.3.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d0b6b637d05dbdb29d0bfac2ed8425bb369e7af5271b0cc7cf8b801cb7360c2"}, + {file = "ijson-3.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5378d0baa59ae422905c5f182ea0fd74fe7e52a23e3821067a7d58c8306b2191"}, + {file = "ijson-3.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:99f5c8ab048ee4233cc4f2b461b205cbe01194f6201018174ac269bf09995749"}, + {file = "ijson-3.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:45ff05de889f3dc3d37a59d02096948ce470699f2368b32113954818b21aa74a"}, + {file = "ijson-3.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1efb521090dd6cefa7aafd120581947b29af1713c902ff54336b7c7130f04c47"}, + {file = "ijson-3.3.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87c727691858fd3a1c085d9980d12395517fcbbf02c69fbb22dede8ee03422da"}, + {file = "ijson-3.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0420c24e50389bc251b43c8ed379ab3e3ba065ac8262d98beb6735ab14844460"}, + {file = "ijson-3.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:8fdf3721a2aa7d96577970f5604bd81f426969c1822d467f07b3d844fa2fecc7"}, + {file = "ijson-3.3.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:891f95c036df1bc95309951940f8eea8537f102fa65715cdc5aae20b8523813b"}, + {file = "ijson-3.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed1336a2a6e5c427f419da0154e775834abcbc8ddd703004108121c6dd9eba9d"}, + {file = "ijson-3.3.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0c819f83e4f7b7f7463b2dc10d626a8be0c85fbc7b3db0edc098c2b16ac968e"}, + {file = "ijson-3.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33afc25057377a6a43c892de34d229a86f89ea6c4ca3dd3db0dcd17becae0dbb"}, + {file = "ijson-3.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7914d0cf083471856e9bc2001102a20f08e82311dfc8cf1a91aa422f9414a0d6"}, + {file = "ijson-3.3.0.tar.gz", hash = "sha256:7f172e6ba1bee0d4c8f8ebd639577bfe429dee0f3f96775a067b8bae4492d8a0"}, ] [[package]] @@ 
-3994,13 +4016,13 @@ zookeeper = ["kazoo (>=2.8.0)"] [[package]] name = "kubernetes" -version = "29.0.0" +version = "30.1.0" description = "Kubernetes python client" optional = false python-versions = ">=3.6" files = [ - {file = "kubernetes-29.0.0-py2.py3-none-any.whl", hash = "sha256:ab8cb0e0576ccdfb71886366efb102c6a20f268d817be065ce7f9909c631e43e"}, - {file = "kubernetes-29.0.0.tar.gz", hash = "sha256:c4812e227ae74d07d53c88293e564e54b850452715a59a927e7e1bc6b9a60459"}, + {file = "kubernetes-30.1.0-py2.py3-none-any.whl", hash = "sha256:e212e8b7579031dd2e512168b617373bc1e03888d41ac4e04039240a292d478d"}, + {file = "kubernetes-30.1.0.tar.gz", hash = "sha256:41e4c77af9f28e7a6c314e3bd06a8c6229ddd787cad684e0ab9f69b498e98ebc"}, ] [package.dependencies] @@ -4020,13 +4042,13 @@ adal = ["adal (>=1.0.2)"] [[package]] name = "langchain" -version = "0.2.2" +version = "0.2.3" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain-0.2.2-py3-none-any.whl", hash = "sha256:58ca0c47bcdd156da66f50a0a4fcedc49bf6950827f4a6b06c8c4842d55805f3"}, - {file = "langchain-0.2.2.tar.gz", hash = "sha256:9d61e50e9cdc2bea659bc5e6c03650ba048fda63a307490ae368e539f61a0d3a"}, + {file = "langchain-0.2.3-py3-none-any.whl", hash = "sha256:5dc33cd9c8008693d328b7cb698df69073acecc89ad9c2a95f243b3314f8d834"}, + {file = "langchain-0.2.3.tar.gz", hash = "sha256:81962cc72cce6515f7bd71e01542727870789bf8b666c6913d85559080c1a201"}, ] [package.dependencies] @@ -4042,20 +4064,6 @@ requests = ">=2,<3" SQLAlchemy = ">=1.4,<3" tenacity = ">=8.1.0,<9.0.0" -[package.extras] -azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"] -clarifai = ["clarifai (>=9.1.0)"] -cli = ["typer (>=0.9.0,<0.10.0)"] -cohere = ["cohere (>=4,<6)"] -docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"] -embeddings = ["sentence-transformers (>=2,<3)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.1,<0.2)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", 
"py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] -javascript = ["esprima (>=4.0.1,<5.0.0)"] -llms = ["clarifai (>=9.1.0)", "cohere (>=4,<6)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"] -openai = ["openai (<2)", "tiktoken (>=0.7,<1.0)"] -qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"] -text-helpers = ["chardet (>=5.1.0,<6.0.0)"] - [[package]] name = "langchain-anthropic" version = "0.1.15" @@ -4122,13 +4130,13 @@ langchain-core = ">=0.1.42,<0.3" [[package]] name = "langchain-community" -version = "0.2.2" +version = "0.2.4" description = "Community contributed LangChain integrations." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_community-0.2.2-py3-none-any.whl", hash = "sha256:470ee16e05f1acacb91a656b6d3c2cbf6fb6a8dcb00a13901cd1353cd29c2bb3"}, - {file = "langchain_community-0.2.2.tar.gz", hash = "sha256:fb09faf4640726a929932056dc55ff120e490aaf2e424fae8ddbb15605195447"}, + {file = "langchain_community-0.2.4-py3-none-any.whl", hash = "sha256:8582e9800f4837660dc297cccd2ee1ddc1d8c440d0fe8b64edb07620f0373b0e"}, + {file = "langchain_community-0.2.4.tar.gz", hash = "sha256:2bb6a1a36b8500a564d25d76469c02457b1a7c3afea6d4a609a47c06b993e3e4"}, ] [package.dependencies] @@ -4143,19 +4151,15 @@ requests = ">=2,<3" SQLAlchemy = ">=1.4,<3" tenacity = ">=8.1.0,<9.0.0" -[package.extras] -cli = ["typer (>=0.9.0,<0.10.0)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-search-documents (==11.4.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.6,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cloudpathlib (>=0.18,<0.19)", "cloudpickle (>=2.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "friendli-client (>=1.2.4,<2.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "httpx-sse (>=0.4.0,<0.5.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", 
"nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "oracledb (>=2.2.0,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "premai (>=0.3.25,<0.4.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pyjwt (>=2.8.0,<3.0.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "simsimd (>=4.3.1,<5.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tidb-vector (>=0.0.3,<1.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "vdms (>=0.0.20,<0.0.21)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] - [[package]] name = "langchain-core" -version = "0.2.4" +version = "0.2.5" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_core-0.2.4-py3-none-any.whl", hash = "sha256:5212f7ec78a525e88a178ed3aefe2fd7134b03fb92573dfbab9914f1d92d6ec5"}, - {file = "langchain_core-0.2.4.tar.gz", hash = "sha256:82bdcc546eb0341cefcf1f4ecb3e49836fff003903afddda2d1312bb8491ef81"}, + {file = "langchain_core-0.2.5-py3-none-any.whl", hash = "sha256:abe5138f22acff23a079ec538be5268bbf97cf023d51987a0dd474d2a16cae3e"}, + {file = "langchain_core-0.2.5.tar.gz", hash = "sha256:4a5c2f56b22396a63ef4790043660e393adbfa6832b978f023ca996a04b8e752"}, ] [package.dependencies] @@ -4166,9 +4170,6 @@ pydantic = ">=1,<3" PyYAML = ">=5.3" tenacity = ">=8.1.0,<9.0.0" -[package.extras] -extended-testing = ["jinja2 (>=3,<4)"] - [[package]] name = "langchain-experimental" version = "0.0.60" @@ -4189,37 +4190,37 @@ extended-testing = ["faker (>=19.3.1,<20.0.0)", "jinja2 (>=3,<4)", "pandas (>=2. 
[[package]] name = "langchain-google-genai" -version = "1.0.5" +version = "1.0.6" description = "An integration package connecting Google's genai package and LangChain" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "langchain_google_genai-1.0.5-py3-none-any.whl", hash = "sha256:06b1af072e14fe2d4f9257be4bf883ccd544896094f847c2b1ab09b123ba3b9e"}, - {file = "langchain_google_genai-1.0.5.tar.gz", hash = "sha256:5b515192755fd396a1b61b33d1b08c77fb9b53394cc25954f9d7e9a0f615de9b"}, + {file = "langchain_google_genai-1.0.6-py3-none-any.whl", hash = "sha256:65188b3c2867efda78e09c29371499ab0d25c6a111b175365fdae2b5be1502e6"}, + {file = "langchain_google_genai-1.0.6.tar.gz", hash = "sha256:7c964117fa385c490b323ee50ab46907229823d3678b80bfacc8fa0a237fb0b9"}, ] [package.dependencies] google-generativeai = ">=0.5.2,<0.6.0" -langchain-core = ">=0.2.0,<0.3" +langchain-core = ">=0.2.2,<0.3" [package.extras] images = ["pillow (>=10.1.0,<11.0.0)"] [[package]] name = "langchain-google-vertexai" -version = "1.0.4" +version = "1.0.5" description = "An integration package connecting Google VertexAI and LangChain" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_google_vertexai-1.0.4-py3-none-any.whl", hash = "sha256:f9d217df2d5cfafb2e551ddd5f1c43611222f542ee0df0cc3b5faed82e657ee3"}, - {file = "langchain_google_vertexai-1.0.4.tar.gz", hash = "sha256:bb2d2e93cc2896b9bdc96789c2df247f6392184dffc0c3dddc06889f2b530465"}, + {file = "langchain_google_vertexai-1.0.5-py3-none-any.whl", hash = "sha256:38f4a39bf35927d744d0883907c4d4a59eef059e9b36f28bb5c737c2aae6963b"}, + {file = "langchain_google_vertexai-1.0.5.tar.gz", hash = "sha256:50005dc12ff9d66bbbab9e1ab660574b1584eee3e7b5a647dc8a009a94f0c500"}, ] [package.dependencies] google-cloud-aiplatform = ">=1.47.0,<2.0.0" google-cloud-storage = ">=2.14.0,<3.0.0" -langchain-core = ">=0.1.42,<0.3" +langchain-core = ">=0.2.2,<0.3" [package.extras] anthropic = ["anthropic[vertexai] (>=0.23.0,<1)"] @@ -4307,13 +4308,13 @@ extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml (>=4.9.3,<6.0)"] [[package]] name = "langchainhub" -version = "0.1.17" +version = "0.1.18" description = "The LangChain Hub API client" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchainhub-0.1.17-py3-none-any.whl", hash = "sha256:4c609b3948252c71670f0d98f73413b515cfd2f6701a7b40ce959203e6133e04"}, - {file = "langchainhub-0.1.17.tar.gz", hash = "sha256:af7df0cb1cebc7a6e0864e8632ae48ecad39ed96568f699c78657b9d04e50b46"}, + {file = "langchainhub-0.1.18-py3-none-any.whl", hash = "sha256:11501f15e7f34715ecc8892587daa35c6f2a3005e1f2926c9bcabd31fc2c100c"}, + {file = "langchainhub-0.1.18.tar.gz", hash = "sha256:f2d0d8bf3abe4ca5e70511d8220bdc9ccea28d5267bcfd0e5ef9c53bd5bd3bad"}, ] [package.dependencies] @@ -4322,7 +4323,7 @@ types-requests = ">=2.31.0.2,<3.0.0.0" [[package]] name = "langflow-base" -version = "0.0.56" +version = "0.0.60" description = "A Python package with a built-in web application" optional = false python-versions = ">=3.10,<3.13" @@ -4379,13 +4380,13 @@ url = "src/backend/base" [[package]] name = "langfuse" -version = "2.33.1" +version = "2.35.2" description = "A client library for accessing langfuse" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langfuse-2.33.1-py3-none-any.whl", hash = "sha256:61ff3ff4b9c9c195028c981cba892106fdf90028e3950209a15f0ae06a378a36"}, - {file = "langfuse-2.33.1.tar.gz", hash = "sha256:444a870e8b13ad37df710931389ecd3bad9997e550edf3c3178b5a0bd7ada013"}, + {file = 
"langfuse-2.35.2-py3-none-any.whl", hash = "sha256:d01a23842cab484594f03878aacb9732ef8fd361158eb819c7bf43f758a0954b"}, + {file = "langfuse-2.35.2.tar.gz", hash = "sha256:32b2e6c5bc71b4efdc430c6b964ab1c1e1ba1e105a4a73912c38b3959dc4502d"}, ] [package.dependencies] @@ -4403,13 +4404,13 @@ openai = ["openai (>=0.27.8)"] [[package]] name = "langsmith" -version = "0.1.71" +version = "0.1.75" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.71-py3-none-any.whl", hash = "sha256:a9979de2780442eb24eced31314e49f5ece6f807a0d70740b2c6c39217226794"}, - {file = "langsmith-0.1.71.tar.gz", hash = "sha256:bdb1037a08acf7c19b3969c085df09c1eecb65baca8400b3b76ae871e2c8a97e"}, + {file = "langsmith-0.1.75-py3-none-any.whl", hash = "sha256:d08b08dd6b3fa4da170377f95123d77122ef4c52999d10fff4ae08ff70d07aed"}, + {file = "langsmith-0.1.75.tar.gz", hash = "sha256:61274e144ea94c297dd78ce03e6dfae18459fe9bd8ab5094d61a0c4816561279"}, ] [package.dependencies] @@ -4419,13 +4420,13 @@ requests = ">=2,<3" [[package]] name = "litellm" -version = "1.40.2" +version = "1.40.7" description = "Library to easily interface with LLM API providers" optional = false python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8" files = [ - {file = "litellm-1.40.2-py3-none-any.whl", hash = "sha256:56ee777eed30ee9acb86e74401d090dcac4adb57b5c8a8714f791b0c97a34afc"}, - {file = "litellm-1.40.2.tar.gz", hash = "sha256:1f5dc4eab7100962c3a2985c7d8c13070ff5793b341540d19b98a2bd85955cb0"}, + {file = "litellm-1.40.7-py3-none-any.whl", hash = "sha256:c98dd8733e632aba16f14bf82e56f7159222097a6d085b242a3140b5d3e7baa4"}, + {file = "litellm-1.40.7.tar.gz", hash = "sha256:557bb19e8e484d0dfe8e4eaa9ccefc888617852988a46d6e7adc41585a2c0600"}, ] [package.dependencies] @@ -4467,13 +4468,13 @@ test = ["httpx (>=0.24.1)", "pytest (>=7.4.0)", "scipy (>=1.10)"] [[package]] name = "locust" -version = "2.28.0" +version = "2.29.0" description = "Developer-friendly load testing framework" optional = false python-versions = ">=3.9" files = [ - {file = "locust-2.28.0-py3-none-any.whl", hash = "sha256:766be879db030c0118e7d9fca712f3538c4e628bdebf59468fa1c6c2fab217d3"}, - {file = "locust-2.28.0.tar.gz", hash = "sha256:260557eec866f7e34a767b6c916b5b278167562a280480aadb88f43d962fbdeb"}, + {file = "locust-2.29.0-py3-none-any.whl", hash = "sha256:aa9d94d3604ed9f2aab3248460d91e55d3de980a821dffdf8658b439b049d03f"}, + {file = "locust-2.29.0.tar.gz", hash = "sha256:649c99ce49d00720a3084c0109547035ad9021222835386599a8b545d31ebe51"}, ] [package.dependencies] @@ -4487,7 +4488,10 @@ msgpack = ">=1.0.0" psutil = ">=5.9.1" pywin32 = {version = "*", markers = "platform_system == \"Windows\""} pyzmq = ">=25.0.0" -requests = ">=2.26.0" +requests = [ + {version = ">=2.32.2", markers = "python_version > \"3.11\""}, + {version = ">=2.26.0", markers = "python_version <= \"3.11\""}, +] tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} Werkzeug = ">=2.0.0" @@ -4796,13 +4800,13 @@ files = [ [[package]] name = "marshmallow" -version = "3.21.2" +version = "3.21.3" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false python-versions = ">=3.8" files = [ - {file = "marshmallow-3.21.2-py3-none-any.whl", hash = "sha256:70b54a6282f4704d12c0a41599682c5c5450e843b9ec406308653b47c59648a1"}, - {file = "marshmallow-3.21.2.tar.gz", hash = "sha256:82408deadd8b33d56338d2182d455db632c6313aa2af61916672146bb32edc56"}, + {file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"}, + {file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"}, ] [package.dependencies] @@ -5583,13 +5587,13 @@ sympy = "*" [[package]] name = "openai" -version = "1.31.0" +version = "1.33.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.31.0-py3-none-any.whl", hash = "sha256:82044ee3122113f2a468a1f308a8882324d09556ba5348687c535d3655ee331c"}, - {file = "openai-1.31.0.tar.gz", hash = "sha256:54ae0625b005d6a3b895db2b8438dae1059cffff0cd262a26e9015c13a29ab06"}, + {file = "openai-1.33.0-py3-none-any.whl", hash = "sha256:621163b56570897ab8389d187f686a53d4771fd6ce95d481c0a9611fe8bc4229"}, + {file = "openai-1.33.0.tar.gz", hash = "sha256:1169211a7b326ecbc821cafb427c29bfd0871f9a3e0947dd9e51acb3b0f1df78"}, ] [package.dependencies] @@ -5915,9 +5919,9 @@ files = [ [package.dependencies] numpy = [ + {version = ">=1.26.0,<2", markers = "python_version >= \"3.12\""}, {version = ">=1.22.4,<2", markers = "python_version < \"3.11\""}, {version = ">=1.23.2,<2", markers = "python_version == \"3.11\""}, - {version = ">=1.26.0,<2", markers = "python_version >= \"3.12\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" @@ -6837,13 +6841,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pydantic-settings" -version = "2.3.0" +version = "2.3.1" description = "Settings management using Pydantic" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_settings-2.3.0-py3-none-any.whl", hash = "sha256:26eeed27370a9c5e3f64e4a7d6602573cbedf05ed940f1d5b11c3f178427af7a"}, - {file = "pydantic_settings-2.3.0.tar.gz", hash = "sha256:78db28855a71503cfe47f39500a1dece523c640afd5280edb5c5c9c9cfa534c9"}, + {file = "pydantic_settings-2.3.1-py3-none-any.whl", hash = "sha256:acb2c213140dfff9669f4fe9f8180d43914f51626db28ab2db7308a576cce51a"}, + {file = "pydantic_settings-2.3.1.tar.gz", hash = "sha256:e34bbd649803a6bb3e2f0f58fb0edff1f0c7f556849fda106cc21bcce12c30ab"}, ] [package.dependencies] @@ -7575,13 +7579,13 @@ websockets = ">=11,<13" [[package]] name = "redis" -version = "5.0.4" +version = "5.0.5" description = "Python client for Redis database and key-value store" optional = true python-versions = ">=3.7" files = [ - {file = "redis-5.0.4-py3-none-any.whl", hash = "sha256:7adc2835c7a9b5033b7ad8f8918d09b7344188228809c98df07af226d39dec91"}, - {file = "redis-5.0.4.tar.gz", hash = "sha256:ec31f2ed9675cc54c21ba854cfe0462e6faf1d83c8ce5944709db8a4700b9c61"}, + {file = "redis-5.0.5-py3-none-any.whl", hash = "sha256:30b47d4ebb6b7a0b9b40c1275a19b87bb6f46b3bed82a89012cf56dea4024ada"}, + {file = "redis-5.0.5.tar.gz", hash = "sha256:3417688621acf6ee368dec4a04dd95881be24efd34c79f00d31f62bb528800ae"}, ] [package.dependencies] @@ -7766,28 +7770,28 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = "0.4.7" +version = "0.4.8" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.4.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:e089371c67892a73b6bb1525608e89a2aca1b77b5440acf7a71dda5dac958f9e"}, - {file = "ruff-0.4.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:10f973d521d910e5f9c72ab27e409e839089f955be8a4c8826601a6323a89753"}, - {file = "ruff-0.4.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59c3d110970001dfa494bcd95478e62286c751126dfb15c3c46e7915fc49694f"}, - {file = "ruff-0.4.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa9773c6c00f4958f73b317bc0fd125295110c3776089f6ef318f4b775f0abe4"}, - {file = "ruff-0.4.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07fc80bbb61e42b3b23b10fda6a2a0f5a067f810180a3760c5ef1b456c21b9db"}, - {file = "ruff-0.4.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:fa4dafe3fe66d90e2e2b63fa1591dd6e3f090ca2128daa0be33db894e6c18648"}, - {file = "ruff-0.4.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7c0083febdec17571455903b184a10026603a1de078428ba155e7ce9358c5f6"}, - {file = "ruff-0.4.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ad1b20e66a44057c326168437d680a2166c177c939346b19c0d6b08a62a37589"}, - {file = "ruff-0.4.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbf5d818553add7511c38b05532d94a407f499d1a76ebb0cad0374e32bc67202"}, - {file = "ruff-0.4.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:50e9651578b629baec3d1513b2534de0ac7ed7753e1382272b8d609997e27e83"}, - {file = "ruff-0.4.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8874a9df7766cb956b218a0a239e0a5d23d9e843e4da1e113ae1d27ee420877a"}, - {file = "ruff-0.4.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:b9de9a6e49f7d529decd09381c0860c3f82fa0b0ea00ea78409b785d2308a567"}, - {file = "ruff-0.4.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:13a1768b0691619822ae6d446132dbdfd568b700ecd3652b20d4e8bc1e498f78"}, - {file = "ruff-0.4.7-py3-none-win32.whl", hash = "sha256:769e5a51df61e07e887b81e6f039e7ed3573316ab7dd9f635c5afaa310e4030e"}, - {file = "ruff-0.4.7-py3-none-win_amd64.whl", hash = "sha256:9e3ab684ad403a9ed1226894c32c3ab9c2e0718440f6f50c7c5829932bc9e054"}, - {file = "ruff-0.4.7-py3-none-win_arm64.whl", hash = "sha256:10f2204b9a613988e3484194c2c9e96a22079206b22b787605c255f130db5ed7"}, - {file = "ruff-0.4.7.tar.gz", hash = "sha256:2331d2b051dc77a289a653fcc6a42cce357087c5975738157cd966590b18b5e1"}, + {file = "ruff-0.4.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7663a6d78f6adb0eab270fa9cf1ff2d28618ca3a652b60f2a234d92b9ec89066"}, + {file = "ruff-0.4.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eeceb78da8afb6de0ddada93112869852d04f1cd0f6b80fe464fd4e35c330913"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aad360893e92486662ef3be0a339c5ca3c1b109e0134fcd37d534d4be9fb8de3"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:284c2e3f3396fb05f5f803c9fffb53ebbe09a3ebe7dda2929ed8d73ded736deb"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7354f921e3fbe04d2a62d46707e569f9315e1a613307f7311a935743c51a764"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:72584676164e15a68a15778fd1b17c28a519e7a0622161eb2debdcdabdc71883"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:9678d5c9b43315f323af2233a04d747409d1e3aa6789620083a82d1066a35199"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704977a658131651a22b5ebeb28b717ef42ac6ee3b11e91dc87b633b5d83142b"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d05f8d6f0c3cce5026cecd83b7a143dcad503045857bc49662f736437380ad45"}, + {file = "ruff-0.4.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6ea874950daca5697309d976c9afba830d3bf0ed66887481d6bca1673fc5b66a"}, + {file = "ruff-0.4.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fc95aac2943ddf360376be9aa3107c8cf9640083940a8c5bd824be692d2216dc"}, + {file = "ruff-0.4.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:384154a1c3f4bf537bac69f33720957ee49ac8d484bfc91720cc94172026ceed"}, + {file = "ruff-0.4.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e9d5ce97cacc99878aa0d084c626a15cd21e6b3d53fd6f9112b7fc485918e1fa"}, + {file = "ruff-0.4.8-py3-none-win32.whl", hash = "sha256:6d795d7639212c2dfd01991259460101c22aabf420d9b943f153ab9d9706e6a9"}, + {file = "ruff-0.4.8-py3-none-win_amd64.whl", hash = "sha256:e14a3a095d07560a9d6769a72f781d73259655919d9b396c650fc98a8157555d"}, + {file = "ruff-0.4.8-py3-none-win_arm64.whl", hash = "sha256:14019a06dbe29b608f6b7cbcec300e3170a8d86efaddb7b23405cb7f7dcaf780"}, + {file = "ruff-0.4.8.tar.gz", hash = "sha256:16d717b1d57b2e2fd68bd0bf80fb43931b79d05a7131aa477d66fc40fbd86268"}, ] [[package]] @@ -8313,13 +8317,13 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7 [[package]] name = "storage3" -version = "0.7.5" +version = "0.7.6" description = "Supabase Storage client for Python." optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "storage3-0.7.5-py3-none-any.whl", hash = "sha256:a2d9fdacafdcbcdb6776a54987a7d84c3e3195a5e4782955c4ccfb36cb021f14"}, - {file = "storage3-0.7.5.tar.gz", hash = "sha256:ffe43f3877898b43a94024e68c2aaf4cebb3ad73dbbbd67747041d1d70bbf032"}, + {file = "storage3-0.7.6-py3-none-any.whl", hash = "sha256:d8c23bf87b3a88cafb03761b7f936e4e49daca67741d571513edf746e0f8ba72"}, + {file = "storage3-0.7.6.tar.gz", hash = "sha256:0b7781cea7fe6382e6b9349b84395808c5f4203dfcac31478304eedc2f81acf6"}, ] [package.dependencies] @@ -8381,13 +8385,13 @@ supafunc = ">=0.3.1,<0.5.0" [[package]] name = "supafunc" -version = "0.4.5" +version = "0.4.6" description = "Library for Supabase Functions" optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "supafunc-0.4.5-py3-none-any.whl", hash = "sha256:2208045f8f5c797924666f6a332efad75ad368f8030b2e4ceb9d2bf63f329373"}, - {file = "supafunc-0.4.5.tar.gz", hash = "sha256:a6466d78bdcaa58b7f0303793643103baae8106a87acd5d01e196179a9d0d024"}, + {file = "supafunc-0.4.6-py3-none-any.whl", hash = "sha256:f7ca7b244365e171da7055a64edb462c2ec449cdaa210fc418cfccd132f4cf98"}, + {file = "supafunc-0.4.6.tar.gz", hash = "sha256:92db51f8f8568d1430285219c9c0072e44207409c416622d7387f609e31928a6"}, ] [package.dependencies] @@ -8653,31 +8657,31 @@ files = [ [[package]] name = "torch" -version = "2.3.0" +version = "2.3.1" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" optional = true python-versions = ">=3.8.0" files = [ - {file = "torch-2.3.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:d8ea5a465dbfd8501f33c937d1f693176c9aef9d1c1b0ca1d44ed7b0a18c52ac"}, - {file = "torch-2.3.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:09c81c5859a5b819956c6925a405ef1cdda393c9d8a01ce3851453f699d3358c"}, - 
{file = "torch-2.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:1bf023aa20902586f614f7682fedfa463e773e26c58820b74158a72470259459"}, - {file = "torch-2.3.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:758ef938de87a2653bba74b91f703458c15569f1562bf4b6c63c62d9c5a0c1f5"}, - {file = "torch-2.3.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:493d54ee2f9df100b5ce1d18c96dbb8d14908721f76351e908c9d2622773a788"}, - {file = "torch-2.3.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:bce43af735c3da16cc14c7de2be7ad038e2fbf75654c2e274e575c6c05772ace"}, - {file = "torch-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:729804e97b7cf19ae9ab4181f91f5e612af07956f35c8b2c8e9d9f3596a8e877"}, - {file = "torch-2.3.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:d24e328226d8e2af7cf80fcb1d2f1d108e0de32777fab4aaa2b37b9765d8be73"}, - {file = "torch-2.3.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:b0de2bdc0486ea7b14fc47ff805172df44e421a7318b7c4d92ef589a75d27410"}, - {file = "torch-2.3.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:a306c87a3eead1ed47457822c01dfbd459fe2920f2d38cbdf90de18f23f72542"}, - {file = "torch-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:f9b98bf1a3c8af2d4c41f0bf1433920900896c446d1ddc128290ff146d1eb4bd"}, - {file = "torch-2.3.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:dca986214267b34065a79000cee54232e62b41dff1ec2cab9abc3fc8b3dee0ad"}, - {file = "torch-2.3.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:20572f426965dd8a04e92a473d7e445fa579e09943cc0354f3e6fef6130ce061"}, - {file = "torch-2.3.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:e65ba85ae292909cde0dde6369826d51165a3fc8823dc1854cd9432d7f79b932"}, - {file = "torch-2.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:5515503a193781fd1b3f5c474e89c9dfa2faaa782b2795cc4a7ab7e67de923f6"}, - {file = "torch-2.3.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:6ae9f64b09516baa4ef890af0672dc981c20b1f0d829ce115d4420a247e88fba"}, - {file = "torch-2.3.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:cd0dc498b961ab19cb3f8dbf0c6c50e244f2f37dbfa05754ab44ea057c944ef9"}, - {file = "torch-2.3.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:e05f836559251e4096f3786ee99f4a8cbe67bc7fbedba8ad5e799681e47c5e80"}, - {file = "torch-2.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:4fb27b35dbb32303c2927da86e27b54a92209ddfb7234afb1949ea2b3effffea"}, - {file = "torch-2.3.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:760f8bedff506ce9e6e103498f9b1e9e15809e008368594c3a66bf74a8a51380"}, + {file = "torch-2.3.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:605a25b23944be5ab7c3467e843580e1d888b8066e5aaf17ff7bf9cc30001cc3"}, + {file = "torch-2.3.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f2357eb0965583a0954d6f9ad005bba0091f956aef879822274b1bcdb11bd308"}, + {file = "torch-2.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:32b05fe0d1ada7f69c9f86c14ff69b0ef1957a5a54199bacba63d22d8fab720b"}, + {file = "torch-2.3.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:7c09a94362778428484bcf995f6004b04952106aee0ef45ff0b4bab484f5498d"}, + {file = "torch-2.3.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:b2ec81b61bb094ea4a9dee1cd3f7b76a44555375719ad29f05c0ca8ef596ad39"}, + {file = "torch-2.3.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:490cc3d917d1fe0bd027057dfe9941dc1d6d8e3cae76140f5dd9a7e5bc7130ab"}, + {file = "torch-2.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:5802530783bd465fe66c2df99123c9a54be06da118fbd785a25ab0a88123758a"}, + {file = "torch-2.3.1-cp311-none-macosx_11_0_arm64.whl", 
hash = "sha256:a7dd4ed388ad1f3d502bf09453d5fe596c7b121de7e0cfaca1e2017782e9bbac"}, + {file = "torch-2.3.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:a486c0b1976a118805fc7c9641d02df7afbb0c21e6b555d3bb985c9f9601b61a"}, + {file = "torch-2.3.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:224259821fe3e4c6f7edf1528e4fe4ac779c77addaa74215eb0b63a5c474d66c"}, + {file = "torch-2.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:e5fdccbf6f1334b2203a61a0e03821d5845f1421defe311dabeae2fc8fbeac2d"}, + {file = "torch-2.3.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:3c333dc2ebc189561514eda06e81df22bf8fb64e2384746b2cb9f04f96d1d4c8"}, + {file = "torch-2.3.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:07e9ba746832b8d069cacb45f312cadd8ad02b81ea527ec9766c0e7404bb3feb"}, + {file = "torch-2.3.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:462d1c07dbf6bb5d9d2f3316fee73a24f3d12cd8dacf681ad46ef6418f7f6626"}, + {file = "torch-2.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:ff60bf7ce3de1d43ad3f6969983f321a31f0a45df3690921720bcad6a8596cc4"}, + {file = "torch-2.3.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:bee0bd33dc58aa8fc8a7527876e9b9a0e812ad08122054a5bff2ce5abf005b10"}, + {file = "torch-2.3.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:aaa872abde9a3d4f91580f6396d54888620f4a0b92e3976a6034759df4b961ad"}, + {file = "torch-2.3.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:3d7a7f7ef21a7520510553dc3938b0c57c116a7daee20736a9e25cbc0e832bdc"}, + {file = "torch-2.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:4777f6cefa0c2b5fa87223c213e7b6f417cf254a45e5829be4ccd1b2a4ee1011"}, + {file = "torch-2.3.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:2bb5af780c55be68fe100feb0528d2edebace1d55cb2e351de735809ba7391eb"}, ] [package.dependencies] @@ -8698,7 +8702,7 @@ nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \" nvidia-nccl-cu12 = {version = "2.20.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} sympy = "*" -triton = {version = "2.3.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\""} +triton = {version = "2.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\""} typing-extensions = ">=4.8.0" [package.extras] @@ -8707,22 +8711,22 @@ optree = ["optree (>=0.9.1)"] [[package]] name = "tornado" -version = "6.4" +version = "6.4.1" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
optional = false -python-versions = ">= 3.8" +python-versions = ">=3.8" files = [ - {file = "tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0"}, - {file = "tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263"}, - {file = "tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e"}, - {file = "tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579"}, - {file = "tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212"}, - {file = "tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"}, - {file = "tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78"}, - {file = "tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f"}, - {file = "tornado-6.4-cp38-abi3-win32.whl", hash = "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052"}, - {file = "tornado-6.4-cp38-abi3-win_amd64.whl", hash = "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63"}, - {file = "tornado-6.4.tar.gz", hash = "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee"}, + {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"}, + {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"}, + {file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"}, + {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"}, + {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"}, + {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"}, + {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"}, + {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"}, + {file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"}, + {file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"}, + {file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"}, ] [[package]] @@ -8830,17 +8834,17 @@ vision = ["Pillow (>=10.0.1,<=15.0)"] [[package]] name = "triton" -version = "2.3.0" +version = 
"2.3.1" description = "A language and compiler for custom Deep Learning operations" optional = true python-versions = "*" files = [ - {file = "triton-2.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ce4b8ff70c48e47274c66f269cce8861cf1dc347ceeb7a67414ca151b1822d8"}, - {file = "triton-2.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c3d9607f85103afdb279938fc1dd2a66e4f5999a58eb48a346bd42738f986dd"}, - {file = "triton-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:218d742e67480d9581bafb73ed598416cc8a56f6316152e5562ee65e33de01c0"}, - {file = "triton-2.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:381ec6b3dac06922d3e4099cfc943ef032893b25415de295e82b1a82b0359d2c"}, - {file = "triton-2.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:038e06a09c06a164fef9c48de3af1e13a63dc1ba3c792871e61a8e79720ea440"}, - {file = "triton-2.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d8f636e0341ac348899a47a057c3daea99ea7db31528a225a3ba4ded28ccc65"}, + {file = "triton-2.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c84595cbe5e546b1b290d2a58b1494df5a2ef066dd890655e5b8a8a92205c33"}, + {file = "triton-2.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9d64ae33bcb3a7a18081e3a746e8cf87ca8623ca13d2c362413ce7a486f893e"}, + {file = "triton-2.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaf80e8761a9e3498aa92e7bf83a085b31959c61f5e8ac14eedd018df6fccd10"}, + {file = "triton-2.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b13bf35a2b659af7159bf78e92798dc62d877aa991de723937329e2d382f1991"}, + {file = "triton-2.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63381e35ded3304704ea867ffde3b7cfc42c16a55b3062d41e017ef510433d66"}, + {file = "triton-2.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d968264523c7a07911c8fb51b4e0d1b920204dae71491b1fe7b01b62a31e124"}, ] [package.dependencies] @@ -9041,13 +9045,13 @@ files = [ [[package]] name = "typing-extensions" -version = "4.12.1" +version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.12.1-py3-none-any.whl", hash = "sha256:6024b58b69089e5a89c347397254e35f1bf02a907728ec7fee9bf0fe837d203a"}, - {file = "typing_extensions-4.12.1.tar.gz", hash = "sha256:915f5e35ff76f56588223f15fdd5938f9a1cf9195c0de25130c627e4d597f6d1"}, + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] [[package]] @@ -10054,4 +10058,4 @@ local = ["ctransformers", "llama-cpp-python", "sentence-transformers"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "83c94ed0fa28b968553221385251b871139a7440ab0420f867efbe16568b8411" +content-hash = "0ee3f3bef82d57be2ab4ae7b70215ebca67b5bd5223e6a9322ee1837516a3cc6" diff --git a/pyproject.toml b/pyproject.toml index fb1fc5e3d..cf11dc07d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langflow" -version = "1.0.0a45" +version = "1.0.0a49" description = "A Python package with a built-in web application" 
authors = ["Langflow "] maintainers = [ @@ -66,7 +66,7 @@ qianfan = "0.3.5" pgvector = "^0.2.3" pyautogen = "^0.2.0" langchain-google-genai = "^1.0.1" -langchain-cohere = "^0.1.0rc1" +langchain-cohere = "^0.1.5" elasticsearch = "^8.12.0" pytube = "^15.0.0" dspy-ai = "^2.4.0" @@ -115,6 +115,7 @@ pytest-asyncio = "^0.23.0" pytest-profiling = "^1.7.0" pre-commit = "^3.7.0" vulture = "^2.11" +dictdiffer = "^0.9.0" [tool.poetry.extras] deploy = ["celery", "redis", "flower"] diff --git a/scripts/factory_restart_space.py b/scripts/factory_restart_space.py index e9972e8cb..07a25d0de 100644 --- a/scripts/factory_restart_space.py +++ b/scripts/factory_restart_space.py @@ -1,4 +1,4 @@ -import os +import argparse from huggingface_hub import HfApi, list_models from rich import print @@ -6,11 +6,27 @@ from rich import print # Use root method models = list_models() +args = argparse.ArgumentParser(description="Restart a space in the Hugging Face Hub.") +args.add_argument("--space", type=str, help="The space to restart.") +args.add_argument("--token", type=str, help="The Hugging Face API token.") + +parsed_args = args.parse_args() + +space = parsed_args.space + +if not space: + print("Please provide a space to restart.") + exit() + +if not parsed_args.token: + print("Please provide an API token.") + exit() + # Or configure a HfApi client hf_api = HfApi( endpoint="https://huggingface.co", # Can be a Private Hub endpoint. - token=os.getenv("HUGGINFACE_API_TOKEN"), + token=parsed_args.token, ) -space_runtime = hf_api.restart_space("Langflow/Langflow-Preview", factory_reboot=True) +space_runtime = hf_api.restart_space(space, factory_reboot=True) print(space_runtime) diff --git a/src/backend/base/langflow/api/utils.py b/src/backend/base/langflow/api/utils.py index cc38b474a..1dbd68d8f 100644 --- a/src/backend/base/langflow/api/utils.py +++ b/src/backend/base/langflow/api/utils.py @@ -86,6 +86,10 @@ def update_frontend_node_with_template_values(frontend_node, raw_frontend_node): update_template_values(frontend_node["template"], raw_frontend_node["template"]) + old_code = raw_frontend_node["template"]["code"]["value"] + new_code = frontend_node["template"]["code"]["value"] + frontend_node["edited"] = old_code != new_code + return frontend_node @@ -204,16 +208,18 @@ def format_elapsed_time(elapsed_time: float) -> str: return f"{minutes} {minutes_unit}, {seconds} {seconds_unit}" -async def build_and_cache_graph_from_db( - flow_id: str, - session: Session, - chat_service: "ChatService", -): +async def build_and_cache_graph_from_db(flow_id: str, session: Session, chat_service: "ChatService"): """Build and cache the graph.""" flow: Optional[Flow] = session.get(Flow, flow_id) if not flow or not flow.data: raise ValueError("Invalid flow ID") graph = Graph.from_payload(flow.data, flow_id) + for vertex_id in graph._has_session_id_vertices: + vertex = graph.get_vertex(vertex_id) + if vertex is None: + raise ValueError(f"Vertex {vertex_id} not found") + if not vertex._raw_params.get("session_id"): + vertex.update_raw_params({"session_id": flow_id}) await chat_service.set_cache(flow_id, graph) return graph @@ -317,3 +323,4 @@ def parse_exception(exc): if hasattr(exc, "body"): return exc.body["message"] return str(exc) + return str(exc) diff --git a/src/backend/base/langflow/api/v1/chat.py b/src/backend/base/langflow/api/v1/chat.py index fbb763e8d..004517575 100644 --- a/src/backend/base/langflow/api/v1/chat.py +++ b/src/backend/base/langflow/api/v1/chat.py @@ -22,6 +22,7 @@ from langflow.api.v1.schemas import ( 
VertexBuildResponse, VerticesOrderResponse, ) +from langflow.schema.schema import Log from langflow.services.auth.utils import get_current_active_user from langflow.services.chat.service import ChatService from langflow.services.deps import get_chat_service, get_session, get_session_service @@ -123,6 +124,7 @@ async def build_vertex( vertex_id: str, background_tasks: BackgroundTasks, inputs: Annotated[Optional[InputValueRequest], Body(embed=True)] = None, + files: Optional[list[str]] = None, chat_service: "ChatService" = Depends(get_chat_service), current_user=Depends(get_current_active_user), ): @@ -159,6 +161,7 @@ async def build_vertex( else: graph = cache.get("result") vertex = graph.get_vertex(vertex_id) + try: lock = chat_service._cache_locks[flow_id_str] ( @@ -175,19 +178,25 @@ async def build_vertex( vertex_id=vertex_id, user_id=current_user.id, inputs_dict=inputs.model_dump() if inputs else {}, + files=files, ) + log_obj = Log(message=vertex.artifacts_raw, type=vertex.artifacts_type) result_data_response = ResultDataResponse(**result_dict.model_dump()) except Exception as exc: logger.exception(f"Error building vertex: {exc}") params = format_exception_message(exc) valid = False + log_obj = Log(message=params, type="error") result_data_response = ResultDataResponse(results={}) artifacts = {} # If there's an error building the vertex # we need to clear the cache await chat_service.clear_cache(flow_id_str) + result_data_response.message = artifacts + result_data_response.logs.append(log_obj) + # Log the vertex build if not vertex.will_stream: background_tasks.add_task( diff --git a/src/backend/base/langflow/api/v1/files.py b/src/backend/base/langflow/api/v1/files.py index bbe97f81a..f6293686f 100644 --- a/src/backend/base/langflow/api/v1/files.py +++ b/src/backend/base/langflow/api/v1/files.py @@ -2,6 +2,7 @@ import hashlib from http import HTTPStatus from io import BytesIO from uuid import UUID +from pathlib import Path from fastapi import APIRouter, Depends, HTTPException, UploadFile from fastapi.responses import StreamingResponse @@ -99,6 +100,47 @@ async def download_image(file_name: str, flow_id: UUID, storage_service: Storage raise HTTPException(status_code=500, detail=str(e)) +@router.get("/profile_pictures/{folder_name}/{file_name}") +async def download_profile_picture( + folder_name: str, + file_name: str, + storage_service: StorageService = Depends(get_storage_service), +): + try: + extension = file_name.split(".")[-1] + config_dir = get_storage_service().settings_service.settings.config_dir + config_path = Path(config_dir) + folder_path = config_path / 'profile_pictures' / folder_name + content_type = build_content_type_from_extension(extension) + file_content = await storage_service.get_file(flow_id=folder_path, file_name=file_name) + return StreamingResponse(BytesIO(file_content), media_type=content_type) + + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +@router.get("/profile_pictures/list") +async def list_profile_pictures(storage_service: StorageService = Depends(get_storage_service)): + try: + config_dir = get_storage_service().settings_service.settings.config_dir + config_path = Path(config_dir) + + people_path = config_path / "profile_pictures/People" + space_path = config_path / "profile_pictures/Space" + + people = await storage_service.list_files(flow_id=people_path) + space = await storage_service.list_files(flow_id=space_path) + + files = [Path("People") / i for i in people] + files += [Path("Space") / i for i in space] + + 
return {"files": files} + + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + + @router.get("/list/{flow_id}") async def list_files( flow_id: UUID = Depends(get_flow_id), storage_service: StorageService = Depends(get_storage_service) diff --git a/src/backend/base/langflow/api/v1/flows.py b/src/backend/base/langflow/api/v1/flows.py index 36030a12d..c1ccf68db 100644 --- a/src/backend/base/langflow/api/v1/flows.py +++ b/src/backend/base/langflow/api/v1/flows.py @@ -9,7 +9,7 @@ from loguru import logger from sqlmodel import Session, col, select from langflow.api.utils import remove_api_keys, validate_is_component -from langflow.api.v1.schemas import FlowListCreate, FlowListIds, FlowListRead +from langflow.api.v1.schemas import FlowListCreate, FlowListRead from langflow.initial_setup.setup import STARTER_FOLDER_NAME from langflow.services.auth.utils import get_current_active_user from langflow.services.database.models.flow import Flow, FlowCreate, FlowRead, FlowUpdate @@ -258,9 +258,9 @@ async def download_file( return FlowListRead(flows=flows) -@router.post("/multiple_delete/") +@router.delete("/") async def delete_multiple_flows( - flow_ids: FlowListIds, user: User = Depends(get_current_active_user), db: Session = Depends(get_session) + flow_ids: List[UUID], user: User = Depends(get_current_active_user), db: Session = Depends(get_session) ): """ Delete multiple flows by their IDs. @@ -274,9 +274,7 @@ async def delete_multiple_flows( """ try: - deleted_flows = db.exec( - select(Flow).where(col(Flow.id).in_(flow_ids.flow_ids)).where(Flow.user_id == user.id) - ).all() + deleted_flows = db.exec(select(Flow).where(col(Flow.id).in_(flow_ids)).where(Flow.user_id == user.id)).all() for flow in deleted_flows: db.delete(flow) db.commit() diff --git a/src/backend/base/langflow/api/v1/folders.py b/src/backend/base/langflow/api/v1/folders.py index 7402881c7..d55f9bd15 100644 --- a/src/backend/base/langflow/api/v1/folders.py +++ b/src/backend/base/langflow/api/v1/folders.py @@ -1,5 +1,7 @@ from typing import List +from langflow.helpers.flow import generate_unique_flow_name +from langflow.helpers.folders import generate_unique_folder_name import orjson from fastapi import APIRouter, Depends, File, HTTPException, Response, UploadFile, status from sqlalchemy import or_, update @@ -203,16 +205,9 @@ async def upload_file( if not data: raise HTTPException(status_code=400, detail="No flows found in the file") - folder_results = session.exec( - select(Folder).where( - Folder.name == data["folder_name"], - Folder.user_id == current_user.id, - ) - ) - existing_folder_names = [folder.name for folder in folder_results] + folder_name = generate_unique_folder_name(data["folder_name"], current_user.id, session) - if existing_folder_names: - data["folder_name"] = f"{data['folder_name']} ({len(existing_folder_names) + 1})" + data["folder_name"] = folder_name folder = FolderCreate(name=data["folder_name"], description=data["folder_description"]) @@ -232,6 +227,8 @@ async def upload_file( raise HTTPException(status_code=400, detail="No flows found in the data") # Now we set the user_id for all flows for flow in flow_list.flows: + flow_name = generate_unique_flow_name(flow.name, current_user.id, session) + flow.name = flow_name flow.user_id = current_user.id flow.folder_id = new_folder.id diff --git a/src/backend/base/langflow/api/v1/login.py b/src/backend/base/langflow/api/v1/login.py index cde6bd28b..2637cc865 100644 --- a/src/backend/base/langflow/api/v1/login.py +++ 
b/src/backend/base/langflow/api/v1/login.py @@ -71,9 +71,7 @@ async def login_to_get_access_token( @router.get("/auto_login") async def auto_login( - response: Response, - db: Session = Depends(get_session), - settings_service=Depends(get_settings_service) + response: Response, db: Session = Depends(get_session), settings_service=Depends(get_settings_service) ): auth_settings = settings_service.auth_settings if settings_service.auth_settings.AUTO_LOGIN: diff --git a/src/backend/base/langflow/api/v1/monitor.py b/src/backend/base/langflow/api/v1/monitor.py index 05fee6f03..9714e4592 100644 --- a/src/backend/base/langflow/api/v1/monitor.py +++ b/src/backend/base/langflow/api/v1/monitor.py @@ -4,6 +4,7 @@ from fastapi import APIRouter, Depends, HTTPException, Query from langflow.services.deps import get_monitor_service from langflow.services.monitor.schema import ( + MessageModelRequest, MessageModelResponse, TransactionModelResponse, VertexBuildMapModel, @@ -66,6 +67,44 @@ async def get_messages( raise HTTPException(status_code=500, detail=str(e)) +@router.delete("/messages", status_code=204) +async def delete_messages( + message_ids: List[int], + monitor_service: MonitorService = Depends(get_monitor_service), +): + try: + monitor_service.delete_messages(message_ids=message_ids) + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +@router.post("/messages/{message_id}", response_model=MessageModelResponse) +async def update_message( + message_id: int, + message: MessageModelRequest, + monitor_service: MonitorService = Depends(get_monitor_service), +): + try: + message_dict = message.model_dump(exclude_none=True) + message_dict.pop("index", None) + monitor_service.update_message(message_id=message_id, **message_dict) + return MessageModelResponse(index=message_id, **message_dict) + + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +@router.delete("/messages/session/{session_id}", status_code=204) +async def delete_messages_session( + session_id: str, + monitor_service: MonitorService = Depends(get_monitor_service), +): + try: + monitor_service.delete_messages_session(session_id=session_id) + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + @router.get("/transactions", response_model=List[TransactionModelResponse]) async def get_transactions( source: Optional[str] = Query(None), @@ -79,6 +118,21 @@ async def get_transactions( dicts = monitor_service.get_transactions( source=source, target=target, status=status, order_by=order_by, flow_id=flow_id ) - return [TransactionModelResponse(**d) for d in dicts] + result = [] + for d in dicts: + d = TransactionModelResponse( + index=d["index"], + timestamp=d["timestamp"], + vertex_id=d["vertex_id"], + inputs=d["inputs"], + outputs=d["outputs"], + status=d["status"], + error=d["error"], + flow_id=d["flow_id"], + source=d["vertex_id"], + target=d["target_id"], + ) + result.append(d) + return result except Exception as e: raise HTTPException(status_code=500, detail=str(e)) diff --git a/src/backend/base/langflow/api/v1/schemas.py b/src/backend/base/langflow/api/v1/schemas.py index 9ccdb0085..1e0308bd5 100644 --- a/src/backend/base/langflow/api/v1/schemas.py +++ b/src/backend/base/langflow/api/v1/schemas.py @@ -9,7 +9,7 @@ from pydantic import BaseModel, ConfigDict, Field, field_validator, model_serial from langflow.graph.schema import RunOutputs from langflow.schema import dotdict from langflow.schema.graph
import Tweaks -from langflow.schema.schema import InputType, OutputType +from langflow.schema.schema import InputType, Log, OutputType from langflow.services.database.models.api_key.model import ApiKeyRead from langflow.services.database.models.base import orjson_dumps from langflow.services.database.models.flow import FlowCreate, FlowRead @@ -245,6 +245,8 @@ class VerticesOrderResponse(BaseModel): class ResultDataResponse(BaseModel): results: Optional[Any] = Field(default_factory=dict) + logs: List[Log | None] = Field(default_factory=list) + message: Optional[Any] = Field(default_factory=dict) artifacts: Optional[Any] = Field(default_factory=dict) timedelta: Optional[float] = None duration: Optional[str] = None diff --git a/src/backend/base/langflow/base/agents/agent.py b/src/backend/base/langflow/base/agents/agent.py index ce40f1f51..d4328032d 100644 --- a/src/backend/base/langflow/base/agents/agent.py +++ b/src/backend/base/langflow/base/agents/agent.py @@ -7,7 +7,7 @@ from langchain_core.runnables import Runnable from langflow.base.agents.utils import get_agents_list, records_to_messages from langflow.custom import CustomComponent from langflow.field_typing import Text, Tool -from langflow.schema.schema import Record +from langflow.schema import Record class LCAgentComponent(CustomComponent): diff --git a/src/backend/base/langflow/base/agents/utils.py b/src/backend/base/langflow/base/agents/utils.py index cb34d1cea..781fa2362 100644 --- a/src/backend/base/langflow/base/agents/utils.py +++ b/src/backend/base/langflow/base/agents/utils.py @@ -13,7 +13,7 @@ from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate from langchain_core.tools import BaseTool from pydantic import BaseModel -from langflow.schema.schema import Record +from langflow.schema import Record from .default_prompts import XML_AGENT_PROMPT diff --git a/src/backend/base/langflow/base/constants.py b/src/backend/base/langflow/base/constants.py index 498b46f65..cb520a835 100644 --- a/src/backend/base/langflow/base/constants.py +++ b/src/backend/base/langflow/base/constants.py @@ -7,9 +7,11 @@ Constants: - FIELD_FORMAT_ATTRIBUTES: A list of attributes used for formatting fields. """ +import orjson + STREAM_INFO_TEXT = "Stream the response from the model. Streaming works only in Chat." 
-NODE_FORMAT_ATTRIBUTES = ["beta", "icon", "display_name", "description"] +NODE_FORMAT_ATTRIBUTES = ["beta", "icon", "display_name", "description", "output_types"] FIELD_FORMAT_ATTRIBUTES = [ @@ -27,3 +29,5 @@ FIELD_FORMAT_ATTRIBUTES = [ "refresh_button_text", "options", ] + +ORJSON_OPTIONS = orjson.OPT_INDENT_2 | orjson.OPT_SORT_KEYS | orjson.OPT_OMIT_MICROSECONDS diff --git a/src/backend/base/langflow/base/curl/parse.py b/src/backend/base/langflow/base/curl/parse.py index c86638306..c3c2d31ce 100644 --- a/src/backend/base/langflow/base/curl/parse.py +++ b/src/backend/base/langflow/base/curl/parse.py @@ -15,10 +15,25 @@ import shlex from collections import OrderedDict, namedtuple from http.cookies import SimpleCookie -from uncurl.api import parser # type: ignore - -parser.add_argument("-x", "--proxy", default={}) -parser.add_argument("-U", "--proxy-user", default="") +ParsedArgs = namedtuple( + "ParsedArgs", + [ + "command", + "url", + "data", + "data_binary", + "method", + "headers", + "compressed", + "insecure", + "user", + "include", + "silent", + "proxy", + "proxy_user", + "cookies", + ], +) ParsedContext = namedtuple("ParsedContext", ["method", "url", "data", "headers", "cookies", "verify", "auth", "proxy"]) @@ -27,24 +42,90 @@ def normalize_newlines(multiline_text): return multiline_text.replace(" \\\n", " ") +def parse_curl_command(curl_command): + tokens = shlex.split(normalize_newlines(curl_command)) + tokens = [token for token in tokens if token and token != " "] + if "curl" not in tokens[0]: + raise ValueError("Invalid curl command") + args_template = { + "command": None, + "url": None, + "data": None, + "data_binary": None, + "method": "get", + "headers": [], + "compressed": False, + "insecure": False, + "user": (), + "include": False, + "silent": False, + "proxy": None, + "proxy_user": None, + "cookies": {}, + } + args = args_template.copy() + method_on_curl = None + i = 0 + while i < len(tokens): + token = tokens[i] + if token == "-X": + i += 1 + args["method"] = tokens[i].lower() + method_on_curl = tokens[i].lower() + elif token in ("-d", "--data"): + i += 1 + args["data"] = tokens[i] + elif token in ("-b", "--data-binary", "--data-raw"): + i += 1 + args["data_binary"] = tokens[i] + elif token in ("-H", "--header"): + i += 1 + args["headers"].append(tokens[i]) + elif token == "--compressed": + args["compressed"] = True + elif token in ("-k", "--insecure"): + args["insecure"] = True + elif token in ("-u", "--user"): + i += 1 + args["user"] = tuple(tokens[i].split(":")) + elif token in ("-I", "--include"): + args["include"] = True + elif token in ("-s", "--silent"): + args["silent"] = True + elif token in ("-x", "--proxy"): + i += 1 + args["proxy"] = tokens[i] + elif token in ("-U", "--proxy-user"): + i += 1 + args["proxy_user"] = tokens[i] + elif not token.startswith("-"): + if args["command"] is None: + args["command"] = token + else: + args["url"] = token + i += 1 + + args["method"] = method_on_curl or args["method"] + + return ParsedArgs(**args) + + def parse_context(curl_command): method = "get" - tokens = shlex.split(normalize_newlines(curl_command)) - tokens = [token for token in tokens if token and token != " "] - parsed_args = parser.parse_args(tokens) + parsed_args: ParsedArgs = parse_curl_command(curl_command) post_data = parsed_args.data or parsed_args.data_binary if post_data: method = "post" - if parsed_args.X: - method = parsed_args.X.lower() + if parsed_args.method: + method = parsed_args.method.lower() cookie_dict = OrderedDict() quoted_headers = 
OrderedDict() - for curl_header in parsed_args.header: + for curl_header in parsed_args.headers: if curl_header.startswith(":"): occurrence = [m.start() for m in re.finditer(":", curl_header)] header_key, header_value = curl_header[: occurrence[1]], curl_header[occurrence[1] + 1 :] diff --git a/src/backend/base/langflow/base/data/utils.py b/src/backend/base/langflow/base/data/utils.py index 2aaf3b23d..3779e8065 100644 --- a/src/backend/base/langflow/base/data/utils.py +++ b/src/backend/base/langflow/base/data/utils.py @@ -1,12 +1,14 @@ -import json +import unicodedata import xml.etree.ElementTree as ET from concurrent import futures from pathlib import Path from typing import Callable, List, Optional, Text + import chardet +import orjson import yaml -from langflow.schema.schema import Record +from langflow.schema import Record # Types of files that can be read simply by file.read() # and have 100% to be completely readable @@ -31,6 +33,17 @@ TEXT_FILE_TYPES = [ "tsx", ] +IMG_FILE_TYPES = [ + "jpg", + "jpeg", + "png", + "bmp", +] + + +def normalize_text(text): + return unicodedata.normalize("NFKD", text) + def is_hidden(path: Path) -> bool: return path.name.startswith(".") @@ -92,7 +105,10 @@ def read_text_file(file_path: str) -> str: with open(file_path, "rb") as f: raw_data = f.read() result = chardet.detect(raw_data) - encoding = result['encoding'] + encoding = result["encoding"] + + if encoding in ["Windows-1252", "Windows-1254"]: + encoding = "utf-8" with open(file_path, "r", encoding=encoding) as f: return f.read() @@ -121,9 +137,15 @@ def parse_text_file_to_record(file_path: str, silent_errors: bool) -> Optional[R text = read_docx_file(file_path) else: text = read_text_file(file_path) + # if file is json, yaml, or xml, we can parse it if file_path.endswith(".json"): - text = json.loads(text) + text = orjson.loads(text) + if isinstance(text, dict): + text = {k: normalize_text(v) if isinstance(v, str) else v for k, v in text.items()} + elif isinstance(text, list): + text = [normalize_text(item) if isinstance(item, str) else item for item in text] + elif file_path.endswith(".yaml") or file_path.endswith(".yml"): text = yaml.safe_load(text) elif file_path.endswith(".xml"): diff --git a/src/backend/base/langflow/base/flow_processing/utils.py b/src/backend/base/langflow/base/flow_processing/utils.py index 4e121f128..1f756a1db 100644 --- a/src/backend/base/langflow/base/flow_processing/utils.py +++ b/src/backend/base/langflow/base/flow_processing/utils.py @@ -1,7 +1,7 @@ from typing import List from langflow.graph.schema import ResultData, RunOutputs -from langflow.schema.schema import Record +from langflow.schema import Record def build_records_from_run_outputs(run_outputs: RunOutputs) -> List[Record]: diff --git a/src/backend/base/langflow/base/io/chat.py b/src/backend/base/langflow/base/io/chat.py index 6089f19ea..fd16c1988 100644 --- a/src/backend/base/langflow/base/io/chat.py +++ b/src/backend/base/langflow/base/io/chat.py @@ -1,10 +1,10 @@ from typing import Optional, Union +from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES from langflow.custom import CustomComponent -from langflow.field_typing import Text -from langflow.helpers.record import records_to_text from langflow.memory import store_message from langflow.schema import Record +from langflow.schema.message import Message class ChatComponent(CustomComponent): @@ -15,7 +15,7 @@ class ChatComponent(CustomComponent): return { "input_value": { "input_types": ["Text"], - "display_name": "Message", + "display_name": 
"Text", "multiline": True, }, "sender": { @@ -40,98 +40,45 @@ class ChatComponent(CustomComponent): "info": "In case of Message being a Record, this template will be used to convert it to text.", "advanced": True, }, + "files": { + "field_type": "file", + "display_name": "Files", + "file_types": TEXT_FILE_TYPES + IMG_FILE_TYPES, + "info": "Files to be sent with the message.", + "advanced": True, + }, } def store_message( self, - message: Union[str, Text, Record], - session_id: Optional[str] = None, - sender: Optional[str] = None, - sender_name: Optional[str] = None, - ) -> list[Record]: - records = store_message( + message: Message, + ) -> list[Message]: + messages = store_message( message, - session_id=session_id, - sender=sender, - sender_name=sender_name, flow_id=self.graph.flow_id, ) - self.status = records - return records + self.status = messages + return messages def build_with_record( self, sender: Optional[str] = "User", sender_name: Optional[str] = "User", - input_value: Optional[Union[str, Record]] = None, + input_value: Optional[Union[str, Record, Message]] = None, + files: Optional[list[str]] = None, session_id: Optional[str] = None, - return_record: Optional[bool] = False, - record_template: str = "Text: {text}\nData: {data}", - ) -> Union[Text, Record]: - input_value_record: Optional[Record] = None - if return_record: - if isinstance(input_value, Record): - # Update the data of the record - input_value.data["sender"] = sender - input_value.data["sender_name"] = sender_name - input_value.data["session_id"] = session_id - else: - input_value_record = Record( - text=input_value, - data={ - "sender": sender, - "sender_name": sender_name, - "session_id": session_id, - }, - ) - elif isinstance(input_value, Record): - input_value = records_to_text(template=record_template, records=input_value) - if not input_value: - input_value = "" - if return_record and input_value_record: - result: Union[Text, Record] = input_value_record - else: - result = input_value - self.status = result - if session_id and isinstance(result, (Record, str)): - self.store_message(result, session_id, sender, sender_name) - return result + ) -> Message: + message: Message | None = None - def build_no_record( - self, - sender: Optional[str] = "User", - sender_name: Optional[str] = "User", - input_value: Optional[str] = None, - session_id: Optional[str] = None, - return_record: Optional[bool] = False, - record_template: str = "Text: {text}\nData: {data}", - ) -> Union[Text, Record]: - input_value_record: Optional[Record] = None - if return_record: - if isinstance(input_value, Record): - # Update the data of the record - input_value.data["sender"] = sender - input_value.data["sender_name"] = sender_name - input_value.data["session_id"] = session_id - else: - input_value_record = Record( - text=input_value, - data={ - "sender": sender, - "sender_name": sender_name, - "session_id": session_id, - }, - ) - elif isinstance(input_value, Record): - input_value = records_to_text(template=record_template, records=input_value) - if not input_value: - input_value = "" - if return_record and input_value_record: - result: Union[Text, Record] = input_value_record + if isinstance(input_value, Record): + # Update the data of the record + message = Message.from_record(input_value) else: - result = input_value - self.status = result - if session_id and isinstance(result, (Record, str)): - self.store_message(result, session_id, sender, sender_name) - return result + message = Message( + text=input_value, sender=sender, 
sender_name=sender_name, files=files, session_id=session_id + ) + self.status = message + if session_id and isinstance(message, Message): + self.store_message(message) + return message diff --git a/src/backend/base/langflow/base/io/text.py b/src/backend/base/langflow/base/io/text.py index 5ecfea11a..5b6ece996 100644 --- a/src/backend/base/langflow/base/io/text.py +++ b/src/backend/base/langflow/base/io/text.py @@ -3,7 +3,7 @@ from typing import Optional from langflow.custom import CustomComponent from langflow.field_typing import Text from langflow.helpers.record import records_to_text -from langflow.schema.schema import Record +from langflow.schema import Record class TextComponent(CustomComponent): diff --git a/src/backend/base/langflow/base/memory/memory.py b/src/backend/base/langflow/base/memory/memory.py index 0fb8cf209..fe372a96b 100644 --- a/src/backend/base/langflow/base/memory/memory.py +++ b/src/backend/base/langflow/base/memory/memory.py @@ -1,7 +1,7 @@ from typing import Optional from langflow.custom import CustomComponent -from langflow.schema.schema import Record +from langflow.schema import Record class BaseMemoryComponent(CustomComponent): diff --git a/src/backend/base/langflow/base/models/model.py b/src/backend/base/langflow/base/models/model.py index b38d275f9..74d350446 100644 --- a/src/backend/base/langflow/base/models/model.py +++ b/src/backend/base/langflow/base/models/model.py @@ -1,3 +1,4 @@ +import warnings from typing import Optional, Union from langchain_core.language_models.chat_models import BaseChatModel @@ -5,6 +6,7 @@ from langchain_core.language_models.llms import LLM from langchain_core.messages import AIMessage, HumanMessage, SystemMessage from langflow.custom import CustomComponent +from langflow.field_typing.prompt import Prompt class LCModelComponent(CustomComponent): @@ -53,19 +55,28 @@ class LCModelComponent(CustomComponent): key in response_metadata["token_usage"] for key in inner_openai_keys ): token_usage = response_metadata["token_usage"] - completion_tokens = token_usage["completion_tokens"] - prompt_tokens = token_usage["prompt_tokens"] - total_tokens = token_usage["total_tokens"] - finish_reason = response_metadata["finish_reason"] - status_message = f"Tokens:\nInput: {prompt_tokens}\nOutput: {completion_tokens}\nTotal Tokens: {total_tokens}\nStop Reason: {finish_reason}\nResponse: {content}" + status_message = { + "tokens": { + "input": token_usage["prompt_tokens"], + "output": token_usage["completion_tokens"], + "total": token_usage["total_tokens"], + "stop_reason": response_metadata["finish_reason"], + "response": content, + } + } + elif all(key in response_metadata for key in anthropic_keys) and all( key in response_metadata["usage"] for key in inner_anthropic_keys ): usage = response_metadata["usage"] - input_tokens = usage["input_tokens"] - output_tokens = usage["output_tokens"] - stop_reason = response_metadata["stop_reason"] - status_message = f"Tokens:\nInput: {input_tokens}\nOutput: {output_tokens}\nStop Reason: {stop_reason}\nResponse: {content}" + status_message = { + "tokens": { + "input": usage["input_tokens"], + "output": usage["output_tokens"], + "stop_reason": response_metadata["stop_reason"], + "response": content, + } + } else: status_message = f"Response: {content}" else: @@ -73,7 +84,7 @@ class LCModelComponent(CustomComponent): return status_message def get_chat_result( - self, runnable: BaseChatModel, stream: bool, input_value: str, system_message: Optional[str] = None + self, runnable: BaseChatModel, stream: bool, 
input_value: str | Prompt, system_message: Optional[str] = None ): messages: list[Union[HumanMessage, SystemMessage]] = [] if not input_value and not system_message: @@ -81,11 +92,21 @@ class LCModelComponent(CustomComponent): if system_message: messages.append(SystemMessage(content=system_message)) if input_value: - messages.append(HumanMessage(content=input_value)) + if isinstance(input_value, Prompt): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + if "prompt" in input_value: + prompt = input_value.load_lc_prompt() + runnable = prompt | runnable + else: + messages.append(input_value.to_lc_message()) + else: + messages.append(HumanMessage(content=input_value)) + inputs = messages or {} if stream: - return runnable.stream(messages) + return runnable.stream(inputs) else: - message = runnable.invoke(messages) + message = runnable.invoke(inputs) result = message.content if isinstance(message, AIMessage): status_message = self.build_status_message(message) diff --git a/src/backend/base/langflow/base/prompts/utils.py b/src/backend/base/langflow/base/prompts/utils.py index 2270035af..0fa62ea3b 100644 --- a/src/backend/base/langflow/base/prompts/utils.py +++ b/src/backend/base/langflow/base/prompts/utils.py @@ -1,9 +1,9 @@ from copy import deepcopy - from langchain_core.documents import Document from langflow.schema import Record +from langflow.schema.message import Message def record_to_string(record: Record) -> str: @@ -35,10 +35,14 @@ def dict_values_to_string(d: dict) -> dict: # it could be a list of records or documents or strings if isinstance(value, list): for i, item in enumerate(value): - if isinstance(item, Record): + if isinstance(item, Message): + d_copy[key][i] = item.text + elif isinstance(item, Record): d_copy[key][i] = record_to_string(item) elif isinstance(item, Document): d_copy[key][i] = document_to_string(item) + elif isinstance(value, Message): + d_copy[key] = value.text elif isinstance(value, Record): d_copy[key] = record_to_string(value) elif isinstance(value, Document): diff --git a/src/backend/base/langflow/components/agents/ToolCallingAgent.py b/src/backend/base/langflow/components/agents/ToolCallingAgent.py index b4a319e2f..91fcb1132 100644 --- a/src/backend/base/langflow/components/agents/ToolCallingAgent.py +++ b/src/backend/base/langflow/components/agents/ToolCallingAgent.py @@ -5,7 +5,7 @@ from langchain_core.prompts import ChatPromptTemplate from langflow.base.agents.agent import LCAgentComponent from langflow.field_typing import BaseLanguageModel, Text, Tool -from langflow.schema.schema import Record +from langflow.schema import Record class ToolCallingAgentComponent(LCAgentComponent): diff --git a/src/backend/base/langflow/components/agents/XMLAgent.py b/src/backend/base/langflow/components/agents/XMLAgent.py index 76f96da53..47f823ba4 100644 --- a/src/backend/base/langflow/components/agents/XMLAgent.py +++ b/src/backend/base/langflow/components/agents/XMLAgent.py @@ -3,10 +3,9 @@ from typing import List, Optional from langchain.agents import create_xml_agent from langchain_core.prompts import ChatPromptTemplate - from langflow.base.agents.agent import LCAgentComponent from langflow.field_typing import BaseLanguageModel, Text, Tool -from langflow.schema.schema import Record +from langflow.schema import Record class XMLAgentComponent(LCAgentComponent): diff --git a/src/backend/base/langflow/components/chains/RetrievalQA.py b/src/backend/base/langflow/components/chains/RetrievalQA.py index da77f89d4..ca9910279 100644 --- 
a/src/backend/base/langflow/components/chains/RetrievalQA.py +++ b/src/backend/base/langflow/components/chains/RetrievalQA.py @@ -5,7 +5,7 @@ from langchain_core.documents import Document from langflow.custom import CustomComponent from langflow.field_typing import BaseLanguageModel, BaseMemory, BaseRetriever, Text -from langflow.schema.schema import Record +from langflow.schema import Record class RetrievalQAComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/data/Webhook.py b/src/backend/base/langflow/components/data/Webhook.py index cf82e07d2..a1989cd49 100644 --- a/src/backend/base/langflow/components/data/Webhook.py +++ b/src/backend/base/langflow/components/data/Webhook.py @@ -3,8 +3,8 @@ import uuid from typing import Any, Optional from langflow.custom import CustomComponent +from langflow.schema import Record from langflow.schema.dotdict import dotdict -from langflow.schema.schema import Record class WebhookComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/experimental/AgentComponent.py b/src/backend/base/langflow/components/experimental/AgentComponent.py index 9a6840a41..abd8826d4 100644 --- a/src/backend/base/langflow/components/experimental/AgentComponent.py +++ b/src/backend/base/langflow/components/experimental/AgentComponent.py @@ -6,8 +6,8 @@ from langchain_core.prompts.chat import HumanMessagePromptTemplate, SystemMessag from langflow.base.agents.agent import LCAgentComponent from langflow.base.agents.utils import AGENTS, AgentSpec, get_agents_list from langflow.field_typing import BaseLanguageModel, Text, Tool +from langflow.schema import Record from langflow.schema.dotdict import dotdict -from langflow.schema.schema import Record class AgentComponent(LCAgentComponent): diff --git a/src/backend/base/langflow/components/experimental/FlowTool.py b/src/backend/base/langflow/components/experimental/FlowTool.py index fa81f6351..eaebb0c6e 100644 --- a/src/backend/base/langflow/components/experimental/FlowTool.py +++ b/src/backend/base/langflow/components/experimental/FlowTool.py @@ -7,8 +7,8 @@ from langflow.custom import CustomComponent from langflow.field_typing import Tool from langflow.graph.graph.base import Graph from langflow.helpers.flow import get_flow_inputs +from langflow.schema import Record from langflow.schema.dotdict import dotdict -from langflow.schema.schema import Record class FlowToolComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/experimental/StoreMessage.py b/src/backend/base/langflow/components/experimental/StoreMessage.py index 761646188..19be36068 100644 --- a/src/backend/base/langflow/components/experimental/StoreMessage.py +++ b/src/backend/base/langflow/components/experimental/StoreMessage.py @@ -2,7 +2,7 @@ from typing import List, Optional from langflow.custom import CustomComponent from langflow.memory import get_messages, store_message -from langflow.schema import Record +from langflow.schema.message import Message class StoreMessageComponent(CustomComponent): @@ -31,12 +31,11 @@ class StoreMessageComponent(CustomComponent): sender_name: Optional[str] = None, session_id: Optional[str] = None, message: str = "", - ) -> List[Record]: + ) -> List[Message]: store_message( - sender=sender, - sender_name=sender_name, - session_id=session_id, - message=message, + message=Message( + text=message, sender=sender, sender_name=sender_name, flow_id=self.graph.flow_id, session_id=session_id + ) ) self.status = get_messages(session_id=session_id) diff --git 
a/src/backend/base/langflow/components/helpers/MemoryComponent.py b/src/backend/base/langflow/components/helpers/MemoryComponent.py index 6d19bfd59..96e82da1e 100644 --- a/src/backend/base/langflow/components/helpers/MemoryComponent.py +++ b/src/backend/base/langflow/components/helpers/MemoryComponent.py @@ -2,9 +2,9 @@ from typing import Optional from langflow.base.memory.memory import BaseMemoryComponent from langflow.field_typing import Text -from langflow.helpers.record import records_to_text +from langflow.helpers.record import messages_to_text from langflow.memory import get_messages -from langflow.schema.schema import Record +from langflow.schema.message import Message class MemoryComponent(BaseMemoryComponent): @@ -43,7 +43,7 @@ class MemoryComponent(BaseMemoryComponent): }, } - def get_messages(self, **kwargs) -> list[Record]: + def get_messages(self, **kwargs) -> list[Message]: # Validate kwargs by checking if it contains the correct keys if "sender" not in kwargs: kwargs["sender"] = None @@ -77,6 +77,6 @@ class MemoryComponent(BaseMemoryComponent): limit=n_messages, order=order, ) - messages_str = records_to_text(template=record_template or "", records=messages) + messages_str = messages_to_text(template=record_template or "", messages=messages) self.status = messages_str return messages_str diff --git a/src/backend/base/langflow/components/inputs/ChatInput.py b/src/backend/base/langflow/components/inputs/ChatInput.py index 40203851f..150d5f2af 100644 --- a/src/backend/base/langflow/components/inputs/ChatInput.py +++ b/src/backend/base/langflow/components/inputs/ChatInput.py @@ -1,8 +1,7 @@ -from typing import Optional, Union +from typing import Optional from langflow.base.io.chat import ChatComponent -from langflow.field_typing import Text -from langflow.schema import Record +from langflow.schema.message import Message class ChatInput(ChatComponent): @@ -14,7 +13,7 @@ class ChatInput(ChatComponent): build_config = super().build_config() build_config["input_value"] = { "input_types": [], - "display_name": "Message", + "display_name": "Text", "multiline": True, } @@ -25,13 +24,13 @@ class ChatInput(ChatComponent): sender: Optional[str] = "User", sender_name: Optional[str] = "User", input_value: Optional[str] = None, + files: Optional[list[str]] = None, session_id: Optional[str] = None, - return_record: Optional[bool] = False, - ) -> Union[Text, Record]: - return super().build_no_record( + ) -> Message: + return super().build_with_record( sender=sender, sender_name=sender_name, input_value=input_value, + files=files, session_id=session_id, - return_record=return_record, ) diff --git a/src/backend/base/langflow/components/inputs/Prompt.py b/src/backend/base/langflow/components/inputs/Prompt.py index 2c76e6132..e65d27576 100644 --- a/src/backend/base/langflow/components/inputs/Prompt.py +++ b/src/backend/base/langflow/components/inputs/Prompt.py @@ -1,7 +1,6 @@ -from langchain_core.prompts import PromptTemplate - from langflow.custom import CustomComponent -from langflow.field_typing import Prompt, TemplateField, Text +from langflow.field_typing import TemplateField +from langflow.field_typing.prompt import Prompt class PromptComponent(CustomComponent): @@ -15,19 +14,11 @@ class PromptComponent(CustomComponent): "code": TemplateField(advanced=True), } - def build( + async def build( self, template: Prompt, **kwargs, - ) -> Text: - from langflow.base.prompts.utils import dict_values_to_string - - prompt_template = PromptTemplate.from_template(Text(template)) - kwargs = 
dict_values_to_string(kwargs) - kwargs = {k: "\n".join(v) if isinstance(v, list) else v for k, v in kwargs.items()} - try: - formated_prompt = prompt_template.format(**kwargs) - except Exception as exc: - raise ValueError(f"Error formatting prompt: {exc}") from exc - self.status = f'Prompt:\n"{formated_prompt}"' - return formated_prompt + ) -> Prompt: + prompt = await Prompt.from_template_and_variables(template, kwargs) + self.status = prompt.format_text() + return prompt diff --git a/src/backend/base/langflow/components/inputs/TextInput.py b/src/backend/base/langflow/components/inputs/TextInput.py index b2317678e..4edf21723 100644 --- a/src/backend/base/langflow/components/inputs/TextInput.py +++ b/src/backend/base/langflow/components/inputs/TextInput.py @@ -12,7 +12,7 @@ class TextInput(TextComponent): def build_config(self): return { "input_value": { - "display_name": "Value", + "display_name": "Text", "input_types": ["Record", "Text"], "info": "Text or Record to be passed as input.", }, diff --git a/src/backend/base/langflow/components/langchain_utilities/SearchApi.py b/src/backend/base/langflow/components/langchain_utilities/SearchApi.py index 3dcd48d9f..3e6721fd6 100644 --- a/src/backend/base/langflow/components/langchain_utilities/SearchApi.py +++ b/src/backend/base/langflow/components/langchain_utilities/SearchApi.py @@ -3,7 +3,7 @@ from typing import Optional from langchain_community.utilities.searchapi import SearchApiAPIWrapper from langflow.custom import CustomComponent -from langflow.schema.schema import Record +from langflow.schema import Record from langflow.services.database.models.base import orjson_dumps diff --git a/src/backend/base/langflow/components/memories/AstraDBMessageReader.py b/src/backend/base/langflow/components/memories/AstraDBMessageReader.py index bbb732f16..f2e93d19d 100644 --- a/src/backend/base/langflow/components/memories/AstraDBMessageReader.py +++ b/src/backend/base/langflow/components/memories/AstraDBMessageReader.py @@ -4,7 +4,7 @@ from langchain_astradb.chat_message_histories import AstraDBChatMessageHistory from langflow.base.memory.memory import BaseMemoryComponent from langflow.field_typing import Text -from langflow.schema.schema import Record +from langflow.schema import Record class AstraDBMessageReaderComponent(BaseMemoryComponent): diff --git a/src/backend/base/langflow/components/memories/AstraDBMessageWriter.py b/src/backend/base/langflow/components/memories/AstraDBMessageWriter.py index 265f60cf4..a95c7a15c 100644 --- a/src/backend/base/langflow/components/memories/AstraDBMessageWriter.py +++ b/src/backend/base/langflow/components/memories/AstraDBMessageWriter.py @@ -1,11 +1,11 @@ from typing import Optional +from langchain_astradb import AstraDBChatMessageHistory +from langchain_core.messages import BaseMessage + from langflow.base.memory.memory import BaseMemoryComponent from langflow.field_typing import Text -from langflow.schema.schema import Record - -from langchain_core.messages import BaseMessage -from langchain_astradb import AstraDBChatMessageHistory +from langflow.schema import Record class AstraDBMessageWriterComponent(BaseMemoryComponent): diff --git a/src/backend/base/langflow/components/memories/ZepMessageReader.py b/src/backend/base/langflow/components/memories/ZepMessageReader.py index 75b27091f..feef017a6 100644 --- a/src/backend/base/langflow/components/memories/ZepMessageReader.py +++ b/src/backend/base/langflow/components/memories/ZepMessageReader.py @@ -4,7 +4,7 @@ from langchain_community.chat_message_histories.zep 
import SearchScope, SearchTy from langflow.base.memory.memory import BaseMemoryComponent from langflow.field_typing import Text -from langflow.schema.schema import Record +from langflow.schema import Record class ZepMessageReaderComponent(BaseMemoryComponent): diff --git a/src/backend/base/langflow/components/memories/ZepMessageWriter.py b/src/backend/base/langflow/components/memories/ZepMessageWriter.py index b062f66bf..c3d55a721 100644 --- a/src/backend/base/langflow/components/memories/ZepMessageWriter.py +++ b/src/backend/base/langflow/components/memories/ZepMessageWriter.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, Optional from langflow.base.memory.memory import BaseMemoryComponent from langflow.field_typing import Text -from langflow.schema.schema import Record +from langflow.schema import Record if TYPE_CHECKING: from zep_python.langchain import ZepChatMessageHistory diff --git a/src/backend/base/langflow/components/models/AmazonBedrockModel.py b/src/backend/base/langflow/components/models/AmazonBedrockModel.py index 1015f1684..99229deb2 100644 --- a/src/backend/base/langflow/components/models/AmazonBedrockModel.py +++ b/src/backend/base/langflow/components/models/AmazonBedrockModel.py @@ -58,7 +58,7 @@ class AmazonBedrockComponent(LCModelComponent): "advanced": True, }, "cache": {"display_name": "Cache"}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]}, "system_message": { "display_name": "System Message", "info": "System message to pass to the model.", diff --git a/src/backend/base/langflow/components/models/AnthropicModel.py b/src/backend/base/langflow/components/models/AnthropicModel.py index cfe9ed900..bac7708d4 100644 --- a/src/backend/base/langflow/components/models/AnthropicModel.py +++ b/src/backend/base/langflow/components/models/AnthropicModel.py @@ -63,7 +63,7 @@ class AnthropicLLM(LCModelComponent): "info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.", }, "code": {"show": False}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]}, "stream": { "display_name": "Stream", "advanced": True, diff --git a/src/backend/base/langflow/components/models/AzureOpenAIModel.py b/src/backend/base/langflow/components/models/AzureOpenAIModel.py index c296a8fae..97ee88920 100644 --- a/src/backend/base/langflow/components/models/AzureOpenAIModel.py +++ b/src/backend/base/langflow/components/models/AzureOpenAIModel.py @@ -78,7 +78,7 @@ class AzureChatOpenAIComponent(LCModelComponent): "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", }, "code": {"show": False}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py b/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py index f5e6497d0..aaae3112f 100644 --- a/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py +++ b/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py @@ -81,7 +81,7 @@ class QianfanChatEndpointComponent(LCModelComponent): "info": "Endpoint of the Qianfan LLM, required if custom model used.", }, "code": {"show": False}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/ChatLiteLLMModel.py b/src/backend/base/langflow/components/models/ChatLiteLLMModel.py index 054b59d12..aa3cf6976 100644 --- a/src/backend/base/langflow/components/models/ChatLiteLLMModel.py +++ b/src/backend/base/langflow/components/models/ChatLiteLLMModel.py @@ -111,7 +111,7 @@ class ChatLiteLLMModelComponent(LCModelComponent): "required": False, "default": False, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/CohereModel.py b/src/backend/base/langflow/components/models/CohereModel.py index 3bd12c095..b5ecbab9f 100644 --- a/src/backend/base/langflow/components/models/CohereModel.py +++ b/src/backend/base/langflow/components/models/CohereModel.py @@ -1,10 +1,11 @@ from typing import Optional +from langchain_cohere import ChatCohere from pydantic.v1 import SecretStr -from langflow.field_typing import Text + from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent -from langchain_cohere import ChatCohere +from langflow.field_typing import Text class CohereComponent(LCModelComponent): @@ -42,7 +43,7 @@ class CohereComponent(LCModelComponent): "type": "float", "show": True, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, @@ -69,3 +70,3 @@ temperature=temperature, ) return self.get_chat_result(output, stream, input_value, system_message) diff --git a/src/backend/base/langflow/components/models/HuggingFaceModel.py b/src/backend/base/langflow/components/models/HuggingFaceModel.py index 19750ef9f..949598b2d 100644 --- a/src/backend/base/langflow/components/models/HuggingFaceModel.py +++ b/src/backend/base/langflow/components/models/HuggingFaceModel.py @@ -2,9 +2,10 @@ from typing import Optional from langchain_community.chat_models.huggingface import ChatHuggingFace from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint -from langflow.field_typing import Text + from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent +from langflow.field_typing import Text class 
HuggingFaceEndpointsComponent(LCModelComponent): @@ -36,7 +37,7 @@ class HuggingFaceEndpointsComponent(LCModelComponent): "advanced": True, }, "code": {"show": False}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, @@ -72,3 +73,3 @@ raise ValueError("Could not connect to HuggingFace Endpoints API.") from e output = ChatHuggingFace(llm=llm) return self.get_chat_result(output, stream, input_value, system_message) diff --git a/src/backend/base/langflow/components/models/MistralModel.py b/src/backend/base/langflow/components/models/MistralModel.py index 305a45e4b..75937e70d 100644 --- a/src/backend/base/langflow/components/models/MistralModel.py +++ b/src/backend/base/langflow/components/models/MistralModel.py @@ -27,7 +27,7 @@ class MistralAIModelComponent(LCModelComponent): def build_config(self): return { - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]}, "max_tokens": { "display_name": "Max Tokens", "advanced": True, diff --git a/src/backend/base/langflow/components/models/OllamaModel.py b/src/backend/base/langflow/components/models/OllamaModel.py index f591e4a5c..cca2a0f48 100644 --- a/src/backend/base/langflow/components/models/OllamaModel.py +++ b/src/backend/base/langflow/components/models/OllamaModel.py @@ -194,7 +194,7 @@ class ChatOllamaComponent(LCModelComponent): "info": "Template to use for generating text.", "advanced": True, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/OpenAIModel.py b/src/backend/base/langflow/components/models/OpenAIModel.py index 0aedce495..329b0357f 100644 --- a/src/backend/base/langflow/components/models/OpenAIModel.py +++ b/src/backend/base/langflow/components/models/OpenAIModel.py @@ -28,7 +28,7 @@ class OpenAIModelComponent(LCModelComponent): def build_config(self): return { - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]}, "max_tokens": { "display_name": "Max Tokens", "advanced": True, @@ -79,7 +79,7 @@ class OpenAIModelComponent(LCModelComponent): input_value: Text, openai_api_key: str, temperature: float = 0.1, - model_name: str = "gpt-4o", + model_name: str = "gpt-3.5-turbo", max_tokens: Optional[int] = 256, model_kwargs: NestedDict = {}, openai_api_base: Optional[str] = None, diff --git a/src/backend/base/langflow/components/models/VertexAiModel.py b/src/backend/base/langflow/components/models/VertexAiModel.py index a992447f4..33bbbbc46 100644 --- a/src/backend/base/langflow/components/models/VertexAiModel.py +++ b/src/backend/base/langflow/components/models/VertexAiModel.py @@ -1,6 +1,5 @@ from typing import Optional - from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import Text @@ -74,7 +73,7 @@ class ChatVertexAIComponent(LCModelComponent): "value": False, "advanced": True, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]}, 
"stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/outputs/ChatOutput.py b/src/backend/base/langflow/components/outputs/ChatOutput.py index 7994c9ded..9a118f37e 100644 --- a/src/backend/base/langflow/components/outputs/ChatOutput.py +++ b/src/backend/base/langflow/components/outputs/ChatOutput.py @@ -1,8 +1,7 @@ -from typing import Optional, Union +from typing import Optional from langflow.base.io.chat import ChatComponent -from langflow.field_typing import Text -from langflow.schema import Record +from langflow.schema.message import Message class ChatOutput(ChatComponent): @@ -16,14 +15,12 @@ class ChatOutput(ChatComponent): sender_name: Optional[str] = "AI", input_value: Optional[str] = None, session_id: Optional[str] = None, - return_record: Optional[bool] = False, - record_template: Optional[str] = "{text}", - ) -> Union[Text, Record]: + files: Optional[list[str]] = None, + ) -> Message: return super().build_with_record( sender=sender, sender_name=sender_name, input_value=input_value, session_id=session_id, - return_record=return_record, - record_template=record_template or "", + files=files, ) diff --git a/src/backend/base/langflow/components/outputs/RecordsOutput.py b/src/backend/base/langflow/components/outputs/RecordsOutput.py index 25eae862e..c750e675b 100644 --- a/src/backend/base/langflow/components/outputs/RecordsOutput.py +++ b/src/backend/base/langflow/components/outputs/RecordsOutput.py @@ -2,9 +2,18 @@ from langflow.custom import CustomComponent from langflow.schema import Record -class RecordsOutput(CustomComponent): +class RecordOutput(CustomComponent): display_name = "Records Output" description = "Display Records as a Table" + def build_config(self): + return { + "input_value": { + "display_name": "Records", + "input_types": ["Record"], + "info": "Record or Record list to be passed as input.", + }, + } + def build(self, input_value: Record) -> Record: return input_value diff --git a/src/backend/base/langflow/components/outputs/TextOutput.py b/src/backend/base/langflow/components/outputs/TextOutput.py index 0d55621b2..9096b7a4d 100644 --- a/src/backend/base/langflow/components/outputs/TextOutput.py +++ b/src/backend/base/langflow/components/outputs/TextOutput.py @@ -12,7 +12,7 @@ class TextOutput(TextComponent): def build_config(self): return { "input_value": { - "display_name": "Value", + "display_name": "Text", "input_types": ["Record", "Text"], "info": "Text or Record to be passed as output.", }, diff --git a/src/backend/base/langflow/components/textsplitters/CharacterTextSplitter.py b/src/backend/base/langflow/components/textsplitters/CharacterTextSplitter.py index ee340ab26..9f60d7c88 100644 --- a/src/backend/base/langflow/components/textsplitters/CharacterTextSplitter.py +++ b/src/backend/base/langflow/components/textsplitters/CharacterTextSplitter.py @@ -3,7 +3,7 @@ from typing import List from langchain_text_splitters import CharacterTextSplitter from langflow.custom import CustomComponent -from langflow.schema.schema import Record +from langflow.schema import Record from langflow.utils.util import unescape_string diff --git a/src/backend/base/langflow/components/textsplitters/LanguageRecursiveTextSplitter.py b/src/backend/base/langflow/components/textsplitters/LanguageRecursiveTextSplitter.py index 7ef7d5c24..a43fdcd72 100644 --- a/src/backend/base/langflow/components/textsplitters/LanguageRecursiveTextSplitter.py +++ 
b/src/backend/base/langflow/components/textsplitters/LanguageRecursiveTextSplitter.py @@ -3,7 +3,7 @@ from typing import List, Optional from langchain_text_splitters import Language, RecursiveCharacterTextSplitter from langflow.custom import CustomComponent -from langflow.schema.schema import Record +from langflow.schema import Record class LanguageRecursiveTextSplitterComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/tools/SearchApi.py b/src/backend/base/langflow/components/tools/SearchApi.py index 3dcd48d9f..3e6721fd6 100644 --- a/src/backend/base/langflow/components/tools/SearchApi.py +++ b/src/backend/base/langflow/components/tools/SearchApi.py @@ -3,7 +3,7 @@ from typing import Optional from langchain_community.utilities.searchapi import SearchApiAPIWrapper from langflow.custom import CustomComponent -from langflow.schema.schema import Record +from langflow.schema import Record from langflow.services.database.models.base import orjson_dumps diff --git a/src/backend/base/langflow/components/vectorsearch/RedisSearch.py b/src/backend/base/langflow/components/vectorsearch/RedisSearch.py index afe653f6e..75aba7f8a 100644 --- a/src/backend/base/langflow/components/vectorsearch/RedisSearch.py +++ b/src/backend/base/langflow/components/vectorsearch/RedisSearch.py @@ -1,10 +1,11 @@ from typing import List, Optional +from langchain_core.embeddings import Embeddings + from langflow.components.vectorstores.base.model import LCVectorStoreComponent from langflow.components.vectorstores.Redis import RedisComponent from langflow.field_typing import Text from langflow.schema import Record -from langchain_core.embeddings import Embeddings class RedisSearchComponent(RedisComponent, LCVectorStoreComponent): diff --git a/src/backend/base/langflow/components/vectorsearch/WeaviateSearch.py b/src/backend/base/langflow/components/vectorsearch/WeaviateSearch.py index b51f65a55..b70dfa41d 100644 --- a/src/backend/base/langflow/components/vectorsearch/WeaviateSearch.py +++ b/src/backend/base/langflow/components/vectorsearch/WeaviateSearch.py @@ -1,10 +1,11 @@ from typing import List, Optional +from langchain_core.embeddings import Embeddings + from langflow.components.vectorstores.base.model import LCVectorStoreComponent from langflow.components.vectorstores.Weaviate import WeaviateVectorStoreComponent from langflow.field_typing import Text from langflow.schema import Record -from langchain_core.embeddings import Embeddings class WeaviateSearchVectorStore(WeaviateVectorStoreComponent, LCVectorStoreComponent): diff --git a/src/backend/base/langflow/components/vectorsearch/__init__.py b/src/backend/base/langflow/components/vectorsearch/__init__.py index 83ce34b26..e69de29bb 100644 --- a/src/backend/base/langflow/components/vectorsearch/__init__.py +++ b/src/backend/base/langflow/components/vectorsearch/__init__.py @@ -1,27 +0,0 @@ -from .AstraDBSearch import AstraDBSearchComponent -from .ChromaSearch import ChromaSearchComponent -from .FAISSSearch import FAISSSearchComponent -from .MongoDBAtlasVectorSearch import MongoDBAtlasSearchComponent -from .PineconeSearch import PineconeSearchComponent -from .QdrantSearch import QdrantSearchComponent -from .RedisSearch import RedisSearchComponent -from .SupabaseVectorStoreSearch import SupabaseSearchComponent -from .VectaraSearch import VectaraSearchComponent -from .WeaviateSearch import WeaviateSearchVectorStore -from .pgvectorSearch import PGVectorSearchComponent -from .Couchbase import CouchbaseSearchComponent # type: ignore - -__all__ = [ - 
"AstraDBSearchComponent", - "ChromaSearchComponent", - "CouchbaseSearchComponent", - "FAISSSearchComponent", - "MongoDBAtlasSearchComponent", - "PineconeSearchComponent", - "QdrantSearchComponent", - "RedisSearchComponent", - "SupabaseSearchComponent", - "VectaraSearchComponent", - "WeaviateSearchVectorStore", - "PGVectorSearchComponent", -] diff --git a/src/backend/base/langflow/components/vectorsearch/pgvectorSearch.py b/src/backend/base/langflow/components/vectorsearch/pgvectorSearch.py index c6bedfede..304439ff4 100644 --- a/src/backend/base/langflow/components/vectorsearch/pgvectorSearch.py +++ b/src/backend/base/langflow/components/vectorsearch/pgvectorSearch.py @@ -1,10 +1,11 @@ from typing import List +from langchain_core.embeddings import Embeddings + from langflow.components.vectorstores.base.model import LCVectorStoreComponent from langflow.components.vectorstores.pgvector import PGVectorComponent from langflow.field_typing import Text from langflow.schema import Record -from langchain_core.embeddings import Embeddings class PGVectorSearchComponent(PGVectorComponent, LCVectorStoreComponent): diff --git a/src/backend/base/langflow/components/vectorstores/AstraDB.py b/src/backend/base/langflow/components/vectorstores/AstraDB.py index 07ded028e..c9f7da8ee 100644 --- a/src/backend/base/langflow/components/vectorstores/AstraDB.py +++ b/src/backend/base/langflow/components/vectorstores/AstraDB.py @@ -1,11 +1,12 @@ from typing import List, Optional, Union + from langchain_astradb import AstraDBVectorStore from langchain_astradb.utils.astradb import SetupMode +from langchain_core.retrievers import BaseRetriever from langflow.custom import CustomComponent from langflow.field_typing import Embeddings, VectorStore from langflow.schema import Record -from langchain_core.retrievers import BaseRetriever class AstraDBVectorStoreComponent(CustomComponent): @@ -156,3 +157,4 @@ class AstraDBVectorStoreComponent(CustomComponent): ) return vector_store + return vector_store diff --git a/src/backend/base/langflow/components/vectorstores/Chroma.py b/src/backend/base/langflow/components/vectorstores/Chroma.py index 3671dbbdb..5742aad7b 100644 --- a/src/backend/base/langflow/components/vectorstores/Chroma.py +++ b/src/backend/base/langflow/components/vectorstores/Chroma.py @@ -8,7 +8,7 @@ from langchain_core.retrievers import BaseRetriever from langchain_core.vectorstores import VectorStore from langflow.custom import CustomComponent -from langflow.schema.schema import Record +from langflow.schema import Record class ChromaComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/vectorstores/Couchbase.py b/src/backend/base/langflow/components/vectorstores/Couchbase.py index f99ac7d40..81fa0727a 100644 --- a/src/backend/base/langflow/components/vectorstores/Couchbase.py +++ b/src/backend/base/langflow/components/vectorstores/Couchbase.py @@ -1,18 +1,16 @@ -from typing import List, Optional, Union - -from langchain_community.vectorstores import CouchbaseVectorStore - -from langflow.custom import CustomComponent -from langflow.field_typing import Embeddings, VectorStore -from langflow.schema import Record - from datetime import timedelta +from typing import List, Optional, Union from couchbase.auth import PasswordAuthenticator # type: ignore from couchbase.cluster import Cluster # type: ignore from couchbase.options import ClusterOptions # type: ignore +from langchain_community.vectorstores import CouchbaseVectorStore from langchain_core.retrievers import BaseRetriever +from 
langflow.custom import CustomComponent +from langflow.field_typing import Embeddings, VectorStore +from langflow.schema import Record + class CouchbaseComponent(CustomComponent): display_name = "Couchbase" diff --git a/src/backend/base/langflow/components/vectorstores/FAISS.py b/src/backend/base/langflow/components/vectorstores/FAISS.py index 9d9624919..3efd5b722 100644 --- a/src/backend/base/langflow/components/vectorstores/FAISS.py +++ b/src/backend/base/langflow/components/vectorstores/FAISS.py @@ -6,7 +6,7 @@ from langchain_core.vectorstores import VectorStore from langflow.custom import CustomComponent from langflow.field_typing import Embeddings -from langflow.schema.schema import Record +from langflow.schema import Record class FAISSComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/vectorstores/MongoDBAtlasVector.py b/src/backend/base/langflow/components/vectorstores/MongoDBAtlasVector.py index 8c045a1bd..61c4933e9 100644 --- a/src/backend/base/langflow/components/vectorstores/MongoDBAtlasVector.py +++ b/src/backend/base/langflow/components/vectorstores/MongoDBAtlasVector.py @@ -4,7 +4,7 @@ from langchain_community.vectorstores.mongodb_atlas import MongoDBAtlasVectorSea from langflow.custom import CustomComponent from langflow.field_typing import Embeddings -from langflow.schema.schema import Record +from langflow.schema import Record class MongoDBAtlasComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/vectorstores/Pinecone.py b/src/backend/base/langflow/components/vectorstores/Pinecone.py index 2bc0e2252..135dd7501 100644 --- a/src/backend/base/langflow/components/vectorstores/Pinecone.py +++ b/src/backend/base/langflow/components/vectorstores/Pinecone.py @@ -8,7 +8,7 @@ from langchain_pinecone.vectorstores import PineconeVectorStore from langflow.custom import CustomComponent from langflow.field_typing import Embeddings -from langflow.schema.schema import Record +from langflow.schema import Record class PineconeComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/vectorstores/Qdrant.py b/src/backend/base/langflow/components/vectorstores/Qdrant.py index 794e282db..6c1bdbcb6 100644 --- a/src/backend/base/langflow/components/vectorstores/Qdrant.py +++ b/src/backend/base/langflow/components/vectorstores/Qdrant.py @@ -6,7 +6,7 @@ from langchain_core.vectorstores import VectorStore from langflow.custom import CustomComponent from langflow.field_typing import Embeddings -from langflow.schema.schema import Record +from langflow.schema import Record class QdrantComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/vectorstores/Redis.py b/src/backend/base/langflow/components/vectorstores/Redis.py index 04d137538..c35ec018e 100644 --- a/src/backend/base/langflow/components/vectorstores/Redis.py +++ b/src/backend/base/langflow/components/vectorstores/Redis.py @@ -6,7 +6,7 @@ from langchain_core.retrievers import BaseRetriever from langchain_core.vectorstores import VectorStore from langflow.custom import CustomComponent -from langflow.schema.schema import Record +from langflow.schema import Record class RedisComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/vectorstores/SupabaseVectorStore.py b/src/backend/base/langflow/components/vectorstores/SupabaseVectorStore.py index 5e87a09ca..e7c847f2b 100644 --- a/src/backend/base/langflow/components/vectorstores/SupabaseVectorStore.py +++ b/src/backend/base/langflow/components/vectorstores/SupabaseVectorStore.py @@ -7,7 
+7,7 @@ from supabase.client import Client, create_client from langflow.custom import CustomComponent from langflow.field_typing import Embeddings -from langflow.schema.schema import Record +from langflow.schema import Record class SupabaseComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/vectorstores/Upstash.py b/src/backend/base/langflow/components/vectorstores/Upstash.py index c066d7f44..2695abecc 100644 --- a/src/backend/base/langflow/components/vectorstores/Upstash.py +++ b/src/backend/base/langflow/components/vectorstores/Upstash.py @@ -6,7 +6,7 @@ from langchain_core.retrievers import BaseRetriever from langchain_core.vectorstores import VectorStore from langflow.custom import CustomComponent -from langflow.schema.schema import Record +from langflow.schema import Record class UpstashVectorStoreComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/vectorstores/Vectara.py b/src/backend/base/langflow/components/vectorstores/Vectara.py index 247614345..5a51b5a1b 100644 --- a/src/backend/base/langflow/components/vectorstores/Vectara.py +++ b/src/backend/base/langflow/components/vectorstores/Vectara.py @@ -9,7 +9,7 @@ from langchain_core.vectorstores import VectorStore from langflow.custom import CustomComponent from langflow.field_typing import BaseRetriever -from langflow.schema.schema import Record +from langflow.schema import Record class VectaraComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/vectorstores/Weaviate.py b/src/backend/base/langflow/components/vectorstores/Weaviate.py index e1a802000..fafa2f390 100644 --- a/src/backend/base/langflow/components/vectorstores/Weaviate.py +++ b/src/backend/base/langflow/components/vectorstores/Weaviate.py @@ -8,7 +8,7 @@ from langchain_core.retrievers import BaseRetriever from langchain_core.vectorstores import VectorStore from langflow.custom import CustomComponent -from langflow.schema.schema import Record +from langflow.schema import Record class WeaviateVectorStoreComponent(CustomComponent): diff --git a/src/backend/base/langflow/components/vectorstores/__init__.py b/src/backend/base/langflow/components/vectorstores/__init__.py index d38b0a735..e69de29bb 100644 --- a/src/backend/base/langflow/components/vectorstores/__init__.py +++ b/src/backend/base/langflow/components/vectorstores/__init__.py @@ -1,28 +0,0 @@ -from .AstraDB import AstraDBVectorStoreComponent -from .Chroma import ChromaComponent -from .FAISS import FAISSComponent -from .MongoDBAtlasVector import MongoDBAtlasComponent -from .Pinecone import PineconeComponent -from .Qdrant import QdrantComponent -from .Redis import RedisComponent -from .SupabaseVectorStore import SupabaseComponent -from .Vectara import VectaraComponent -from .Weaviate import WeaviateVectorStoreComponent -from .pgvector import PGVectorComponent -from .Couchbase import CouchbaseComponent - -__all__ = [ - "AstraDBVectorStoreComponent", - "ChromaComponent", - "CouchbaseComponent", - "FAISSComponent", - "MongoDBAtlasComponent", - "PineconeComponent", - "QdrantComponent", - "RedisComponent", - "SupabaseComponent", - "VectaraComponent", - "WeaviateVectorStoreComponent", - "base", - "PGVectorComponent", -] diff --git a/src/backend/base/langflow/components/vectorstores/pgvector.py b/src/backend/base/langflow/components/vectorstores/pgvector.py index 75c833ded..3ea7b6eb6 100644 --- a/src/backend/base/langflow/components/vectorstores/pgvector.py +++ b/src/backend/base/langflow/components/vectorstores/pgvector.py @@ -6,7 +6,7 @@ from 
langchain_core.retrievers import BaseRetriever from langchain_core.vectorstores import VectorStore from langflow.custom import CustomComponent -from langflow.schema.schema import Record +from langflow.schema import Record class PGVectorComponent(CustomComponent): diff --git a/src/backend/base/langflow/custom/code_parser/code_parser.py b/src/backend/base/langflow/custom/code_parser/code_parser.py index 17fe12896..705e779f4 100644 --- a/src/backend/base/langflow/custom/code_parser/code_parser.py +++ b/src/backend/base/langflow/custom/code_parser/code_parser.py @@ -297,7 +297,7 @@ class CodeParser: bases = self.execute_and_inspect_classes(self.code) except Exception as e: # If the code cannot be executed, return an empty list - logger.exception(e) + logger.debug(e) bases = [] raise e return bases diff --git a/src/backend/base/langflow/custom/custom_component/custom_component.py b/src/backend/base/langflow/custom/custom_component/custom_component.py index aeac9cae6..896b07337 100644 --- a/src/backend/base/langflow/custom/custom_component/custom_component.py +++ b/src/backend/base/langflow/custom/custom_component/custom_component.py @@ -7,6 +7,7 @@ import yaml from cachetools import TTLCache, cachedmethod from langchain_core.documents import Document from pydantic import BaseModel + from langflow.custom.code_parser.utils import ( extract_inner_type_from_generic_alias, extract_union_types_from_generic_alias, @@ -378,13 +379,14 @@ class CustomComponent(Component): The variable for the current user with the specified name. """ - def get_variable(name: str): + def get_variable(name: str, field: str): if hasattr(self, "_user_id") and not self._user_id: raise ValueError(f"User id is not set for {self.__class__.__name__}") variable_service = get_variable_service() # Get service instance # Retrieve and decrypt the variable by name for the current user with session_scope() as session: - return variable_service.get_variable(user_id=self._user_id or "", name=name, session=session) + user_id = self._user_id or "" + return variable_service.get_variable(user_id=user_id, name=name, field=field, session=session) return get_variable diff --git a/src/backend/base/langflow/custom/directory_reader/directory_reader.py b/src/backend/base/langflow/custom/directory_reader/directory_reader.py index b9f55f21f..52a310314 100644 --- a/src/backend/base/langflow/custom/directory_reader/directory_reader.py +++ b/src/backend/base/langflow/custom/directory_reader/directory_reader.py @@ -78,7 +78,8 @@ class DirectoryReader: component_tuple = (*build_component(component), component) components.append(component_tuple) except Exception as e: - logger.error(f"Error while loading component { component['name']}: {e}") + logger.debug(f"Error while loading component { component['name']}") + logger.debug(e) continue items.append({"name": menu["name"], "path": menu["path"], "components": components}) filtered = [menu for menu in items if menu["components"]] @@ -266,8 +267,7 @@ class DirectoryReader: if validation_result: try: output_types = self.get_output_types_from_code(result_content) - except Exception as exc: - logger.exception(f"Error while getting output types from code: {str(exc)}") + except Exception: output_types = [component_name_camelcase] else: output_types = [component_name_camelcase] diff --git a/src/backend/base/langflow/field_typing/__init__.py b/src/backend/base/langflow/field_typing/__init__.py index 15ce03693..67dfec050 100644 --- a/src/backend/base/langflow/field_typing/__init__.py +++ 
b/src/backend/base/langflow/field_typing/__init__.py @@ -19,13 +19,13 @@ from .constants import ( Embeddings, NestedDict, Object, - Prompt, PromptTemplate, Text, TextSplitter, Tool, VectorStore, ) +from .prompt import Prompt from .range_spec import RangeSpec diff --git a/src/backend/base/langflow/field_typing/constants.py b/src/backend/base/langflow/field_typing/constants.py index d73257c14..807f9a77e 100644 --- a/src/backend/base/langflow/field_typing/constants.py +++ b/src/backend/base/langflow/field_typing/constants.py @@ -6,7 +6,7 @@ from langchain.memory.chat_memory import BaseChatMemory from langchain_core.document_loaders import BaseLoader from langchain_core.documents import Document from langchain_core.embeddings import Embeddings -from langchain_core.language_models import BaseLLM, BaseLanguageModel +from langchain_core.language_models import BaseLanguageModel, BaseLLM from langchain_core.memory import BaseMemory from langchain_core.output_parsers import BaseOutputParser from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate, PromptTemplate @@ -15,6 +15,8 @@ from langchain_core.tools import Tool from langchain_core.vectorstores import VectorStore from langchain_text_splitters import TextSplitter +from langflow.field_typing.prompt import Prompt + # Type alias for more complex dicts NestedDict = Dict[str, Union[str, Dict]] @@ -27,10 +29,6 @@ class Data: pass -class Prompt: - pass - - class Code: pass diff --git a/src/backend/base/langflow/field_typing/prompt.py b/src/backend/base/langflow/field_typing/prompt.py new file mode 100644 index 000000000..ef6c7ce9a --- /dev/null +++ b/src/backend/base/langflow/field_typing/prompt.py @@ -0,0 +1,41 @@ +from langchain_core.load import load +from langchain_core.messages import HumanMessage +from langchain_core.prompts import BaseChatPromptTemplate, ChatPromptTemplate, PromptTemplate + +from langflow.base.prompts.utils import dict_values_to_string +from langflow.schema.message import Message +from langflow.schema.record import Record + + +class Prompt(Record): + def load_lc_prompt(self): + if "prompt" not in self: + raise ValueError("Prompt is required.") + return load(self.prompt) + + @classmethod + def from_lc_prompt( + cls, + prompt: BaseChatPromptTemplate, + ): + prompt_json = prompt.to_json() + return cls(prompt=prompt_json) + + def format_text(self): + prompt_template = PromptTemplate.from_template(self.template) + variables_with_str_values = dict_values_to_string(self.variables) + formatted_prompt = prompt_template.format(**variables_with_str_values) + return formatted_prompt + + @classmethod + async def from_template_and_variables(cls, template: str, variables: dict): + instance = cls(template=template, variables=variables) + contents = [{"type": "text", "text": instance.format_text()}] + # Get all Message instances from the kwargs + for value in variables.values(): + if isinstance(value, Message): + content_dicts = await value.get_file_content_dicts() + contents.extend(content_dicts) + prompt_template = ChatPromptTemplate.from_messages([HumanMessage(content=contents)]) + instance.prompt = prompt_template.to_json() + return instance diff --git a/src/backend/base/langflow/graph/graph/base.py b/src/backend/base/langflow/graph/graph/base.py index f4e71bd7c..06074ee1d 100644 --- a/src/backend/base/langflow/graph/graph/base.py +++ b/src/backend/base/langflow/graph/graph/base.py @@ -710,6 +710,7 @@ class Graph: chat_service: ChatService, vertex_id: str, inputs_dict: Optional[Dict[str, str]] = None, + files: 
Optional[list[str]] = None, user_id: Optional[str] = None, fallback_to_env_vars: bool = False, ): @@ -737,7 +738,9 @@ class Graph: # Check the cache for the vertex cached_result = await chat_service.get_cache(key=vertex.id) if isinstance(cached_result, CacheMiss): - await vertex.build(user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars) + await vertex.build( + user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars, files=files + ) await chat_service.set_cache(key=vertex.id, data=vertex) else: cached_vertex = cached_result["result"] @@ -751,7 +754,9 @@ class Graph: vertex.result.used_frozen_result = True else: - await vertex.build(user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars) + await vertex.build( + user_id=user_id, inputs=inputs_dict, fallback_to_env_vars=fallback_to_env_vars, files=files + ) if vertex.result is not None: params = f"{vertex._built_object_repr()}{params}" @@ -764,11 +769,13 @@ class Graph: next_runnable_vertices, top_level_vertices = await self.get_next_and_top_level_vertices( lock, set_cache_coro, vertex ) - log_transaction(vertex, status="success") + flow_id = self.flow_id + log_transaction(flow_id, vertex, status="success") return next_runnable_vertices, top_level_vertices, result_dict, params, valid, artifacts, vertex except Exception as exc: logger.exception(f"Error building vertex: {exc}") - log_transaction(vertex, status="failure", error=str(exc)) + flow_id = self.flow_id + log_transaction(flow_id, vertex, status="failure", error=str(exc)) raise exc async def get_next_and_top_level_vertices( diff --git a/src/backend/base/langflow/graph/schema.py b/src/backend/base/langflow/graph/schema.py index 60e7ab590..766575364 100644 --- a/src/backend/base/langflow/graph/schema.py +++ b/src/backend/base/langflow/graph/schema.py @@ -1,15 +1,17 @@ from enum import Enum from typing import Any, List, Optional -from pydantic import BaseModel, Field, field_serializer +from pydantic import BaseModel, Field, field_serializer, model_validator from langflow.graph.utils import serialize_field +from langflow.schema.schema import Log, StreamURL from langflow.utils.schemas import ChatOutputResponse, ContainsEnumMeta class ResultData(BaseModel): results: Optional[Any] = Field(default_factory=dict) artifacts: Optional[Any] = Field(default_factory=dict) + logs: Optional[List[dict]] = Field(default_factory=list) messages: Optional[list[ChatOutputResponse]] = Field(default_factory=list) timedelta: Optional[float] = None duration: Optional[str] = None @@ -23,6 +25,24 @@ class ResultData(BaseModel): return {key: serialize_field(val) for key, val in value.items()} return serialize_field(value) + @model_validator(mode="before") + @classmethod + def validate_model(cls, values): + if not values.get("logs") and values.get("artifacts"): + # Build the log from the artifacts + message = values["artifacts"] + + # ! 
Temporary fix + if not isinstance(message, dict): + message = {"message": message} + + if "stream_url" in message and "type" in message: + stream_url = StreamURL(location=message["stream_url"]) + values["logs"] = [Log(message=stream_url, type=message["type"])] + elif "type" in message: + values["logs"] = [Log(message=message, type=message["type"])] + return values + class InterfaceComponentTypes(str, Enum, metaclass=ContainsEnumMeta): # ChatInput and ChatOutput are the only ones that are diff --git a/src/backend/base/langflow/graph/utils.py b/src/backend/base/langflow/graph/utils.py index 83e2177b1..06b7ca90a 100644 --- a/src/backend/base/langflow/graph/utils.py +++ b/src/backend/base/langflow/graph/utils.py @@ -1,9 +1,12 @@ -from typing import Any, Union +from enum import Enum +from typing import Any, Generator, Union from langchain_core.documents import Document from pydantic import BaseModel from langflow.interface.utils import extract_input_variables_from_prompt +from langflow.schema import Record +from langflow.schema.message import Message class UnbuiltObject: @@ -14,6 +17,16 @@ class UnbuiltResult: pass +class ArtifactType(str, Enum): + TEXT = "text" + RECORD = "record" + OBJECT = "object" + ARRAY = "array" + STREAM = "stream" + UNKNOWN = "unknown" + MESSAGE = "message" + + def validate_prompt(prompt: str): """Validate prompt.""" if extract_input_variables_from_prompt(prompt): @@ -50,3 +63,38 @@ def serialize_field(value): elif isinstance(value, str): return {"result": value} return value + + +def get_artifact_type(custom_component, build_result) -> str: + result = ArtifactType.UNKNOWN + value = custom_component.repr_value + match value: + case Record(): + result = ArtifactType.RECORD + + case str(): + result = ArtifactType.TEXT + + case dict(): + result = ArtifactType.OBJECT + + case list(): + result = ArtifactType.ARRAY + + case Message(): + result = ArtifactType.MESSAGE + + if result == ArtifactType.UNKNOWN: + if isinstance(build_result, Generator): + result = ArtifactType.STREAM + elif isinstance(value, Message) and isinstance(value.text, Generator): + result = ArtifactType.STREAM + + return result.value + + +def post_process_raw(raw, artifact_type: str): + if artifact_type == ArtifactType.STREAM.value: + raw = "" + + return raw diff --git a/src/backend/base/langflow/graph/vertex/base.py b/src/backend/base/langflow/graph/vertex/base.py index 963ae2115..86cf9cd4e 100644 --- a/src/backend/base/langflow/graph/vertex/base.py +++ b/src/backend/base/langflow/graph/vertex/base.py @@ -4,17 +4,17 @@ import inspect import os import types from enum import Enum -from typing import TYPE_CHECKING, Any, AsyncIterator, Callable, Dict, Iterator, List, Optional +from typing import TYPE_CHECKING, Any, AsyncIterator, Callable, Dict, Iterator, List, Mapping, Optional from loguru import logger from langflow.graph.schema import INPUT_COMPONENTS, OUTPUT_COMPONENTS, InterfaceComponentTypes, ResultData -from langflow.graph.utils import UnbuiltObject, UnbuiltResult -from langflow.graph.vertex.utils import log_transaction +from langflow.graph.utils import ArtifactType, UnbuiltObject, UnbuiltResult from langflow.interface.initialize import loading from langflow.interface.listing import lazy_load_dict from langflow.schema.schema import INPUT_FIELD_NAME from langflow.services.deps import get_storage_service +from langflow.services.monitor.utils import log_transaction from langflow.utils.constants import DIRECT_TYPES from langflow.utils.schemas import ChatOutputResponse from langflow.utils.util import 
sync_to_async, unescape_string @@ -63,6 +63,8 @@ class Vertex: self._built_result = None self._built = False self.artifacts: Dict[str, Any] = {} + self.artifacts_raw: Any = None + self.artifacts_type: Optional[str] = None self.steps: List[Callable] = [self._build] self.steps_ran: List[Callable] = [] self.task_id: Optional[str] = None @@ -371,7 +373,7 @@ class Vertex: self.load_from_db_fields = load_from_db_fields self._raw_params = params.copy() - def update_raw_params(self, new_params: Dict[str, str], overwrite: bool = False): + def update_raw_params(self, new_params: Mapping[str, str | list[str]], overwrite: bool = False): """ Update the raw parameters of the vertex with the given new parameters. @@ -422,11 +424,14 @@ class Vertex: try: messages = [ ChatOutputResponse( - message=artifacts["message"], + message=artifacts["text"], sender=artifacts.get("sender"), sender_name=artifacts.get("sender_name"), session_id=artifacts.get("session_id"), + stream_url=artifacts.get("stream_url"), + files=[{"path": file} if isinstance(file, str) else file for file in artifacts.get("files", [])], component_id=self.id, + type=self.artifacts_type, ).model_dump(exclude_none=True) ] except KeyError: @@ -439,12 +444,11 @@ class Vertex: # We need to set the artifacts to pass information # to the frontend self.set_artifacts() - artifacts = self.artifacts + artifacts = self.artifacts_raw if isinstance(artifacts, dict): messages = self.extract_messages_from_artifacts(artifacts) else: messages = [] - result_dict = ResultData( results=result_dict, artifacts=artifacts, @@ -525,12 +529,13 @@ class Vertex: Returns: The built result if use_result is True, else the built object. """ + flow_id = self.graph.flow_id if not self._built: - log_transaction(source=self, target=requester, flow_id=self.graph.flow_id, status="error") + log_transaction(flow_id, vertex=self, target=requester, status="error") raise ValueError(f"Component {self.display_name} has not been built yet") result = self._built_result if self.use_result else self._built_object - log_transaction(source=self, target=requester, flow_id=self.graph.flow_id, status="success") + log_transaction(flow_id, vertex=self, target=requester, status="success") return result async def _build_vertex_and_update_params(self, key, vertex: "Vertex"): @@ -624,6 +629,8 @@ class Vertex: self._built_object, self.artifacts = result elif len(result) == 3: self._custom_component, self._built_object, self.artifacts = result + self.artifacts_raw = self.artifacts.get("raw", None) + self.artifacts_type = self.artifacts.get("type", None) or ArtifactType.UNKNOWN.value else: self._built_object = result @@ -664,6 +671,7 @@ class Vertex: self, user_id=None, inputs: Optional[Dict[str, Any]] = None, + files: Optional[list[str]] = None, requester: Optional["Vertex"] = None, **kwargs, ) -> Any: @@ -681,9 +689,14 @@ class Vertex: return await self.get_requester_result(requester) self._reset() - if self._is_chat_input() and inputs: - inputs = {"input_value": inputs.get(INPUT_FIELD_NAME, "")} - self.update_raw_params(inputs, overwrite=True) + if self._is_chat_input() and (inputs or files): + chat_input = {} + if inputs: + chat_input.update({"input_value": inputs.get(INPUT_FIELD_NAME, "")}) + if files: + chat_input.update({"files": files}) + + self.update_raw_params(chat_input, overwrite=True) # Run steps for step in self.steps: diff --git a/src/backend/base/langflow/graph/vertex/types.py b/src/backend/base/langflow/graph/vertex/types.py index 590c38c24..ba91a2597 100644 --- 
a/src/backend/base/langflow/graph/vertex/types.py +++ b/src/backend/base/langflow/graph/vertex/types.py @@ -2,13 +2,14 @@ import json from typing import AsyncIterator, Dict, Iterator, List import yaml -from langchain_core.messages import AIMessage +from langchain_core.messages import AIMessage, AIMessageChunk from loguru import logger from langflow.graph.schema import CHAT_COMPONENTS, RECORDS_COMPONENTS, InterfaceComponentTypes -from langflow.graph.utils import UnbuiltObject, serialize_field +from langflow.graph.utils import ArtifactType, UnbuiltObject, serialize_field from langflow.graph.vertex.base import Vertex from langflow.schema import Record +from langflow.schema.message import Message from langflow.schema.schema import INPUT_FIELD_NAME from langflow.services.monitor.utils import log_vertex_build from langflow.utils.schemas import ChatOutputResponse, RecordOutputResponse @@ -83,10 +84,11 @@ class InterfaceVertex(Vertex): sender = self.params.get("sender", None) sender_name = self.params.get("sender_name", None) message = self.params.get(INPUT_FIELD_NAME, None) + files = [{"path": file} if isinstance(file, str) else file for file in self.params.get("files", [])] if isinstance(message, str): message = unescape_string(message) stream_url = None - if isinstance(self._built_object, AIMessage): + if isinstance(self._built_object, (AIMessage, AIMessageChunk)): artifacts = ChatOutputResponse.from_message( self._built_object, sender=sender, @@ -97,23 +99,27 @@ class InterfaceVertex(Vertex): # Turn the dict into a pleasing to # read JSON inside a code block message = dict_to_codeblock(self._built_object) - elif isinstance(self._built_object, Record): - message = self._built_object.text - elif isinstance(message, (AsyncIterator, Iterator)): - stream_url = self.build_stream_url() - message = "" + elif isinstance(self._built_object, Message): + if isinstance(message, (AsyncIterator, Iterator)): + stream_url = self.build_stream_url() + message = "" + self._built_object.text = message + else: + message = self._built_object.text elif not isinstance(self._built_object, str): message = str(self._built_object) # if the message is a generator or iterator # it means that it is a stream of messages else: message = self._built_object - + artifact_type = ArtifactType.STREAM if stream_url is not None else ArtifactType.OBJECT artifacts = ChatOutputResponse( message=message, sender=sender, sender_name=sender_name, stream_url=stream_url, + files=files, + type=artifact_type, ) self.will_stream = stream_url is not None @@ -195,6 +201,8 @@ class InterfaceVertex(Vertex): message=complete_message, sender=self.params.get("sender", ""), sender_name=self.params.get("sender_name", ""), + files=[{"path": file} if isinstance(file, str) else file for file in self.params.get("files", [])], + type=ArtifactType.OBJECT.value, ).model_dump() self.params[INPUT_FIELD_NAME] = complete_message self._built_object = Record(text=complete_message, data=self.artifacts) diff --git a/src/backend/base/langflow/graph/vertex/utils.py b/src/backend/base/langflow/graph/vertex/utils.py index 59a1c1949..0f69e4b2d 100644 --- a/src/backend/base/langflow/graph/vertex/utils.py +++ b/src/backend/base/langflow/graph/vertex/utils.py @@ -1,9 +1,5 @@ from typing import TYPE_CHECKING -from loguru import logger - -from langflow.services.deps import get_monitor_service - if TYPE_CHECKING: from langflow.graph.vertex.base import Vertex @@ -21,34 +17,3 @@ def build_clean_params(target: "Vertex") -> dict: if isinstance(value, list): params[key] = [item for 
item in value if isinstance(item, (str, int, bool, float, list, dict))] return params - - -def log_transaction(source: "Vertex", target: "Vertex", flow_id, status, error=None): - """ - Logs a transaction between two vertices. - - Args: - source (Vertex): The source vertex of the transaction. - target (Vertex): The target vertex of the transaction. - status: The status of the transaction. - error (Optional): Any error associated with the transaction. - - Raises: - Exception: If there is an error while logging the transaction. - - """ - try: - monitor_service = get_monitor_service() - clean_params = build_clean_params(target) - data = { - "source": source.vertex_type, - "target": target.vertex_type, - "target_args": clean_params, - "timestamp": monitor_service.get_timestamp(), - "status": status, - "error": error, - "flow_id": flow_id, - } - monitor_service.add_row(table_name="transactions", data=data) - except Exception as e: - logger.error(f"Error logging transaction: {e}") diff --git a/src/backend/base/langflow/helpers/__init__.py b/src/backend/base/langflow/helpers/__init__.py index adfa72088..38b460af2 100644 --- a/src/backend/base/langflow/helpers/__init__.py +++ b/src/backend/base/langflow/helpers/__init__.py @@ -1,3 +1,3 @@ -from .record import docs_to_records, records_to_text +from .record import docs_to_records, records_to_text, messages_to_text -__all__ = ["docs_to_records", "records_to_text"] +__all__ = ["docs_to_records", "records_to_text", "messages_to_text"] diff --git a/src/backend/base/langflow/helpers/flow.py b/src/backend/base/langflow/helpers/flow.py index 9a8a7c3b5..61674942a 100644 --- a/src/backend/base/langflow/helpers/flow.py +++ b/src/backend/base/langflow/helpers/flow.py @@ -6,7 +6,8 @@ from pydantic.v1 import BaseModel, Field, create_model from sqlmodel import Session, select from langflow.graph.schema import RunOutputs -from langflow.schema.schema import INPUT_FIELD_NAME, Record +from langflow.schema import Record +from langflow.schema.schema import INPUT_FIELD_NAME from langflow.services.database.models.flow import Flow from langflow.services.deps import get_session, get_settings_service, session_scope @@ -90,7 +91,9 @@ async def run_flow( fallback_to_env_vars = get_settings_service().settings.fallback_to_env_var - return await graph.arun(inputs_list, inputs_components=inputs_components, types=types, fallback_to_env_vars=fallback_to_env_vars) + return await graph.arun( + inputs_list, inputs_components=inputs_components, types=types, fallback_to_env_vars=fallback_to_env_vars + ) def generate_function_for_flow( @@ -249,7 +252,7 @@ def get_flow_by_id_or_endpoint_name( flow = db.get(Flow, flow_id) except ValueError: endpoint_name = flow_id_or_name - stmt = select(Flow).where(Flow.name == endpoint_name) + stmt = select(Flow).where(Flow.endpoint_name == endpoint_name) if user_id: stmt = stmt.where(Flow.user_id == user_id) flow = db.exec(stmt).first() @@ -257,3 +260,24 @@ def get_flow_by_id_or_endpoint_name( raise HTTPException(status_code=404, detail=f"Flow identifier {flow_id_or_name} not found") return flow + + +def generate_unique_flow_name(flow_name, user_id, session): + original_name = flow_name + n = 1 + while True: + # Check if a flow with the given name exists + existing_flow = session.exec( + select(Flow).where( + Flow.name == flow_name, + Flow.user_id == user_id, + ) + ).first() + + # If no flow with the given name exists, return the name + if not existing_flow: + return flow_name + + # If a flow with the name already exists, append (n) to the name and 
increment n + flow_name = f"{original_name} ({n})" + n += 1 diff --git a/src/backend/base/langflow/helpers/folders.py b/src/backend/base/langflow/helpers/folders.py new file mode 100644 index 000000000..fa6f27fcc --- /dev/null +++ b/src/backend/base/langflow/helpers/folders.py @@ -0,0 +1,23 @@ +from langflow.services.database.models.folder.model import Folder +from sqlalchemy import select + + +def generate_unique_folder_name(folder_name, user_id, session): + original_name = folder_name + n = 1 + while True: + # Check if a folder with the given name exists + existing_folder = session.exec( + select(Folder).where( + Folder.name == folder_name, + Folder.user_id == user_id, + ) + ).first() + + # If no folder with the given name exists, return the name + if not existing_folder: + return folder_name + + # If a folder with the name already exists, append (n) to the name and increment n + folder_name = f"{original_name} ({n})" + n += 1 \ No newline at end of file diff --git a/src/backend/base/langflow/helpers/record.py b/src/backend/base/langflow/helpers/record.py index 7c13a9ad4..88d0bcd13 100644 --- a/src/backend/base/langflow/helpers/record.py +++ b/src/backend/base/langflow/helpers/record.py @@ -1,7 +1,9 @@ from typing import Union + from langchain_core.documents import Document from langflow.schema import Record +from langflow.schema.message import Message def docs_to_records(documents: list[Document]) -> list[Record]: @@ -27,7 +29,7 @@ def records_to_text(template: str, records: Union[Record, list[Record]]) -> str: Returns: list[str]: The converted list of texts. """ - if isinstance(records, Record): + if isinstance(records, (Record)): records = [records] # Check if there are any format strings in the template _records = [] @@ -39,3 +41,27 @@ def records_to_text(template: str, records: Union[Record, list[Record]]) -> str: formated_records = [template.format(data=record.data, **record.data) for record in _records] return "\n".join(formated_records) + + +def messages_to_text(template: str, messages: Union[Message, list[Message]]) -> str: + """ + Converts a list of Messages to a list of texts. + + Args: + messages (list[Message]): The list of Messages to convert. + + Returns: + list[str]: The converted list of texts. 
+ """ + if isinstance(messages, (Message)): + messages = [messages] + # Check if there are any format strings in the template + _messages = [] + for message in messages: + # If it is not a message, create one with the key "text" + if not isinstance(message, Message): + raise ValueError("All elements in the list must be of type Message.") + _messages.append(message) + + formated_messages = [template.format(data=message.model_dump(), **message.model_dump()) for message in _messages] + return "\n".join(formated_messages) diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-01.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-01.png new file mode 100644 index 000000000..fa4fed5d1 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-01.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-02.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-02.png new file mode 100644 index 000000000..6519e7657 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-02.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-03.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-03.png new file mode 100644 index 000000000..512a5b037 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-03.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-04.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-04.png new file mode 100644 index 000000000..6c84a0792 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-04.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-05.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-05.png new file mode 100644 index 000000000..8e4b22298 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-05.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-06.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-06.png new file mode 100644 index 000000000..d317eb499 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-06.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-07.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-07.png new file mode 100644 index 000000000..ed2ed8214 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-07.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-08.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-08.png new file mode 100644 index 000000000..0785f59ea Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-08.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-09.png 
b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-09.png new file mode 100644 index 000000000..8dd5b1677 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-09.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-10.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-10.png new file mode 100644 index 000000000..058dff1a8 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-10.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-11.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-11.png new file mode 100644 index 000000000..a517fad0d Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-11.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-12.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-12.png new file mode 100644 index 000000000..508590f18 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-12.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-13.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-13.png new file mode 100644 index 000000000..90865a49d Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-13.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-14.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-14.png new file mode 100644 index 000000000..269621bb2 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-14.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-15.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-15.png new file mode 100644 index 000000000..da85a4e30 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-15.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-16.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-16.png new file mode 100644 index 000000000..cf30ae136 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-16.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-17.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-17.png new file mode 100644 index 000000000..d53cd7997 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-17.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-18.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-18.png new file mode 100644 index 000000000..e0ac43aab Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-18.png differ diff --git 
a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-19.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-19.png new file mode 100644 index 000000000..d04a96a27 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-19.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-20.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-20.png new file mode 100644 index 000000000..e2d6e99bc Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-20.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-21.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-21.png new file mode 100644 index 000000000..392b004e2 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-21.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-22.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-22.png new file mode 100644 index 000000000..606aed15d Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-22.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-23.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-23.png new file mode 100644 index 000000000..c16d96d41 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-23.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-24.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-24.png new file mode 100644 index 000000000..e0dd6a1b2 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-24.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-25.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-25.png new file mode 100644 index 000000000..08aeb61c3 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-25.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-26.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-26.png new file mode 100644 index 000000000..312ef035c Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-26.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-27.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-27.png new file mode 100644 index 000000000..95972203e Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-01-27.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-01.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-01.png new file mode 100644 index 000000000..b55f875ac Binary files /dev/null and 
b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-01.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-02.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-02.png new file mode 100644 index 000000000..b51a3b136 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-02.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-03.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-03.png new file mode 100644 index 000000000..8b386a081 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-03.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-04.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-04.png new file mode 100644 index 000000000..e36804f81 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-04.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-05.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-05.png new file mode 100644 index 000000000..7e4661821 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-05.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-06.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-06.png new file mode 100644 index 000000000..5ce14b221 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-06.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-07.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-07.png new file mode 100644 index 000000000..c91f89584 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-07.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-08.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-08.png new file mode 100644 index 000000000..4f4c4ab2e Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-08.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-09.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-09.png new file mode 100644 index 000000000..9c1b5b34d Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-09.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-10.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-10.png new file mode 100644 index 000000000..6746a9cc1 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-10.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-11.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-11.png 
new file mode 100644 index 000000000..771ed1869 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-11.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-12.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-12.png new file mode 100644 index 000000000..f32aa58ee Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-12.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-13.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-13.png new file mode 100644 index 000000000..ae8db49a6 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-13.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-14.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-14.png new file mode 100644 index 000000000..038272194 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-14.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-15.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-15.png new file mode 100644 index 000000000..073b456a0 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-15.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-16.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-16.png new file mode 100644 index 000000000..5369cf9af Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-16.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-17.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-17.png new file mode 100644 index 000000000..1f143bc30 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-17.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-18.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-18.png new file mode 100644 index 000000000..3456bf542 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-18.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-19.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-19.png new file mode 100644 index 000000000..a8f673724 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-19.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-20.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-20.png new file mode 100644 index 000000000..5384bf7a1 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-20.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-21.png 
b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-21.png new file mode 100644 index 000000000..1c6a39bbf Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-21.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-22.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-22.png new file mode 100644 index 000000000..976f94dc1 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-22.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-23.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-23.png new file mode 100644 index 000000000..c6ca79192 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-23.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-24.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-24.png new file mode 100644 index 000000000..429f74046 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-24.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-25.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-25.png new file mode 100644 index 000000000..38aeea19c Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-25.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-26.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-26.png new file mode 100644 index 000000000..65342b59d Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-26.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-27.png b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-27.png new file mode 100644 index 000000000..1c2fc7717 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/People/People Avatar-02-27.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/026-alien.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/026-alien.png new file mode 100644 index 000000000..218c03407 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/026-alien.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/027-satellite.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/027-satellite.png new file mode 100644 index 000000000..f72f22226 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/027-satellite.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/028-alien.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/028-alien.png new file mode 100644 index 000000000..2e558a69d Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/028-alien.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/029-telescope.png 
b/src/backend/base/langflow/initial_setup/profile_pictures/Space/029-telescope.png new file mode 100644 index 000000000..6c3622fea Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/029-telescope.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/030-books.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/030-books.png new file mode 100644 index 000000000..f1b5cf777 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/030-books.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/031-planet.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/031-planet.png new file mode 100644 index 000000000..289a237c9 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/031-planet.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/032-constellation.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/032-constellation.png new file mode 100644 index 000000000..ca09192a6 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/032-constellation.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/033-planet.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/033-planet.png new file mode 100644 index 000000000..de3bf7548 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/033-planet.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/034-alien.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/034-alien.png new file mode 100644 index 000000000..0ed7f3a46 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/034-alien.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/035-globe.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/035-globe.png new file mode 100644 index 000000000..b58ec3148 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/035-globe.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/036-eclipse.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/036-eclipse.png new file mode 100644 index 000000000..4cb944fd2 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/036-eclipse.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/037-meteor.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/037-meteor.png new file mode 100644 index 000000000..f1e11b5ed Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/037-meteor.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/038-eclipse.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/038-eclipse.png new file mode 100644 index 000000000..63c8893ed Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/038-eclipse.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/039-Asteroid.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/039-Asteroid.png new file mode 100644 index 000000000..8b858f28e Binary files /dev/null and 
b/src/backend/base/langflow/initial_setup/profile_pictures/Space/039-Asteroid.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/040-mission.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/040-mission.png new file mode 100644 index 000000000..0befc44f1 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/040-mission.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/041-spaceship.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/041-spaceship.png new file mode 100644 index 000000000..d499c98b6 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/041-spaceship.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/042-space shuttle.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/042-space shuttle.png new file mode 100644 index 000000000..f13df646b Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/042-space shuttle.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/043-space shuttle.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/043-space shuttle.png new file mode 100644 index 000000000..136dc8031 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/043-space shuttle.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/044-rocket.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/044-rocket.png new file mode 100644 index 000000000..16d60e221 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/044-rocket.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/045-astronaut.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/045-astronaut.png new file mode 100644 index 000000000..fdb107548 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/045-astronaut.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/046-rocket.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/046-rocket.png new file mode 100644 index 000000000..e2808eb39 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/046-rocket.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/047-computer.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/047-computer.png new file mode 100644 index 000000000..cc3bbf904 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/047-computer.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/048-satellite.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/048-satellite.png new file mode 100644 index 000000000..0548cb820 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/048-satellite.png differ diff --git a/src/backend/base/langflow/initial_setup/profile_pictures/Space/049-astronaut.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/049-astronaut.png new file mode 100644 index 000000000..99654c52e Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/049-astronaut.png differ diff --git 
a/src/backend/base/langflow/initial_setup/profile_pictures/Space/050-space robot.png b/src/backend/base/langflow/initial_setup/profile_pictures/Space/050-space robot.png new file mode 100644 index 000000000..52c23b249 Binary files /dev/null and b/src/backend/base/langflow/initial_setup/profile_pictures/Space/050-space robot.png differ diff --git a/src/backend/base/langflow/initial_setup/setup.py b/src/backend/base/langflow/initial_setup/setup.py index 27574950c..56739cd5c 100644 --- a/src/backend/base/langflow/initial_setup/setup.py +++ b/src/backend/base/langflow/initial_setup/setup.py @@ -1,5 +1,7 @@ +import json import logging import os +import shutil from collections import defaultdict from copy import deepcopy from datetime import datetime, timezone @@ -11,17 +13,14 @@ from emoji import demojize, purely_emoji # type: ignore from loguru import logger from sqlmodel import select -from langflow.base.constants import FIELD_FORMAT_ATTRIBUTES, NODE_FORMAT_ATTRIBUTES +from langflow.base.constants import FIELD_FORMAT_ATTRIBUTES, NODE_FORMAT_ATTRIBUTES, ORJSON_OPTIONS from langflow.interface.types import get_all_components from langflow.services.auth.utils import create_super_user from langflow.services.database.models.flow.model import Flow, FlowCreate from langflow.services.database.models.folder.model import Folder, FolderCreate -from langflow.services.database.models.user.crud import get_user_by_username -from langflow.services.deps import get_settings_service, session_scope - from langflow.services.database.models.folder.utils import create_default_folder_if_it_doesnt_exist -from langflow.services.deps import get_settings_service, session_scope, get_variable_service - +from langflow.services.database.models.user.crud import get_user_by_username +from langflow.services.deps import get_settings_service, get_storage_service, get_variable_service, session_scope STARTER_FOLDER_NAME = "Starter Projects" STARTER_FOLDER_DESCRIPTION = "Starter projects to help you get started in Langflow." 
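A minimal usage sketch of the `Prompt.from_template_and_variables` API introduced above in `langflow/field_typing/prompt.py` (and re-exported from `langflow.field_typing` in this changeset); the template text and variable names here are hypothetical and shown only to illustrate the call shape, not taken from the diff:

```python
import asyncio

# Prompt is re-exported from langflow.field_typing after this changeset.
from langflow.field_typing import Prompt


async def main():
    # Hypothetical template and variables, purely for illustration.
    prompt = await Prompt.from_template_and_variables(
        template="Summarize this request: {user_input}",
        variables={"user_input": "Hello, world!"},
    )
    # from_template_and_variables() formats the template, wraps the result in a
    # ChatPromptTemplate built from a single HumanMessage, and stores the
    # serialized prompt on the instance; load_lc_prompt() deserializes it back
    # into a LangChain prompt object.
    lc_prompt = prompt.load_lc_prompt()
    print(lc_prompt)


asyncio.run(main())
```

The constructor is async so that any `Message` values passed in `variables` can have their file contents resolved (via `get_file_content_dicts()`) and appended to the chat prompt before it is serialized.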
@@ -74,10 +73,84 @@ def update_projects_components_with_latest_component_versions(project_data, all_ } ) node_data["template"][field_name][attr] = field_dict[attr] + # Remove fields that are not in the latest template + if node_data.get("display_name") != "Prompt": + for field_name in list(node_data["template"].keys()): + if field_name not in latest_template: + node_data["template"].pop(field_name) log_node_changes(node_changes_log) return project_data_copy +def update_edges_with_latest_component_versions(project_data): + edge_changes_log = defaultdict(list) + project_data_copy = deepcopy(project_data) + for edge in project_data_copy.get("edges", []): + source_handle = edge.get("data").get("sourceHandle") + target_handle = edge.get("data").get("targetHandle") + # Now find the source and target nodes in the nodes list + source_node = next( + (node for node in project_data.get("nodes", []) if node.get("id") == edge.get("source")), None + ) + target_node = next( + (node for node in project_data.get("nodes", []) if node.get("id") == edge.get("target")), None + ) + if source_node and target_node: + source_node_data = source_node.get("data").get("node") + target_node_data = target_node.get("data").get("node") + new_base_classes = source_node_data.get("base_classes") + if source_handle["baseClasses"] != new_base_classes: + edge_changes_log[source_node_data["display_name"]].append( + { + "attr": "baseClasses", + "old_value": source_handle["baseClasses"], + "new_value": new_base_classes, + } + ) + source_handle["baseClasses"] = new_base_classes + + field_name = target_handle.get("fieldName") + if field_name in target_node_data.get("template"): + if target_handle["inputTypes"] != target_node_data.get("template").get(field_name).get("input_types"): + edge_changes_log[target_node_data["display_name"]].append( + { + "attr": "inputTypes", + "old_value": target_handle["inputTypes"], + "new_value": target_node_data.get("template").get(field_name).get("input_types"), + } + ) + target_handle["inputTypes"] = target_node_data.get("template").get(field_name).get("input_types") + escaped_source_handle = escape_json_dump(source_handle) + escaped_target_handle = escape_json_dump(target_handle) + if edge["sourceHandle"] != escaped_source_handle: + edge_changes_log[source_node_data["display_name"]].append( + { + "attr": "sourceHandle", + "old_value": edge["sourceHandle"], + "new_value": escaped_source_handle, + } + ) + edge["sourceHandle"] = escaped_source_handle + if edge["targetHandle"] != escaped_target_handle: + edge_changes_log[target_node_data["display_name"]].append( + { + "attr": "targetHandle", + "old_value": edge["targetHandle"], + "new_value": escaped_target_handle, + } + ) + edge["targetHandle"] = escaped_target_handle + + else: + logger.error(f"Source or target node not found for edge: {edge}") + log_node_changes(edge_changes_log) + return project_data_copy + + +def escape_json_dump(edge_dict): + return json.dumps(edge_dict).replace('"', "œ") + + def log_node_changes(node_changes_log): # The idea here is to log the changes that were made to the nodes in debug # Something like: @@ -104,6 +177,25 @@ def load_starter_projects() -> list[tuple[Path, dict]]: return starter_projects +def copy_profile_pictures(): + config_dir = get_storage_service().settings_service.settings.config_dir + origin = Path(__file__).parent / "profile_pictures" + target = Path(config_dir) / "profile_pictures" + + if not os.path.exists(origin): + raise ValueError(f"The source folder '{origin}' does not exist.") + + if not 
os.path.exists(target): + os.makedirs(target) + + try: + shutil.copytree(origin, target, dirs_exist_ok=True) + logger.debug(f"Folder copied from '{origin}' to '{target}'") + + except Exception as e: + logger.error(f"Error copying the folder: {e}") + + def get_project_data(project): project_name = project.get("name") project_description = project.get("description") @@ -135,7 +227,7 @@ def get_project_data(project): def update_project_file(project_path, project, updated_project_data): project["data"] = updated_project_data with open(project_path, "w", encoding="utf-8") as f: - f.write(orjson.dumps(project, option=orjson.OPT_INDENT_2).decode()) + f.write(orjson.dumps(project, option=ORJSON_OPTIONS).decode()) logger.info(f"Updated starter project {project['name']} file") @@ -221,6 +313,7 @@ def _is_valid_uuid(val): return False return str(uuid_obj) == val + def load_flows_from_directory(): settings_service = get_settings_service() flows_path = settings_service.settings.load_flows_path @@ -262,6 +355,7 @@ def load_flows_from_directory(): session.add(flow) session.commit() + def find_existing_flow(session, flow_id, flow_endpoint_name): if flow_endpoint_name: stmt = select(Flow).where(Flow.endpoint_name == flow_endpoint_name) @@ -271,6 +365,8 @@ def find_existing_flow(session, flow_id, flow_endpoint_name): if existing := session.exec(stmt).first(): return existing return None + + def create_or_update_starter_projects(): components_paths = get_settings_service().settings.components_path try: @@ -282,6 +378,7 @@ def create_or_update_starter_projects(): new_folder = create_starter_folder(session) starter_projects = load_starter_projects() delete_start_projects(session, new_folder.id) + copy_profile_pictures() for project_path, project in starter_projects: ( project_name, @@ -295,6 +392,7 @@ def create_or_update_starter_projects(): updated_project_data = update_projects_components_with_latest_component_versions( project_data, all_types_dict ) + updated_project_data = update_edges_with_latest_component_versions(updated_project_data) if updated_project_data != project_data: project_data = updated_project_data # We also need to update the project data in the file diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, world!).json b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, world!).json index 38e01111b..ca22d72ed 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, world!).json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, world!).json @@ -1,353 +1,251 @@ { - "id": "c091a57f-43a7-4a5e-b352-035ae8d8379c", "data": { + "edges": [ + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "Text", + "str" + ], + "dataType": "OpenAIModel", + "id": "OpenAIModel-k39HS" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-njtka", + "inputTypes": [ + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-OpenAIModel-k39HS{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-k39HSœ}-ChatOutput-njtka{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-njtkaœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "source": "OpenAIModel-k39HS", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œTextœ, œstrœ], œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-k39HSœ}", + "style": { + "stroke": "#555" + }, + "target": "ChatOutput-njtka", + "targetHandle": "{œfieldNameœ: 
œinput_valueœ, œidœ: œChatOutput-njtkaœ, œinputTypesœ: [œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "str", + "Text" + ], + "dataType": "Prompt", + "id": "Prompt-uxBqP" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "OpenAIModel-k39HS", + "inputTypes": [ + "Text", + "Record", + "Prompt" + ], + "type": "str" + } + }, + "id": "reactflow__edge-Prompt-uxBqP{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-uxBqPœ}-OpenAIModel-k39HS{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-k39HSœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "source": "Prompt-uxBqP", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œstrœ, œTextœ], œdataTypeœ: œPromptœ, œidœ: œPrompt-uxBqPœ}", + "style": { + "stroke": "#555" + }, + "target": "OpenAIModel-k39HS", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-k39HSœ, œinputTypesœ: [œTextœ, œRecordœ, œPromptœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "Record", + "str", + "Text" + ], + "dataType": "ChatInput", + "id": "ChatInput-P3fgL" + }, + "targetHandle": { + "fieldName": "user_input", + "id": "Prompt-uxBqP", + "inputTypes": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-ChatInput-P3fgL{œbaseClassesœ:[œobjectœ,œRecordœ,œstrœ,œTextœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-P3fgLœ}-Prompt-uxBqP{œfieldNameœ:œuser_inputœ,œidœ:œPrompt-uxBqPœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", + "source": "ChatInput-P3fgL", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œRecordœ, œstrœ, œTextœ], œdataTypeœ: œChatInputœ, œidœ: œChatInput-P3fgLœ}", + "style": { + "stroke": "#555" + }, + "target": "Prompt-uxBqP", + "targetHandle": "{œfieldNameœ: œuser_inputœ, œidœ: œPrompt-uxBqPœ, œinputTypesœ: [œDocumentœ, œBaseOutputParserœ, œRecordœ, œTextœ], œtypeœ: œstrœ}" + } + ], "nodes": [ { - "id": "Prompt-uxBqP", - "type": "genericNode", - "position": { - "x": 53.588791333410654, - "y": -107.07318910019967 - }, "data": { - "type": "Prompt", + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "id": "Prompt-uxBqP", "node": { + "base_classes": [ + "object", + "str", + "Text" + ], + "beta": false, + "custom_fields": { + "template": [ + "user_input" + ] + }, + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "documentation": "", + "error": null, + "field_formatters": {}, + "field_order": [], + "frozen": false, + "full_path": null, + "icon": "prompts", + "is_composition": null, + "is_input": null, + "is_output": null, + "name": "", + "output_types": [ + "Prompt" + ], "template": { + "_type": "CustomComponent", "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n 
) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", "advanced": true, "dynamic": true, + "fileTypes": [], + "file_path": "", "info": "", + "list": false, "load_from_db": false, - "title_case": false + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.custom import CustomComponent\nfrom langflow.field_typing import TemplateField\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Prompt:\n prompt = await Prompt.from_template_and_variables(template, kwargs)\n self.status = prompt.format_text()\n return prompt\n" }, "template": { - "type": "prompt", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "Answer the user as if you were a pirate.\n\nUser: {user_input}\n\nAnswer: ", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "template", + "advanced": false, "display_name": "Template", - "advanced": false, - "input_types": ["Text"], "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent", - "user_input": { - "field_type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", "fileTypes": [], "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "template", "password": false, - "name": "user_input", - "display_name": "user_input", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "prompt", + "value": "Answer the user as if you were a pirate.\n\nUser: {user_input}\n\nAnswer: " + }, + "user_input": { "advanced": false, + "display_name": "user_input", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", "input_types": [ "Document", "BaseOutputParser", "Record", "Text" ], - "dynamic": false, - "info": "", + "list": false, "load_from_db": false, + "multiline": true, + "name": "user_input", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "type": "str" + "type": "str", + "value": "" } - }, - "description": "Create a prompt template with dynamic variables.", - "icon": "prompts", - "is_input": null, - "is_output": null, - "is_composition": null, - "base_classes": ["object", "str", "Text"], - "name": "", - "display_name": "Prompt", - "documentation": "", - "custom_fields": { - "template": ["user_input"] - }, - "output_types": ["Text"], - "full_path": null, - 
"field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false, - "error": null + } }, - "id": "Prompt-uxBqP", - "description": "Create a prompt template with dynamic variables.", - "display_name": "Prompt" + "type": "Prompt" }, - "selected": true, - "width": 384, - "height": 383, "dragging": false, + "height": 383, + "id": "Prompt-uxBqP", + "position": { + "x": 53.588791333410654, + "y": -107.07318910019967 + }, "positionAbsolute": { "x": 53.588791333410654, "y": -107.07318910019967 - } + }, + "selected": true, + "type": "genericNode", + "width": 384 }, { - "id": "OpenAIModel-k39HS", - "type": "genericNode", - "position": { - "x": 634.8148772766217, - "y": 27.035057029045305 - }, "data": { - "type": "OpenAIModel", + "description": "Generates text using OpenAI LLMs.", + "display_name": "OpenAI", + "id": "OpenAIModel-k39HS", "node": { - "template": { - "input_value": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Input", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "max_tokens": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 256, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "max_tokens", - "display_name": "Max Tokens", - "advanced": true, - "dynamic": false, - "info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.", - "load_from_db": false, - "title_case": false - }, - "model_kwargs": { - "type": "NestedDict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": {}, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "model_kwargs", - "display_name": "Model Kwargs", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "model_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "gpt-3.5-turbo", - "fileTypes": [], - "file_path": "", - "password": false, - "options": [ - "gpt-4o", - "gpt-4-turbo", - "gpt-4-turbo-preview", - "gpt-3.5-turbo", - "gpt-3.5-turbo-0125" - ], - "name": "model_name", - "display_name": "Model Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_api_base": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "openai_api_base", - "display_name": "OpenAI API Base", - "advanced": true, - "dynamic": false, - "info": "The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_api_key": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": true, - "name": "openai_api_key", - "display_name": "OpenAI API Key", - "advanced": false, - "dynamic": false, - "info": "The OpenAI API Key to use for the OpenAI model.", - "load_from_db": true, - "title_case": false, - "input_types": ["Text"], - "value": "OPENAI_API_KEY" - }, - "stream": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "stream", - "display_name": "Stream", - "advanced": true, - "dynamic": false, - "info": "Stream the response from the model. Streaming works only in Chat.", - "load_from_db": false, - "title_case": false - }, - "system_message": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "system_message", - "display_name": "System Message", - "advanced": true, - "dynamic": false, - "info": "System message to pass to the model.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "temperature": { - "type": "float", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 0.1, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "temperature", - "display_name": "Temperature", - "advanced": false, - "dynamic": false, - "info": "", - "rangeSpec": { - "step_type": "float", - "min": -1, - "max": 1, - "step": 0.1 - }, - "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent" - }, - "description": "Generates text using OpenAI LLMs.", - "icon": "OpenAI", - "base_classes": ["object", "Text", "str"], - "display_name": "OpenAI", - "documentation": "", + "base_classes": [ + "object", + "Text", + "str" + ], + "beta": false, "custom_fields": { "input_value": null, - "openai_api_key": null, - "temperature": null, - "model_name": null, "max_tokens": null, "model_kwargs": null, + "model_name": null, "openai_api_base": null, + "openai_api_key": null, "stream": null, - "system_message": null + "system_message": null, + "temperature": null }, - "output_types": ["Text"], + "description": "Generates text using OpenAI LLMs.", + "display_name": "OpenAI", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [ "max_tokens", "model_kwargs", @@ -359,432 +257,561 @@ "system_message", "stream" ], - "beta": false + "frozen": false, + "icon": "OpenAI", + "output_types": [ + "Text" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom 
langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Input", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text", + "Record", + "Prompt" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str" + }, + "max_tokens": { + "advanced": true, + "display_name": "Max Tokens", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "max_tokens", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 256 + }, + "model_kwargs": { + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "model_kwargs", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "NestedDict", + "value": {} + }, + "model_name": { + "advanced": false, + "display_name": "Model Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "model_name", + "options": [ + "gpt-4o", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "gpt-3.5-turbo" + }, + "openai_api_base": { + "advanced": true, + "display_name": "OpenAI API Base", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "openai_api_base", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "openai_api_key": { + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The OpenAI API Key to use for the OpenAI model.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": true, + "multiline": false, + "name": "openai_api_key", + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": "OPENAI_API_KEY" + }, + "stream": { + "advanced": true, + "display_name": "Stream", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Stream the response from the model. 
Streaming works only in Chat.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "stream", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": true + }, + "system_message": { + "advanced": true, + "display_name": "System Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "System message to pass to the model.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "system_message", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "temperature": { + "advanced": false, + "display_name": "Temperature", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "temperature", + "password": false, + "placeholder": "", + "rangeSpec": { + "max": 1, + "min": -1, + "step": 0.1, + "step_type": "float" + }, + "required": false, + "show": true, + "title_case": false, + "type": "float", + "value": 0.1 + } + } }, - "id": "OpenAIModel-k39HS", - "description": "Generates text using OpenAI LLMs.", - "display_name": "OpenAI" + "type": "OpenAIModel" }, - "selected": false, - "width": 384, + "dragging": false, "height": 563, + "id": "OpenAIModel-k39HS", + "position": { + "x": 634.8148772766217, + "y": 27.035057029045305 + }, "positionAbsolute": { "x": 634.8148772766217, "y": 27.035057029045305 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "ChatOutput-njtka", - "type": "genericNode", - "position": { - "x": 1193.250417197867, - "y": 71.88476890163852 - }, "data": { - "type": "ChatOutput", + "id": "ChatOutput-njtka", "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Message", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "record_template": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "{text}", - "fileTypes": [], - "file_path": 
"", - "password": false, - "name": "record_template", - "display_name": "Record Template", - "advanced": true, - "dynamic": false, - "info": "In case of Message being a Record, this template will be used to convert it to text.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "return_record": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "return_record", - "display_name": "Return Record", - "advanced": true, - "dynamic": false, - "info": "Return the message as a record containing the sender, sender_name, and session_id.", - "load_from_db": false, - "title_case": false - }, - "sender": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "Machine", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Machine", "User"], - "name": "sender", - "display_name": "Sender Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "AI", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "sender_name", - "display_name": "Sender Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "session_id": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "session_id", - "display_name": "Session ID", - "advanced": true, - "dynamic": false, - "info": "If provided, the message will be stored in the memory.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" + "base_classes": [ + "Record", + "Text", + "str", + "object" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "record_template": null, + "return_record": null, + "sender": null, + "sender_name": null, + "session_id": null }, "description": "Display a chat message in the Playground.", - "icon": "ChatOutput", - "base_classes": ["Record", "Text", "str", "object"], "display_name": "Chat Output", "documentation": "", - "custom_fields": { - "sender": null, - "sender_name": null, - "input_value": null, - "session_id": null, - "return_record": null, - "record_template": null - }, - "output_types": ["Text", "Record"], "field_formatters": {}, - "frozen": false, "field_order": [], - "beta": false + "frozen": false, + "icon": "ChatOutput", + "output_types": [ + "Message" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.schema.message import Message\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = 
\"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n files: Optional[list[str]] = None,\n ) -> Message:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n files=files,\n )\n" + }, + "input_value": { + "advanced": false, + "display_name": "Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "sender": { + "advanced": true, + "display_name": "Sender Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "sender", + "options": [ + "Machine", + "User" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Machine" + }, + "sender_name": { + "advanced": false, + "display_name": "Sender Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "sender_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "AI" + }, + "session_id": { + "advanced": true, + "display_name": "Session ID", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "If provided, the message will be stored in the memory.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "session_id", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + } + } }, - "id": "ChatOutput-njtka" + "type": "ChatOutput" }, - "selected": false, - "width": 384, + "dragging": false, "height": 383, + "id": "ChatOutput-njtka", + "position": { + "x": 1193.250417197867, + "y": 71.88476890163852 + }, "positionAbsolute": { "x": 1193.250417197867, "y": 71.88476890163852 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { + "data": { + "id": "ChatInput-P3fgL", + "node": { + "base_classes": [ + "object", + "Record", + "str", + "Text" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "return_record": null, + "sender": null, + "sender_name": null, + "session_id": null + }, + "description": "Get chat inputs from the Playground.", + "display_name": "Chat Input", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "icon": "ChatInput", + "output_types": [ + "Message" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.schema.message import Message\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def 
build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n files: Optional[list[str]] = None,\n session_id: Optional[str] = None,\n ) -> Message:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n files=files,\n session_id=session_id,\n )\n" + }, + "input_value": { + "advanced": false, + "display_name": "Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "hi" + }, + "sender": { + "advanced": true, + "display_name": "Sender Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "sender", + "options": [ + "Machine", + "User" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "User" + }, + "sender_name": { + "advanced": false, + "display_name": "Sender Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "sender_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "User" + }, + "session_id": { + "advanced": true, + "display_name": "Session ID", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "If provided, the message will be stored in the memory.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "session_id", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + } + } + }, + "type": "ChatInput" + }, + "dragging": false, + "height": 375, "id": "ChatInput-P3fgL", - "type": "genericNode", "position": { "x": -495.2223093083827, "y": -232.56998443685862 }, - "data": { - "type": "ChatInput", - "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build_no_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n 
return_record=return_record,\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Message", - "advanced": false, - "input_types": [], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "value": "hi" - }, - "return_record": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "return_record", - "display_name": "Return Record", - "advanced": true, - "dynamic": false, - "info": "Return the message as a record containing the sender, sender_name, and session_id.", - "load_from_db": false, - "title_case": false - }, - "sender": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "User", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Machine", "User"], - "name": "sender", - "display_name": "Sender Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "User", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "sender_name", - "display_name": "Sender Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "session_id": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "session_id", - "display_name": "Session ID", - "advanced": true, - "dynamic": false, - "info": "If provided, the message will be stored in the memory.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Get chat inputs from the Playground.", - "icon": "ChatInput", - "base_classes": ["object", "Record", "str", "Text"], - "display_name": "Chat Input", - "documentation": "", - "custom_fields": { - "sender": null, - "sender_name": null, - "input_value": null, - "session_id": null, - "return_record": null - }, - "output_types": ["Text", "Record"], - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false - }, - "id": "ChatInput-P3fgL" - }, - "selected": false, - "width": 384, - "height": 375, "positionAbsolute": { "x": -495.2223093083827, "y": -232.56998443685862 }, - "dragging": false - } - ], - "edges": [ - { - "source": "OpenAIModel-k39HS", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-k39HSœ}", - "target": "ChatOutput-njtka", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-njtkaœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "ChatOutput-njtka", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "Text", "str"], - "dataType": "OpenAIModel", - "id": 
"OpenAIModel-k39HS" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-OpenAIModel-k39HS{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-k39HSœ}-ChatOutput-njtka{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-njtkaœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "Prompt-uxBqP", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-uxBqPœ}", - "target": "OpenAIModel-k39HS", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-k39HSœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "OpenAIModel-k39HS", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "str", "Text"], - "dataType": "Prompt", - "id": "Prompt-uxBqP" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-Prompt-uxBqP{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-uxBqPœ}-OpenAIModel-k39HS{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-k39HSœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "ChatInput-P3fgL", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œRecordœ,œstrœ,œTextœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-P3fgLœ}", - "target": "Prompt-uxBqP", - "targetHandle": "{œfieldNameœ:œuser_inputœ,œidœ:œPrompt-uxBqPœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "user_input", - "id": "Prompt-uxBqP", - "inputTypes": ["Document", "BaseOutputParser", "Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "Record", "str", "Text"], - "dataType": "ChatInput", - "id": "ChatInput-P3fgL" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-ChatInput-P3fgL{œbaseClassesœ:[œobjectœ,œRecordœ,œstrœ,œTextœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-P3fgLœ}-Prompt-uxBqP{œfieldNameœ:œuser_inputœ,œidœ:œPrompt-uxBqPœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}" + "selected": false, + "type": "genericNode", + "width": 384 } ], "viewport": { @@ -794,7 +821,8 @@ } }, "description": "This flow will get you experimenting with the basics of the UI, the Chat and the Prompt component. \n\nTry changing the Template in it to see how the model behaves. 
\nYou can change it to this and a Text Input into the `type_of_person` variable : \"Answer the user as if you were a pirate.\n\nUser: {user_input}\n\nAnswer: \" ", - "name": "Basic Prompting (Hello, World)", + "id": "c091a57f-43a7-4a5e-b352-035ae8d8379c", + "is_component": false, "last_tested_version": "1.0.0a4", - "is_component": false -} + "name": "Basic Prompting (Hello, World)" +} \ No newline at end of file diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Blog Writter.json b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Blog Writter.json index fcdef4056..4069fa6bd 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Blog Writter.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Blog Writter.json @@ -1,657 +1,603 @@ { - "id": "6ad5559d-fb66-4fdc-8f98-96f4ac12799d", "data": { + "edges": [ + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "Record" + ], + "dataType": "URL", + "id": "URL-HYPkR" + }, + "targetHandle": { + "fieldName": "reference_2", + "id": "Prompt-Rse03", + "inputTypes": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-URL-HYPkR{œbaseClassesœ:[œRecordœ],œdataTypeœ:œURLœ,œidœ:œURL-HYPkRœ}-Prompt-Rse03{œfieldNameœ:œreference_2œ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", + "selected": false, + "source": "URL-HYPkR", + "sourceHandle": "{œbaseClassesœ: [œRecordœ], œdataTypeœ: œURLœ, œidœ: œURL-HYPkRœ}", + "style": { + "stroke": "#555" + }, + "target": "Prompt-Rse03", + "targetHandle": "{œfieldNameœ: œreference_2œ, œidœ: œPrompt-Rse03œ, œinputTypesœ: [œDocumentœ, œBaseOutputParserœ, œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "str", + "Text", + "object" + ], + "dataType": "OpenAIModel", + "id": "OpenAIModel-gi29P" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-JPlxl", + "inputTypes": [ + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-OpenAIModel-gi29P{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-gi29Pœ}-ChatOutput-JPlxl{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-JPlxlœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "source": "OpenAIModel-gi29P", + "sourceHandle": "{œbaseClassesœ: [œstrœ, œTextœ, œobjectœ], œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-gi29Pœ}", + "style": { + "stroke": "#555" + }, + "target": "ChatOutput-JPlxl", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-JPlxlœ, œinputTypesœ: [œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "Record" + ], + "dataType": "URL", + "id": "URL-2cX90" + }, + "targetHandle": { + "fieldName": "reference_1", + "id": "Prompt-Rse03", + "inputTypes": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-URL-2cX90{œbaseClassesœ:[œRecordœ],œdataTypeœ:œURLœ,œidœ:œURL-2cX90œ}-Prompt-Rse03{œfieldNameœ:œreference_1œ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", + "source": "URL-2cX90", + "sourceHandle": "{œbaseClassesœ: [œRecordœ], œdataTypeœ: œURLœ, œidœ: œURL-2cX90œ}", + "style": { + "stroke": "#555" + }, + "target": "Prompt-Rse03", + "targetHandle": "{œfieldNameœ: 
œreference_1œ, œidœ: œPrompt-Rse03œ, œinputTypesœ: [œDocumentœ, œBaseOutputParserœ, œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "Text", + "str" + ], + "dataType": "TextInput", + "id": "TextInput-og8Or" + }, + "targetHandle": { + "fieldName": "instructions", + "id": "Prompt-Rse03", + "inputTypes": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-TextInput-og8Or{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-og8Orœ}-Prompt-Rse03{œfieldNameœ:œinstructionsœ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", + "source": "TextInput-og8Or", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œTextœ, œstrœ], œdataTypeœ: œTextInputœ, œidœ: œTextInput-og8Orœ}", + "style": { + "stroke": "#555" + }, + "target": "Prompt-Rse03", + "targetHandle": "{œfieldNameœ: œinstructionsœ, œidœ: œPrompt-Rse03œ, œinputTypesœ: [œDocumentœ, œBaseOutputParserœ, œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "Text", + "str" + ], + "dataType": "Prompt", + "id": "Prompt-Rse03" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "OpenAIModel-gi29P", + "inputTypes": [ + "Text", + "Record", + "Prompt" + ], + "type": "str" + } + }, + "id": "reactflow__edge-Prompt-Rse03{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-Rse03œ}-OpenAIModel-gi29P{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-gi29Pœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "selected": false, + "source": "Prompt-Rse03", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œTextœ, œstrœ], œdataTypeœ: œPromptœ, œidœ: œPrompt-Rse03œ}", + "style": { + "stroke": "#555" + }, + "target": "OpenAIModel-gi29P", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-gi29Pœ, œinputTypesœ: [œTextœ, œRecordœ, œPromptœ], œtypeœ: œstrœ}" + } + ], "nodes": [ { + "data": { + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "id": "Prompt-Rse03", + "node": { + "base_classes": [ + "object", + "Text", + "str" + ], + "beta": false, + "custom_fields": { + "template": [ + "reference_1", + "reference_2", + "instructions" + ] + }, + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "documentation": "", + "error": null, + "field_formatters": {}, + "field_order": [], + "frozen": false, + "full_path": null, + "icon": "prompts", + "is_composition": null, + "is_input": null, + "is_output": null, + "name": "", + "output_types": [ + "Prompt" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.custom import CustomComponent\nfrom langflow.field_typing import TemplateField\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n 
\"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Prompt:\n prompt = await Prompt.from_template_and_variables(template, kwargs)\n self.status = prompt.format_text()\n return prompt\n" + }, + "instructions": { + "advanced": false, + "display_name": "instructions", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "instructions", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "reference_1": { + "advanced": false, + "display_name": "reference_1", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "reference_1", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "reference_2": { + "advanced": false, + "display_name": "reference_2", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "reference_2", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "template": { + "advanced": false, + "display_name": "Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "prompt", + "value": "Reference 1:\n\n{reference_1}\n\n---\n\nReference 2:\n\n{reference_2}\n\n---\n\n{instructions}\n\nBlog: \n\n\n" + } + } + }, + "type": "Prompt" + }, + "dragging": false, + "height": 571, "id": "Prompt-Rse03", - "type": "genericNode", "position": { "x": 1331.381712783371, "y": 535.0279854229713 }, - "data": { - "type": "Prompt", - "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = 
f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "template": { - "type": "prompt", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "Reference 1:\n\n{reference_1}\n\n---\n\nReference 2:\n\n{reference_2}\n\n---\n\n{instructions}\n\nBlog: \n\n\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "template", - "display_name": "Template", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent", - "reference_1": { - "field_type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "reference_1", - "display_name": "reference_1", - "advanced": false, - "input_types": [ - "Document", - "BaseOutputParser", - "Record", - "Text" - ], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "type": "str" - }, - "reference_2": { - "field_type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "reference_2", - "display_name": "reference_2", - "advanced": false, - "input_types": [ - "Document", - "BaseOutputParser", - "Record", - "Text" - ], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "type": "str" - }, - "instructions": { - "field_type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "instructions", - "display_name": "instructions", - "advanced": false, - "input_types": [ - "Document", - "BaseOutputParser", - "Record", - "Text" - ], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "type": "str" - } - }, - "description": "Create a prompt template with dynamic variables.", - "icon": "prompts", - "is_input": null, - "is_output": null, - "is_composition": null, - "base_classes": ["object", "Text", "str"], - "name": "", - "display_name": "Prompt", - "documentation": "", - "custom_fields": { - "template": ["reference_1", "reference_2", "instructions"] - }, - "output_types": ["Text"], - "full_path": null, - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false, - "error": null - }, - "id": "Prompt-Rse03", - "description": "Create a prompt template with dynamic variables.", - "display_name": "Prompt" - }, - "selected": false, - "width": 384, - "height": 571, - "dragging": false, "positionAbsolute": { "x": 1331.381712783371, "y": 535.0279854229713 - } + }, + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "URL-HYPkR", - "type": "genericNode", - "position": { - "x": 568.2971412887712, - "y": 700.9983368007821 - }, "data": { - "type": "URL", + "id": "URL-HYPkR", "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Any, Dict\n\nfrom langchain_community.document_loaders.web_base import WebBaseLoader\n\nfrom langflow.custom import CustomComponent\nfrom 
langflow.schema import Record\n\n\nclass URLComponent(CustomComponent):\n display_name = \"URL\"\n description = \"Fetch content from one or more URLs.\"\n icon = \"layout-template\"\n\n def build_config(self) -> Dict[str, Any]:\n return {\n \"urls\": {\"display_name\": \"URL\"},\n }\n\n def build(\n self,\n urls: list[str],\n ) -> list[Record]:\n loader = WebBaseLoader(web_paths=urls)\n docs = loader.load()\n records = self.to_records(docs)\n self.status = records\n return records\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "urls": { - "type": "str", - "required": true, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "urls", - "display_name": "URL", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"], - "value": [ - "https://www.promptingguide.ai/techniques/prompt_chaining" - ] - }, - "_type": "CustomComponent" - }, - "description": "Fetch content from one or more URLs.", - "icon": "layout-template", - "base_classes": ["Record"], - "display_name": "URL", - "documentation": "", + "base_classes": [ + "Record" + ], + "beta": false, "custom_fields": { "urls": null }, - "output_types": ["Record"], + "description": "Fetch content from one or more URLs.", + "display_name": "URL", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [], - "beta": false + "frozen": false, + "icon": "layout-template", + "output_types": [ + "Record" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any, Dict\n\nfrom langchain_community.document_loaders.web_base import WebBaseLoader\n\nfrom langflow.custom import CustomComponent\nfrom langflow.schema import Record\n\n\nclass URLComponent(CustomComponent):\n display_name = \"URL\"\n description = \"Fetch content from one or more URLs.\"\n icon = \"layout-template\"\n\n def build_config(self) -> Dict[str, Any]:\n return {\n \"urls\": {\"display_name\": \"URL\"},\n }\n\n def build(\n self,\n urls: list[str],\n ) -> list[Record]:\n loader = WebBaseLoader(web_paths=urls)\n docs = loader.load()\n records = self.to_records(docs)\n self.status = records\n return records\n" + }, + "urls": { + "advanced": false, + "display_name": "URL", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "urls", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": [ + "https://www.promptingguide.ai/techniques/prompt_chaining" + ] + } + } }, - "id": "URL-HYPkR" + "type": "URL" }, - "selected": false, - "width": 384, + "dragging": false, "height": 281, + "id": "URL-HYPkR", + "position": { + "x": 568.2971412887712, + "y": 700.9983368007821 + }, "positionAbsolute": { "x": 568.2971412887712, "y": 700.9983368007821 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { + "data": { + "id": 
"ChatOutput-JPlxl", + "node": { + "base_classes": [ + "Text", + "Record", + "object", + "str" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "record_template": null, + "return_record": null, + "sender": null, + "sender_name": null, + "session_id": null + }, + "description": "Display a chat message in the Playground.", + "display_name": "Chat Output", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "icon": "ChatOutput", + "output_types": [ + "Message" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.schema.message import Message\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n files: Optional[list[str]] = None,\n ) -> Message:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n files=files,\n )\n" + }, + "input_value": { + "advanced": false, + "display_name": "Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "sender": { + "advanced": true, + "display_name": "Sender Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "sender", + "options": [ + "Machine", + "User" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Machine" + }, + "sender_name": { + "advanced": false, + "display_name": "Sender Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "sender_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "AI" + }, + "session_id": { + "advanced": true, + "display_name": "Session ID", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "If provided, the message will be stored in the memory.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "session_id", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + } + } + }, + "type": "ChatOutput" + }, + "height": 383, "id": "ChatOutput-JPlxl", - "type": "genericNode", "position": { "x": 2503.8617424688505, "y": 789.3005578928434 }, - "data": { - "type": "ChatOutput", - "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - 
"show": true, - "multiline": true, - "value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Message", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "record_template": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "{text}", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "record_template", - "display_name": "Record Template", - "advanced": true, - "dynamic": false, - "info": "In case of Message being a Record, this template will be used to convert it to text.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "return_record": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "return_record", - "display_name": "Return Record", - "advanced": true, - "dynamic": false, - "info": "Return the message as a record containing the sender, sender_name, and session_id.", - "load_from_db": false, - "title_case": false - }, - "sender": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "Machine", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Machine", "User"], - "name": "sender", - "display_name": "Sender Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "AI", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "sender_name", - "display_name": "Sender Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "session_id": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "session_id", - "display_name": "Session ID", - "advanced": true, - "dynamic": false, - "info": "If provided, the message will be 
stored in the memory.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Display a chat message in the Playground.", - "icon": "ChatOutput", - "base_classes": ["Text", "Record", "object", "str"], - "display_name": "Chat Output", - "documentation": "", - "custom_fields": { - "sender": null, - "sender_name": null, - "input_value": null, - "session_id": null, - "return_record": null, - "record_template": null - }, - "output_types": ["Text", "Record"], - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false - }, - "id": "ChatOutput-JPlxl" - }, "selected": false, - "width": 384, - "height": 383 + "type": "genericNode", + "width": 384 }, { - "id": "OpenAIModel-gi29P", - "type": "genericNode", - "position": { - "x": 1917.7089968570963, - "y": 575.9186499244129 - }, "data": { - "type": "OpenAIModel", + "id": "OpenAIModel-gi29P", "node": { - "template": { - "input_value": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Input", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "max_tokens": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "1024", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "max_tokens", - "display_name": "Max Tokens", - "advanced": true, - "dynamic": false, - "info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.", - "load_from_db": false, - "title_case": false - }, - "model_kwargs": { - "type": "NestedDict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": {}, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "model_kwargs", - "display_name": "Model Kwargs", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "model_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "gpt-3.5-turbo-0125", - "fileTypes": [], - "file_path": "", - "password": false, - "options": [ - "gpt-4o", - "gpt-4-turbo", - "gpt-4-turbo-preview", - "gpt-3.5-turbo", - "gpt-3.5-turbo-0125" - ], - "name": "model_name", - "display_name": "Model Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_api_base": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "openai_api_base", - "display_name": "OpenAI API Base", - "advanced": true, - "dynamic": false, - "info": "The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_api_key": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": true, - "name": "openai_api_key", - "display_name": "OpenAI API Key", - "advanced": false, - "dynamic": false, - "info": "The OpenAI API Key to use for the OpenAI model.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"], - "value": "OPENAI_API_KEY" - }, - "stream": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "stream", - "display_name": "Stream", - "advanced": true, - "dynamic": false, - "info": "Stream the response from the model. Streaming works only in Chat.", - "load_from_db": false, - "title_case": false - }, - "system_message": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "system_message", - "display_name": "System Message", - "advanced": true, - "dynamic": false, - "info": "System message to pass to the model.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "temperature": { - "type": "float", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "0.1", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "temperature", - "display_name": "Temperature", - "advanced": false, - "dynamic": false, - "info": "", - "rangeSpec": { - "step_type": "float", - "min": -1, - "max": 1, - "step": 0.1 - }, - "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent" - }, - "description": "Generates text using OpenAI LLMs.", - "icon": "OpenAI", - "base_classes": ["str", "Text", "object"], - "display_name": "OpenAI", - "documentation": "", + "base_classes": [ + "str", + "Text", + "object" + ], + "beta": false, "custom_fields": { "input_value": null, - "openai_api_key": null, - "temperature": null, - "model_name": null, "max_tokens": null, "model_kwargs": null, + "model_name": null, "openai_api_base": null, + "openai_api_key": null, "stream": null, - "system_message": null + "system_message": null, + "temperature": null }, - "output_types": ["Text"], + "description": "Generates text using OpenAI LLMs.", + "display_name": "OpenAI", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [ "max_tokens", "model_kwargs", @@ -663,315 +609,439 @@ "system_message", "stream" ], - "beta": false + "frozen": false, + "icon": "OpenAI", + "output_types": [ + "Text" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom 
langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Input", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text", + "Record", + "Prompt" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str" + }, + "max_tokens": { + "advanced": true, + "display_name": "Max Tokens", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "max_tokens", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": "1024" + }, + "model_kwargs": { + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "model_kwargs", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "NestedDict", + "value": {} + }, + "model_name": { + "advanced": false, + "display_name": "Model Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "model_name", + "options": [ + "gpt-4o", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "gpt-3.5-turbo-0125" + }, + "openai_api_base": { + "advanced": true, + "display_name": "OpenAI API Base", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "openai_api_base", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "openai_api_key": { + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The OpenAI API Key to use for the OpenAI model.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": true, + "multiline": false, + "name": "openai_api_key", + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": "OPENAI_API_KEY" + }, + "stream": { + "advanced": true, + "display_name": "Stream", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Stream the response from the model. 
Streaming works only in Chat.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "stream", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": true + }, + "system_message": { + "advanced": true, + "display_name": "System Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "System message to pass to the model.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "system_message", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "temperature": { + "advanced": false, + "display_name": "Temperature", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "temperature", + "password": false, + "placeholder": "", + "rangeSpec": { + "max": 1, + "min": -1, + "step": 0.1, + "step_type": "float" + }, + "required": false, + "show": true, + "title_case": false, + "type": "float", + "value": "0.1" + } + } }, - "id": "OpenAIModel-gi29P" + "type": "OpenAIModel" }, - "selected": false, - "width": 384, + "dragging": false, "height": 563, + "id": "OpenAIModel-gi29P", + "position": { + "x": 1917.7089968570963, + "y": 575.9186499244129 + }, "positionAbsolute": { "x": 1917.7089968570963, "y": 575.9186499244129 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { + "data": { + "id": "URL-2cX90", + "node": { + "base_classes": [ + "Record" + ], + "beta": false, + "custom_fields": { + "urls": null + }, + "description": "Fetch content from one or more URLs.", + "display_name": "URL", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "icon": "layout-template", + "output_types": [ + "Record" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any, Dict\n\nfrom langchain_community.document_loaders.web_base import WebBaseLoader\n\nfrom langflow.custom import CustomComponent\nfrom langflow.schema import Record\n\n\nclass URLComponent(CustomComponent):\n display_name = \"URL\"\n description = \"Fetch content from one or more URLs.\"\n icon = \"layout-template\"\n\n def build_config(self) -> Dict[str, Any]:\n return {\n \"urls\": {\"display_name\": \"URL\"},\n }\n\n def build(\n self,\n urls: list[str],\n ) -> list[Record]:\n loader = WebBaseLoader(web_paths=urls)\n docs = loader.load()\n records = self.to_records(docs)\n self.status = records\n return records\n" + }, + "urls": { + "advanced": false, + "display_name": "URL", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "urls", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": [ + "https://www.promptingguide.ai/introduction/basics" + ] + } + } + }, + "type": "URL" + }, + "dragging": false, + "height": 281, "id": "URL-2cX90", - "type": "genericNode", "position": { "x": 573.961301764604, "y": 336.41463436122086 }, - 
"data": { - "type": "URL", - "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Any, Dict\n\nfrom langchain_community.document_loaders.web_base import WebBaseLoader\n\nfrom langflow.custom import CustomComponent\nfrom langflow.schema import Record\n\n\nclass URLComponent(CustomComponent):\n display_name = \"URL\"\n description = \"Fetch content from one or more URLs.\"\n icon = \"layout-template\"\n\n def build_config(self) -> Dict[str, Any]:\n return {\n \"urls\": {\"display_name\": \"URL\"},\n }\n\n def build(\n self,\n urls: list[str],\n ) -> list[Record]:\n loader = WebBaseLoader(web_paths=urls)\n docs = loader.load()\n records = self.to_records(docs)\n self.status = records\n return records\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "urls": { - "type": "str", - "required": true, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "urls", - "display_name": "URL", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"], - "value": ["https://www.promptingguide.ai/introduction/basics"] - }, - "_type": "CustomComponent" - }, - "description": "Fetch content from one or more URLs.", - "icon": "layout-template", - "base_classes": ["Record"], - "display_name": "URL", - "documentation": "", - "custom_fields": { - "urls": null - }, - "output_types": ["Record"], - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false - }, - "id": "URL-2cX90" - }, - "selected": false, - "width": 384, - "height": 281, "positionAbsolute": { "x": 573.961301764604, "y": 336.41463436122086 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "TextInput-og8Or", - "type": "genericNode", - "position": { - "x": 569.9387927203336, - "y": 1095.3352160671316 - }, "data": { - "type": "TextInput", + "id": "TextInput-og8Or", "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextInput(TextComponent):\n display_name = \"Text Input\"\n description = \"Get text inputs from the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as input.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Optional[str] = \"\",\n record_template: Optional[str] = \"\",\n ) -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "Use the references above for style to write a new blog/tutorial about prompt engineering techniques. Suggest non-covered topics.", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Value", - "advanced": false, - "input_types": ["Record", "Text"], - "dynamic": false, - "info": "Text or Record to be passed as input.", - "load_from_db": false, - "title_case": false - }, - "record_template": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "record_template", - "display_name": "Record Template", - "advanced": true, - "dynamic": false, - "info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Get text inputs from the Playground.", - "icon": "type", - "base_classes": ["object", "Text", "str"], - "display_name": "Instructions", - "documentation": "", + "base_classes": [ + "object", + "Text", + "str" + ], + "beta": false, "custom_fields": { "input_value": null, "record_template": null }, - "output_types": ["Text"], + "description": "Get text inputs from the Playground.", + "display_name": "Instructions", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [], - "beta": false + "frozen": false, + "icon": "type", + "output_types": [ + "Text" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextInput(TextComponent):\n display_name = \"Text Input\"\n description = \"Get text inputs from the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as input.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Optional[str] = \"\",\n record_template: Optional[str] = \"\",\n ) -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Value", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Text or Record to be passed as input.", + "input_types": [ + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Use the references above for style to write a new blog/tutorial about prompt engineering techniques. Suggest non-covered topics." + }, + "record_template": { + "advanced": true, + "display_name": "Record Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "record_template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + } + } }, - "id": "TextInput-og8Or" + "type": "TextInput" }, - "selected": false, - "width": 384, + "dragging": false, "height": 289, + "id": "TextInput-og8Or", + "position": { + "x": 569.9387927203336, + "y": 1095.3352160671316 + }, "positionAbsolute": { "x": 569.9387927203336, "y": 1095.3352160671316 }, - "dragging": false - } - ], - "edges": [ - { - "source": "URL-HYPkR", - "target": "Prompt-Rse03", - "sourceHandle": "{œbaseClassesœ:[œRecordœ],œdataTypeœ:œURLœ,œidœ:œURL-HYPkRœ}", - "targetHandle": "{œfieldNameœ:œreference_2œ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "id": "reactflow__edge-URL-HYPkR{œbaseClassesœ:[œRecordœ],œdataTypeœ:œURLœ,œidœ:œURL-HYPkRœ}-Prompt-Rse03{œfieldNameœ:œreference_2œ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "reference_2", - "id": "Prompt-Rse03", - "inputTypes": ["Document", "BaseOutputParser", "Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["Record"], - "dataType": "URL", - "id": "URL-HYPkR" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "selected": false - }, - { - "source": "OpenAIModel-gi29P", - "sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-gi29Pœ}", - "target": "ChatOutput-JPlxl", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-JPlxlœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "ChatOutput-JPlxl", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["str", "Text", "object"], - "dataType": "OpenAIModel", - "id": "OpenAIModel-gi29P" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-OpenAIModel-gi29P{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-gi29Pœ}-ChatOutput-JPlxl{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-JPlxlœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}" - }, - { - "source": 
"URL-2cX90", - "sourceHandle": "{œbaseClassesœ:[œRecordœ],œdataTypeœ:œURLœ,œidœ:œURL-2cX90œ}", - "target": "Prompt-Rse03", - "targetHandle": "{œfieldNameœ:œreference_1œ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "reference_1", - "id": "Prompt-Rse03", - "inputTypes": ["Document", "BaseOutputParser", "Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["Record"], - "dataType": "URL", - "id": "URL-2cX90" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-URL-2cX90{œbaseClassesœ:[œRecordœ],œdataTypeœ:œURLœ,œidœ:œURL-2cX90œ}-Prompt-Rse03{œfieldNameœ:œreference_1œ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "TextInput-og8Or", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-og8Orœ}", - "target": "Prompt-Rse03", - "targetHandle": "{œfieldNameœ:œinstructionsœ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "instructions", - "id": "Prompt-Rse03", - "inputTypes": ["Document", "BaseOutputParser", "Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "Text", "str"], - "dataType": "TextInput", - "id": "TextInput-og8Or" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-TextInput-og8Or{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-og8Orœ}-Prompt-Rse03{œfieldNameœ:œinstructionsœ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "Prompt-Rse03", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-Rse03œ}", - "target": "OpenAIModel-gi29P", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-gi29Pœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "OpenAIModel-gi29P", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "Text", "str"], - "dataType": "Prompt", - "id": "Prompt-Rse03" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-Prompt-Rse03{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-Rse03œ}-OpenAIModel-gi29P{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-gi29Pœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "selected": false + "selected": false, + "type": "genericNode", + "width": 384 } ], "viewport": { @@ -981,7 +1051,8 @@ } }, "description": "This flow can be used to create a blog post following instructions from the user, using two other blogs as reference.", - "name": "Blog Writer", + "id": "6ad5559d-fb66-4fdc-8f98-96f4ac12799d", + "is_component": false, "last_tested_version": "1.0.0a0", - "is_component": false -} + "name": "Blog Writer" +} \ No newline at end of file diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Document QA.json b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Document QA.json index 5fa8547a1..ecbcd04d7 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Document QA.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Document QA.json @@ 
-1,161 +1,327 @@ { - "id": "fecbce42-6f11-454c-8ab2-db6eddbbbb0f", "data": { + "edges": [ + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "str", + "Record", + "Text", + "object" + ], + "dataType": "ChatInput", + "id": "ChatInput-MsSJ9" + }, + "targetHandle": { + "fieldName": "Question", + "id": "Prompt-tHwPf", + "inputTypes": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-ChatInput-MsSJ9{œbaseClassesœ:[œstrœ,œRecordœ,œTextœ,œobjectœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-MsSJ9œ}-Prompt-tHwPf{œfieldNameœ:œQuestionœ,œidœ:œPrompt-tHwPfœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", + "source": "ChatInput-MsSJ9", + "sourceHandle": "{œbaseClassesœ: [œstrœ, œRecordœ, œTextœ, œobjectœ], œdataTypeœ: œChatInputœ, œidœ: œChatInput-MsSJ9œ}", + "style": { + "stroke": "#555" + }, + "target": "Prompt-tHwPf", + "targetHandle": "{œfieldNameœ: œQuestionœ, œidœ: œPrompt-tHwPfœ, œinputTypesœ: [œDocumentœ, œBaseOutputParserœ, œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "Record" + ], + "dataType": "File", + "id": "File-6TEsD" + }, + "targetHandle": { + "fieldName": "Document", + "id": "Prompt-tHwPf", + "inputTypes": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-File-6TEsD{œbaseClassesœ:[œRecordœ],œdataTypeœ:œFileœ,œidœ:œFile-6TEsDœ}-Prompt-tHwPf{œfieldNameœ:œDocumentœ,œidœ:œPrompt-tHwPfœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", + "source": "File-6TEsD", + "sourceHandle": "{œbaseClassesœ: [œRecordœ], œdataTypeœ: œFileœ, œidœ: œFile-6TEsDœ}", + "style": { + "stroke": "#555" + }, + "target": "Prompt-tHwPf", + "targetHandle": "{œfieldNameœ: œDocumentœ, œidœ: œPrompt-tHwPfœ, œinputTypesœ: [œDocumentœ, œBaseOutputParserœ, œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "str", + "Text" + ], + "dataType": "Prompt", + "id": "Prompt-tHwPf" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "OpenAIModel-Bt067", + "inputTypes": [ + "Text", + "Record", + "Prompt" + ], + "type": "str" + } + }, + "id": "reactflow__edge-Prompt-tHwPf{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-tHwPfœ}-OpenAIModel-Bt067{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-Bt067œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "source": "Prompt-tHwPf", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œstrœ, œTextœ], œdataTypeœ: œPromptœ, œidœ: œPrompt-tHwPfœ}", + "style": { + "stroke": "#555" + }, + "target": "OpenAIModel-Bt067", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-Bt067œ, œinputTypesœ: [œTextœ, œRecordœ, œPromptœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "str", + "Text" + ], + "dataType": "OpenAIModel", + "id": "OpenAIModel-Bt067" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-F5Awj", + "inputTypes": [ + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-OpenAIModel-Bt067{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-Bt067œ}-ChatOutput-F5Awj{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-F5Awjœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "source": 
"OpenAIModel-Bt067", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œstrœ, œTextœ], œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-Bt067œ}", + "style": { + "stroke": "#555" + }, + "target": "ChatOutput-F5Awj", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-F5Awjœ, œinputTypesœ: [œTextœ], œtypeœ: œstrœ}" + } + ], "nodes": [ { + "data": { + "description": "A component for creating prompt templates using dynamic variables.", + "display_name": "Prompt", + "id": "Prompt-tHwPf", + "node": { + "base_classes": [ + "object", + "str", + "Text" + ], + "beta": false, + "custom_fields": { + "template": [ + "Document", + "Question" + ] + }, + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "documentation": "", + "error": null, + "field_formatters": {}, + "field_order": [], + "frozen": false, + "full_path": null, + "icon": "prompts", + "is_composition": null, + "is_input": null, + "is_output": null, + "name": "", + "output_types": [ + "Prompt" + ], + "template": { + "Document": { + "advanced": false, + "display_name": "Document", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "Document", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "Question": { + "advanced": false, + "display_name": "Question", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "Question", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.custom import CustomComponent\nfrom langflow.field_typing import TemplateField\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Prompt:\n prompt = await Prompt.from_template_and_variables(template, kwargs)\n self.status = prompt.format_text()\n return prompt\n" + }, + "template": { + "advanced": false, + "display_name": "Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "prompt", + "value": "Answer user's questions based on the document below:\n\n---\n\n{Document}\n\n---\n\nQuestion:\n{Question}\n\nAnswer:\n" + } + } + }, + "type": "Prompt" + }, + "dragging": false, + "height": 
479, "id": "Prompt-tHwPf", - "type": "genericNode", "position": { "x": 585.7906101139403, "y": 117.52115876762832 }, - "data": { - "type": "Prompt", - "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "template": { - "type": "prompt", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "Answer user's questions based on the document below:\n\n---\n\n{Document}\n\n---\n\nQuestion:\n{Question}\n\nAnswer:\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "template", - "display_name": "Template", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent", - "Document": { - "field_type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "Document", - "display_name": "Document", - "advanced": false, - "input_types": [ - "Document", - "BaseOutputParser", - "Record", - "Text" - ], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "type": "str" - }, - "Question": { - "field_type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "Question", - "display_name": "Question", - "advanced": false, - "input_types": [ - "Document", - "BaseOutputParser", - "Record", - "Text" - ], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "type": "str" - } - }, - "description": "Create a prompt template with dynamic variables.", - "icon": "prompts", - "is_input": null, - "is_output": null, - "is_composition": null, - "base_classes": ["object", "str", "Text"], - "name": "", - "display_name": "Prompt", - "documentation": "", - "custom_fields": { - "template": ["Document", "Question"] - }, - "output_types": ["Text"], - "full_path": null, - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false, - "error": null - }, - "id": "Prompt-tHwPf", - "description": "A component for 
creating prompt templates using dynamic variables.", - "display_name": "Prompt" - }, - "selected": false, - "width": 384, - "height": 479, "positionAbsolute": { "x": 585.7906101139403, "y": 117.52115876762832 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "File-6TEsD", - "type": "genericNode", - "position": { - "x": -18.636536329280602, - "y": 3.951948774836353 - }, "data": { - "type": "File", + "id": "File-6TEsD", "node": { + "base_classes": [ + "Record" + ], + "beta": false, + "custom_fields": { + "path": null, + "silent_errors": null + }, + "description": "A generic file loader.", + "display_name": "Files", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "output_types": [ + "Record" + ], "template": { - "path": { - "type": "file", - "required": true, - "placeholder": "", + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, "show": true, - "multiline": false, + "title_case": false, + "type": "code", + "value": "from pathlib import Path\nfrom typing import Any, Dict\n\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parse_text_file_to_record\nfrom langflow.interface.custom.custom_component import CustomComponent\nfrom langflow.schema import Record\n\n\nclass FileComponent(CustomComponent):\n display_name = \"Files\"\n description = \"A generic file loader.\"\n\n def build_config(self) -> Dict[str, Any]:\n return {\n \"path\": {\n \"display_name\": \"Path\",\n \"field_type\": \"file\",\n \"file_types\": TEXT_FILE_TYPES,\n \"info\": f\"Supported file types: {', '.join(TEXT_FILE_TYPES)}\",\n },\n \"silent_errors\": {\n \"display_name\": \"Silent Errors\",\n \"advanced\": True,\n \"info\": \"If true, errors will not raise an exception.\",\n },\n }\n\n def load_file(self, path: str, silent_errors: bool = False) -> Record:\n resolved_path = self.resolve_path(path)\n path_obj = Path(resolved_path)\n extension = path_obj.suffix[1:].lower()\n if extension == \"doc\":\n raise ValueError(\"doc files are not supported. 
Please save as .docx\")\n if extension not in TEXT_FILE_TYPES:\n raise ValueError(f\"Unsupported file type: {extension}\")\n record = parse_text_file_to_record(resolved_path, silent_errors)\n self.status = record if record else \"No data\"\n return record or Record()\n\n def build(\n self,\n path: str,\n silent_errors: bool = False,\n ) -> Record:\n record = self.load_file(path, silent_errors)\n self.status = record\n return record\n" + }, + "path": { + "advanced": false, + "display_name": "Path", + "dynamic": false, "fileTypes": [ ".txt", ".md", @@ -170,633 +336,395 @@ ".pdf", ".docx" ], - "password": false, - "name": "path", - "display_name": "Path", - "advanced": false, - "dynamic": false, "info": "Supported file types: txt, md, mdx, csv, json, yaml, yml, xml, html, htm, pdf, docx", + "list": false, "load_from_db": false, + "multiline": false, + "name": "path", + "password": false, + "placeholder": "", + "required": true, + "show": true, "title_case": false, + "type": "file", "value": "" }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from pathlib import Path\nfrom typing import Any, Dict\n\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parse_text_file_to_record\nfrom langflow.interface.custom.custom_component import CustomComponent\nfrom langflow.schema import Record\n\n\nclass FileComponent(CustomComponent):\n display_name = \"Files\"\n description = \"A generic file loader.\"\n\n def build_config(self) -> Dict[str, Any]:\n return {\n \"path\": {\n \"display_name\": \"Path\",\n \"field_type\": \"file\",\n \"file_types\": TEXT_FILE_TYPES,\n \"info\": f\"Supported file types: {', '.join(TEXT_FILE_TYPES)}\",\n },\n \"silent_errors\": {\n \"display_name\": \"Silent Errors\",\n \"advanced\": True,\n \"info\": \"If true, errors will not raise an exception.\",\n },\n }\n\n def load_file(self, path: str, silent_errors: bool = False) -> Record:\n resolved_path = self.resolve_path(path)\n path_obj = Path(resolved_path)\n extension = path_obj.suffix[1:].lower()\n if extension == \"doc\":\n raise ValueError(\"doc files are not supported. 
Please save as .docx\")\n if extension not in TEXT_FILE_TYPES:\n raise ValueError(f\"Unsupported file type: {extension}\")\n record = parse_text_file_to_record(resolved_path, silent_errors)\n self.status = record if record else \"No data\"\n return record or Record()\n\n def build(\n self,\n path: str,\n silent_errors: bool = False,\n ) -> Record:\n record = self.load_file(path, silent_errors)\n self.status = record\n return record\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, "silent_errors": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, + "advanced": true, + "display_name": "Silent Errors", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "silent_errors", - "display_name": "Silent Errors", - "advanced": true, - "dynamic": false, "info": "If true, errors will not raise an exception.", + "list": false, "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent" - }, - "description": "A generic file loader.", - "base_classes": ["Record"], - "display_name": "Files", - "documentation": "", - "custom_fields": { - "path": null, - "silent_errors": null - }, - "output_types": ["Record"], - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false + "multiline": false, + "name": "silent_errors", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false + } + } }, - "id": "File-6TEsD" + "type": "File" }, - "selected": false, - "width": 384, + "dragging": false, "height": 282, + "id": "File-6TEsD", + "position": { + "x": -18.636536329280602, + "y": 3.951948774836353 + }, "positionAbsolute": { "x": -18.636536329280602, "y": 3.951948774836353 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "ChatInput-MsSJ9", - "type": "genericNode", - "position": { - "x": -28.80036300619821, - "y": 379.81180230285355 - }, "data": { - "type": "ChatInput", + "id": "ChatInput-MsSJ9", "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build_no_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - 
"placeholder": "", - "list": false, - "show": true, - "multiline": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Message", - "advanced": false, - "input_types": [], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "value": "" - }, - "return_record": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "return_record", - "display_name": "Return Record", - "advanced": true, - "dynamic": false, - "info": "Return the message as a record containing the sender, sender_name, and session_id.", - "load_from_db": false, - "title_case": false - }, - "sender": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "User", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Machine", "User"], - "name": "sender", - "display_name": "Sender Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "User", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "sender_name", - "display_name": "Sender Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "session_id": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "session_id", - "display_name": "Session ID", - "advanced": true, - "dynamic": false, - "info": "If provided, the message will be stored in the memory.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" + "base_classes": [ + "str", + "Record", + "Text", + "object" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "return_record": null, + "sender": null, + "sender_name": null, + "session_id": null }, "description": "Get chat inputs from the Playground.", - "icon": "ChatInput", - "base_classes": ["str", "Record", "Text", "object"], "display_name": "Chat Input", "documentation": "", - "custom_fields": { - "sender": null, - "sender_name": null, - "input_value": null, - "session_id": null, - "return_record": null - }, - "output_types": ["Text", "Record"], "field_formatters": {}, - "frozen": false, "field_order": [], - "beta": false + "frozen": false, + "icon": "ChatInput", + "output_types": [ + "Message" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.schema.message import Message\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = 
{\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n files: Optional[list[str]] = None,\n session_id: Optional[str] = None,\n ) -> Message:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n files=files,\n session_id=session_id,\n )\n" + }, + "input_value": { + "advanced": false, + "display_name": "Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "sender": { + "advanced": true, + "display_name": "Sender Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "sender", + "options": [ + "Machine", + "User" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "User" + }, + "sender_name": { + "advanced": false, + "display_name": "Sender Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "sender_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "User" + }, + "session_id": { + "advanced": true, + "display_name": "Session ID", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "If provided, the message will be stored in the memory.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "session_id", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + } + } }, - "id": "ChatInput-MsSJ9" + "type": "ChatInput" }, - "selected": true, - "width": 384, + "dragging": false, "height": 377, + "id": "ChatInput-MsSJ9", + "position": { + "x": -28.80036300619821, + "y": 379.81180230285355 + }, "positionAbsolute": { "x": -28.80036300619821, "y": 379.81180230285355 }, - "dragging": false + "selected": true, + "type": "genericNode", + "width": 384 }, { + "data": { + "id": "ChatOutput-F5Awj", + "node": { + "base_classes": [ + "str", + "Record", + "Text", + "object" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "return_record": null, + "sender": null, + "sender_name": null, + "session_id": null + }, + "description": "Display a chat message in the Playground.", + "display_name": "Chat Output", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "icon": "ChatOutput", + "output_types": [ + "Message" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.schema.message import 
Message\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n files: Optional[list[str]] = None,\n ) -> Message:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n files=files,\n )\n" + }, + "input_value": { + "advanced": false, + "display_name": "Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "sender": { + "advanced": true, + "display_name": "Sender Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "sender", + "options": [ + "Machine", + "User" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Machine" + }, + "sender_name": { + "advanced": false, + "display_name": "Sender Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "sender_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "AI" + }, + "session_id": { + "advanced": true, + "display_name": "Session ID", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "If provided, the message will be stored in the memory.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "session_id", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + } + } + }, + "type": "ChatOutput" + }, + "dragging": false, + "height": 385, "id": "ChatOutput-F5Awj", - "type": "genericNode", "position": { "x": 1733.3012915204283, "y": 168.76098809939327 }, - "data": { - "type": "ChatOutput", - "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": 
true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Message", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "return_record": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "return_record", - "display_name": "Return Record", - "advanced": true, - "dynamic": false, - "info": "Return the message as a record containing the sender, sender_name, and session_id.", - "load_from_db": false, - "title_case": false - }, - "sender": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "Machine", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Machine", "User"], - "name": "sender", - "display_name": "Sender Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "AI", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "sender_name", - "display_name": "Sender Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "session_id": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "session_id", - "display_name": "Session ID", - "advanced": true, - "dynamic": false, - "info": "If provided, the message will be stored in the memory.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Display a chat message in the Playground.", - "icon": "ChatOutput", - "base_classes": ["str", "Record", "Text", "object"], - "display_name": "Chat Output", - "documentation": "", - "custom_fields": { - "sender": null, - "sender_name": null, - "input_value": null, - "session_id": null, - "return_record": null - }, - "output_types": ["Text", "Record"], - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false - }, - "id": "ChatOutput-F5Awj" - }, - "selected": false, - "width": 384, - "height": 385, "positionAbsolute": { "x": 1733.3012915204283, "y": 168.76098809939327 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "OpenAIModel-Bt067", - "type": "genericNode", - "position": { - "x": 1137.6078582863759, - "y": -14.41920034020356 - }, "data": { - "type": "OpenAIModel", + "id": "OpenAIModel-Bt067", "node": { - "template": { - "input_value": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Input", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "code": { - "type": "code", - "required": 
true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "max_tokens": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 256, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "max_tokens", - "display_name": "Max Tokens", - "advanced": true, - "dynamic": false, - "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", - "load_from_db": false, - "title_case": false - }, - "model_kwargs": { - "type": "NestedDict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": {}, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "model_kwargs", - "display_name": "Model Kwargs", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "model_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "gpt-4-turbo-preview", - "fileTypes": [], - "file_path": "", - "password": false, - "options": [ - "gpt-4o", - "gpt-4-turbo", - "gpt-4-turbo-preview", - "gpt-3.5-turbo", - "gpt-3.5-turbo-0125" - ], - "name": "model_name", - "display_name": "Model Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_api_base": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "openai_api_base", - "display_name": "OpenAI API Base", - "advanced": true, - "dynamic": false, - "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_api_key": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": true, - "name": "openai_api_key", - "display_name": "OpenAI API Key", - "advanced": false, - "dynamic": false, - "info": "The OpenAI API Key to use for the OpenAI model.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"], - "value": "OPENAI_API_KEY" - }, - "stream": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "stream", - "display_name": "Stream", - "advanced": false, - "dynamic": false, - "info": "Stream the response from the model. 
Streaming works only in Chat.", - "load_from_db": false, - "title_case": false - }, - "system_message": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "system_message", - "display_name": "System Message", - "advanced": true, - "dynamic": false, - "info": "System message to pass to the model.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "temperature": { - "type": "float", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 0.1, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "temperature", - "display_name": "Temperature", - "advanced": false, - "dynamic": false, - "info": "", - "rangeSpec": { - "step_type": "float", - "min": -1, - "max": 1, - "step": 0.1 - }, - "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent" - }, - "description": "Generates text using OpenAI LLMs.", - "icon": "OpenAI", - "base_classes": ["object", "str", "Text"], - "display_name": "OpenAI", - "documentation": "", + "base_classes": [ + "object", + "str", + "Text" + ], + "beta": false, "custom_fields": { "input_value": null, - "openai_api_key": null, - "temperature": null, - "model_name": null, "max_tokens": null, "model_kwargs": null, + "model_name": null, "openai_api_base": null, + "openai_api_key": null, "stream": null, - "system_message": null + "system_message": null, + "temperature": null }, - "output_types": ["Text"], + "description": "Generates text using OpenAI LLMs.", + "display_name": "OpenAI", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [ "max_tokens", "model_kwargs", @@ -808,116 +736,247 @@ "system_message", "stream" ], - "beta": false + "frozen": false, + "icon": "OpenAI", + "output_types": [ + "Text" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Input", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text", + "Record", + "Prompt" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str" + }, + "max_tokens": { + "advanced": true, + "display_name": "Max Tokens", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "max_tokens", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 256 + }, + "model_kwargs": { + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "model_kwargs", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "NestedDict", + "value": {} + }, + "model_name": { + "advanced": false, + "display_name": "Model Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "model_name", + "options": [ + "gpt-4o", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "gpt-4-turbo-preview" + }, + "openai_api_base": { + "advanced": true, + "display_name": "OpenAI API Base", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "openai_api_base", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "openai_api_key": { + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The OpenAI API Key to use for the OpenAI model.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": true, + "multiline": false, + "name": "openai_api_key", + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": "OPENAI_API_KEY" + }, + "stream": { + "advanced": false, + "display_name": "Stream", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Stream the response from the model. 
Streaming works only in Chat.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "stream", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": true + }, + "system_message": { + "advanced": true, + "display_name": "System Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "System message to pass to the model.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "system_message", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "temperature": { + "advanced": false, + "display_name": "Temperature", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "temperature", + "password": false, + "placeholder": "", + "rangeSpec": { + "max": 1, + "min": -1, + "step": 0.1, + "step_type": "float" + }, + "required": false, + "show": true, + "title_case": false, + "type": "float", + "value": 0.1 + } + } }, - "id": "OpenAIModel-Bt067" + "type": "OpenAIModel" }, - "selected": false, - "width": 384, + "dragging": false, "height": 642, + "id": "OpenAIModel-Bt067", + "position": { + "x": 1137.6078582863759, + "y": -14.41920034020356 + }, "positionAbsolute": { "x": 1137.6078582863759, "y": -14.41920034020356 }, - "dragging": false - } - ], - "edges": [ - { - "source": "ChatInput-MsSJ9", - "sourceHandle": "{œbaseClassesœ:[œstrœ,œRecordœ,œTextœ,œobjectœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-MsSJ9œ}", - "target": "Prompt-tHwPf", - "targetHandle": "{œfieldNameœ:œQuestionœ,œidœ:œPrompt-tHwPfœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "Question", - "id": "Prompt-tHwPf", - "inputTypes": ["Document", "BaseOutputParser", "Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["str", "Record", "Text", "object"], - "dataType": "ChatInput", - "id": "ChatInput-MsSJ9" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-ChatInput-MsSJ9{œbaseClassesœ:[œstrœ,œRecordœ,œTextœ,œobjectœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-MsSJ9œ}-Prompt-tHwPf{œfieldNameœ:œQuestionœ,œidœ:œPrompt-tHwPfœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "File-6TEsD", - "sourceHandle": "{œbaseClassesœ:[œRecordœ],œdataTypeœ:œFileœ,œidœ:œFile-6TEsDœ}", - "target": "Prompt-tHwPf", - "targetHandle": "{œfieldNameœ:œDocumentœ,œidœ:œPrompt-tHwPfœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "Document", - "id": "Prompt-tHwPf", - "inputTypes": ["Document", "BaseOutputParser", "Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["Record"], - "dataType": "File", - "id": "File-6TEsD" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-File-6TEsD{œbaseClassesœ:[œRecordœ],œdataTypeœ:œFileœ,œidœ:œFile-6TEsDœ}-Prompt-tHwPf{œfieldNameœ:œDocumentœ,œidœ:œPrompt-tHwPfœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "Prompt-tHwPf", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-tHwPfœ}", - "target": "OpenAIModel-Bt067", - "targetHandle": 
"{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-Bt067œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "OpenAIModel-Bt067", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "str", "Text"], - "dataType": "Prompt", - "id": "Prompt-tHwPf" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-Prompt-tHwPf{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-tHwPfœ}-OpenAIModel-Bt067{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-Bt067œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "OpenAIModel-Bt067", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-Bt067œ}", - "target": "ChatOutput-F5Awj", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-F5Awjœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "ChatOutput-F5Awj", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "str", "Text"], - "dataType": "OpenAIModel", - "id": "OpenAIModel-Bt067" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-OpenAIModel-Bt067{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-Bt067œ}-ChatOutput-F5Awj{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-F5Awjœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}" + "selected": false, + "type": "genericNode", + "width": 384 } ], "viewport": { @@ -927,7 +986,8 @@ } }, "description": "This flow integrates PDF reading with a language model to answer document-specific questions. Ideal for small-scale texts, it facilitates direct queries with immediate insights.", - "name": "Document QA", + "id": "fecbce42-6f11-454c-8ab2-db6eddbbbb0f", + "is_component": false, "last_tested_version": "1.0.0a0", - "is_component": false -} + "name": "Document QA" +} \ No newline at end of file diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Memory Conversation.json b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Memory Conversation.json index 1002b721a..c23f7b08c 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Memory Conversation.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Memory Conversation.json @@ -1,883 +1,863 @@ { - "id": "08d5cccf-d098-4367-b14b-1078429c9ed9", - "icon": "🤖", - "icon_bg_color": "#FFD700", "data": { + "edges": [ + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "str", + "Text", + "object" + ], + "dataType": "MemoryComponent", + "id": "MemoryComponent-cdA1J" + }, + "targetHandle": { + "fieldName": "context", + "id": "Prompt-ODkUx", + "inputTypes": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-MemoryComponent-cdA1J{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œMemoryComponentœ,œidœ:œMemoryComponent-cdA1Jœ}-Prompt-ODkUx{œfieldNameœ:œcontextœ,œidœ:œPrompt-ODkUxœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", + "selected": false, + "source": "MemoryComponent-cdA1J", + "sourceHandle": "{œbaseClassesœ: [œstrœ, œTextœ, œobjectœ], œdataTypeœ: œMemoryComponentœ, œidœ: œMemoryComponent-cdA1Jœ}", + "style": { + "stroke": "#555" + }, + "target": "Prompt-ODkUx", + "targetHandle": 
"{œfieldNameœ: œcontextœ, œidœ: œPrompt-ODkUxœ, œinputTypesœ: [œDocumentœ, œBaseOutputParserœ, œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "Text", + "object", + "Record", + "str" + ], + "dataType": "ChatInput", + "id": "ChatInput-t7F8v" + }, + "targetHandle": { + "fieldName": "user_message", + "id": "Prompt-ODkUx", + "inputTypes": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-ChatInput-t7F8v{œbaseClassesœ:[œTextœ,œobjectœ,œRecordœ,œstrœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-t7F8vœ}-Prompt-ODkUx{œfieldNameœ:œuser_messageœ,œidœ:œPrompt-ODkUxœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", + "selected": false, + "source": "ChatInput-t7F8v", + "sourceHandle": "{œbaseClassesœ: [œTextœ, œobjectœ, œRecordœ, œstrœ], œdataTypeœ: œChatInputœ, œidœ: œChatInput-t7F8vœ}", + "style": { + "stroke": "#555" + }, + "target": "Prompt-ODkUx", + "targetHandle": "{œfieldNameœ: œuser_messageœ, œidœ: œPrompt-ODkUxœ, œinputTypesœ: [œDocumentœ, œBaseOutputParserœ, œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "Text", + "str", + "object" + ], + "dataType": "Prompt", + "id": "Prompt-ODkUx" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "OpenAIModel-9RykF", + "inputTypes": [ + "Text", + "Record", + "Prompt" + ], + "type": "str" + } + }, + "id": "reactflow__edge-Prompt-ODkUx{œbaseClassesœ:[œTextœ,œstrœ,œobjectœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-ODkUxœ}-OpenAIModel-9RykF{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-9RykFœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "source": "Prompt-ODkUx", + "sourceHandle": "{œbaseClassesœ: [œTextœ, œstrœ, œobjectœ], œdataTypeœ: œPromptœ, œidœ: œPrompt-ODkUxœ}", + "style": { + "stroke": "#555" + }, + "target": "OpenAIModel-9RykF", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-9RykFœ, œinputTypesœ: [œTextœ, œRecordœ, œPromptœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "str", + "object", + "Text" + ], + "dataType": "OpenAIModel", + "id": "OpenAIModel-9RykF" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-P1jEe", + "inputTypes": [ + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-OpenAIModel-9RykF{œbaseClassesœ:[œstrœ,œobjectœ,œTextœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-9RykFœ}-ChatOutput-P1jEe{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-P1jEeœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "source": "OpenAIModel-9RykF", + "sourceHandle": "{œbaseClassesœ: [œstrœ, œobjectœ, œTextœ], œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-9RykFœ}", + "style": { + "stroke": "#555" + }, + "target": "ChatOutput-P1jEe", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-P1jEeœ, œinputTypesœ: [œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-foreground stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "str", + "Text", + "object" + ], + "dataType": "MemoryComponent", + "id": "MemoryComponent-cdA1J" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "TextOutput-vrs6T", + "inputTypes": [ + "Record", + "Text" + ], + "type": "str" + } + }, + "id": 
"reactflow__edge-MemoryComponent-cdA1J{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œMemoryComponentœ,œidœ:œMemoryComponent-cdA1Jœ}-TextOutput-vrs6T{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-vrs6Tœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}", + "source": "MemoryComponent-cdA1J", + "sourceHandle": "{œbaseClassesœ: [œstrœ, œTextœ, œobjectœ], œdataTypeœ: œMemoryComponentœ, œidœ: œMemoryComponent-cdA1Jœ}", + "style": { + "stroke": "#555" + }, + "target": "TextOutput-vrs6T", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œTextOutput-vrs6Tœ, œinputTypesœ: [œRecordœ, œTextœ], œtypeœ: œstrœ}" + } + ], "nodes": [ { - "id": "ChatInput-t7F8v", - "type": "genericNode", - "position": { - "x": 1283.2700598313072, - "y": 982.5953650473145 - }, "data": { - "type": "ChatInput", + "id": "ChatInput-t7F8v", "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build_no_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Message", - "advanced": false, - "input_types": [], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "value": "" - }, - "return_record": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "return_record", - "display_name": "Return Record", - "advanced": true, - "dynamic": false, - "info": "Return the message as a record containing the sender, sender_name, and session_id.", - "load_from_db": false, - "title_case": false - }, - "sender": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "User", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Machine", "User"], - "name": "sender", - "display_name": "Sender Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "User", - "fileTypes": [], - "file_path": 
"", - "password": false, - "name": "sender_name", - "display_name": "Sender Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "session_id": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "session_id", - "display_name": "Session ID", - "advanced": false, - "dynamic": false, - "info": "If provided, the message will be stored in the memory.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"], - "value": "MySessionID" - }, - "_type": "CustomComponent" + "base_classes": [ + "Text", + "object", + "Record", + "str" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "return_record": null, + "sender": null, + "sender_name": null, + "session_id": null }, "description": "Get chat inputs from the Playground.", - "icon": "ChatInput", - "base_classes": ["Text", "object", "Record", "str"], "display_name": "Chat Input", "documentation": "", - "custom_fields": { - "sender": null, - "sender_name": null, - "input_value": null, - "session_id": null, - "return_record": null - }, - "output_types": ["Text", "Record"], "field_formatters": {}, - "frozen": false, "field_order": [], - "beta": false + "frozen": false, + "icon": "ChatInput", + "output_types": [ + "Message" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.schema.message import Message\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n files: Optional[list[str]] = None,\n session_id: Optional[str] = None,\n ) -> Message:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n files=files,\n session_id=session_id,\n )\n" + }, + "input_value": { + "advanced": false, + "display_name": "Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "sender": { + "advanced": true, + "display_name": "Sender Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "sender", + "options": [ + "Machine", + "User" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "User" + }, + "sender_name": { + "advanced": false, + "display_name": 
"Sender Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "sender_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "User" + }, + "session_id": { + "advanced": false, + "display_name": "Session ID", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "If provided, the message will be stored in the memory.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "session_id", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "MySessionID" + } + } }, - "id": "ChatInput-t7F8v" + "type": "ChatInput" }, - "selected": false, - "width": 384, + "dragging": false, "height": 469, + "id": "ChatInput-t7F8v", + "position": { + "x": 1283.2700598313072, + "y": 982.5953650473145 + }, "positionAbsolute": { "x": 1283.2700598313072, "y": 982.5953650473145 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "ChatOutput-P1jEe", - "type": "genericNode", - "position": { - "x": 3154.916355514023, - "y": 851.051882666333 - }, "data": { - "type": "ChatOutput", + "id": "ChatOutput-P1jEe", "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Message", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "return_record": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "return_record", - "display_name": "Return Record", - "advanced": true, - "dynamic": false, - "info": "Return the message as a record containing the sender, sender_name, and session_id.", - "load_from_db": false, - "title_case": false - }, - "sender": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "Machine", - 
"fileTypes": [], - "file_path": "", - "password": false, - "options": ["Machine", "User"], - "name": "sender", - "display_name": "Sender Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "AI", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "sender_name", - "display_name": "Sender Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "session_id": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "session_id", - "display_name": "Session ID", - "advanced": false, - "dynamic": false, - "info": "If provided, the message will be stored in the memory.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"], - "value": "MySessionID" - }, - "_type": "CustomComponent" + "base_classes": [ + "Text", + "object", + "Record", + "str" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "return_record": null, + "sender": null, + "sender_name": null, + "session_id": null }, "description": "Display a chat message in the Playground.", - "icon": "ChatOutput", - "base_classes": ["Text", "object", "Record", "str"], "display_name": "Chat Output", "documentation": "", - "custom_fields": { - "sender": null, - "sender_name": null, - "input_value": null, - "session_id": null, - "return_record": null - }, - "output_types": ["Text", "Record"], "field_formatters": {}, - "frozen": false, "field_order": [], - "beta": false + "frozen": false, + "icon": "ChatOutput", + "output_types": [ + "Message" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.schema.message import Message\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n files: Optional[list[str]] = None,\n ) -> Message:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n files=files,\n )\n" + }, + "input_value": { + "advanced": false, + "display_name": "Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "sender": { + "advanced": true, + "display_name": "Sender Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": 
false, + "name": "sender", + "options": [ + "Machine", + "User" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Machine" + }, + "sender_name": { + "advanced": false, + "display_name": "Sender Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "sender_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "AI" + }, + "session_id": { + "advanced": false, + "display_name": "Session ID", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "If provided, the message will be stored in the memory.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "session_id", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "MySessionID" + } + } }, - "id": "ChatOutput-P1jEe" + "type": "ChatOutput" }, - "selected": false, - "width": 384, - "height": 477, "dragging": false, + "height": 477, + "id": "ChatOutput-P1jEe", + "position": { + "x": 3154.916355514023, + "y": 851.051882666333 + }, "positionAbsolute": { "x": 3154.916355514023, "y": 851.051882666333 - } + }, + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "MemoryComponent-cdA1J", - "type": "genericNode", - "position": { - "x": 1289.9606870058817, - "y": 442.16804561053766 - }, "data": { - "type": "MemoryComponent", + "description": "Retrieves stored chat messages given a specific Session ID.", + "display_name": "Chat Memory", + "id": "MemoryComponent-cdA1J", "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langflow.base.memory.memory import BaseMemoryComponent\nfrom langflow.field_typing import Text\nfrom langflow.helpers.record import records_to_text\nfrom langflow.memory import get_messages\nfrom langflow.schema.schema import Record\n\n\nclass MemoryComponent(BaseMemoryComponent):\n display_name = \"Chat Memory\"\n description = \"Retrieves stored chat messages given a specific Session ID.\"\n beta: bool = True\n icon = \"history\"\n\n def build_config(self):\n return {\n \"sender\": {\n \"options\": [\"Machine\", \"User\", \"Machine and User\"],\n \"display_name\": \"Sender Type\",\n },\n \"sender_name\": {\"display_name\": \"Sender Name\", \"advanced\": True},\n \"n_messages\": {\n \"display_name\": \"Number of Messages\",\n \"info\": \"Number of messages to retrieve.\",\n },\n \"session_id\": {\n \"display_name\": \"Session ID\",\n \"info\": \"Session ID of the chat history.\",\n \"input_types\": [\"Text\"],\n },\n \"order\": {\n \"options\": [\"Ascending\", \"Descending\"],\n \"display_name\": \"Order\",\n \"info\": \"Order of the messages.\",\n \"advanced\": True,\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def get_messages(self, **kwargs) -> list[Record]:\n # Validate kwargs by checking if it contains the correct keys\n if \"sender\" not in kwargs:\n kwargs[\"sender\"] = None\n if \"sender_name\" not in kwargs:\n kwargs[\"sender_name\"] = None\n if \"session_id\" not in kwargs:\n kwargs[\"session_id\"] = None\n if \"limit\" not in kwargs:\n kwargs[\"limit\"] = 5\n if \"order\" not in kwargs:\n kwargs[\"order\"] = \"Descending\"\n\n kwargs[\"order\"] = \"DESC\" if kwargs[\"order\"] == \"Descending\" else \"ASC\"\n if kwargs[\"sender\"] == \"Machine and User\":\n kwargs[\"sender\"] = None\n return get_messages(**kwargs)\n\n def build(\n self,\n sender: Optional[str] = \"Machine and User\",\n sender_name: Optional[str] = None,\n session_id: Optional[str] = None,\n n_messages: int = 5,\n order: Optional[str] = \"Descending\",\n record_template: Optional[str] = \"{sender_name}: {text}\",\n ) -> Text:\n messages = self.get_messages(\n sender=sender,\n sender_name=sender_name,\n session_id=session_id,\n limit=n_messages,\n order=order,\n )\n messages_str = records_to_text(template=record_template or \"\", records=messages)\n self.status = messages_str\n return messages_str\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "n_messages": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 5, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "n_messages", - "display_name": "Number of Messages", - "advanced": false, - "dynamic": false, - "info": "Number of messages to retrieve.", - "load_from_db": false, - "title_case": false - }, - "order": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "Descending", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Ascending", "Descending"], - "name": "order", - "display_name": "Order", - "advanced": true, - "dynamic": false, - "info": "Order of the messages.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "record_template": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "{sender_name}: {text}", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "record_template", - "display_name": "Record Template", - "advanced": true, - "dynamic": false, - "info": "Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "Machine and User", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Machine", "User", "Machine and User"], - "name": "sender", - "display_name": "Sender Type", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "sender_name", - "display_name": "Sender Name", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "session_id": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "session_id", - "display_name": "Session ID", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, - "info": "Session ID of the chat history.", - "load_from_db": false, - "title_case": false, - "value": "MySessionID" - }, - "_type": "CustomComponent" - }, - "description": "Retrieves stored chat messages given a specific Session ID.", - "icon": "history", - "base_classes": ["str", "Text", "object"], - "display_name": "Chat Memory", - "documentation": "", + "base_classes": [ + "str", + "Text", + "object" + ], + "beta": true, "custom_fields": { - "sender": null, - "sender_name": null, - "session_id": null, "n_messages": null, "order": null, - "record_template": null + "record_template": null, + "sender": null, + "sender_name": null, + "session_id": null }, - "output_types": ["Text"], + "description": "Retrieves stored chat messages given a specific Session ID.", + "display_name": "Chat Memory", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [], - "beta": true + "frozen": false, + "icon": "history", + "output_types": [ + "Text" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.memory.memory import BaseMemoryComponent\nfrom langflow.field_typing import Text\nfrom langflow.helpers.record import messages_to_text\nfrom langflow.memory import get_messages\nfrom langflow.schema.message import Message\n\n\nclass MemoryComponent(BaseMemoryComponent):\n display_name = \"Chat Memory\"\n description = \"Retrieves stored chat messages given a specific Session ID.\"\n beta: bool = True\n icon = \"history\"\n\n def build_config(self):\n return {\n \"sender\": {\n \"options\": [\"Machine\", \"User\", \"Machine and User\"],\n \"display_name\": \"Sender Type\",\n },\n \"sender_name\": {\"display_name\": \"Sender Name\", \"advanced\": True},\n \"n_messages\": {\n \"display_name\": \"Number of Messages\",\n \"info\": \"Number of messages to retrieve.\",\n },\n \"session_id\": {\n \"display_name\": \"Session ID\",\n \"info\": \"Session ID 
of the chat history.\",\n \"input_types\": [\"Text\"],\n },\n \"order\": {\n \"options\": [\"Ascending\", \"Descending\"],\n \"display_name\": \"Order\",\n \"info\": \"Order of the messages.\",\n \"advanced\": True,\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def get_messages(self, **kwargs) -> list[Message]:\n # Validate kwargs by checking if it contains the correct keys\n if \"sender\" not in kwargs:\n kwargs[\"sender\"] = None\n if \"sender_name\" not in kwargs:\n kwargs[\"sender_name\"] = None\n if \"session_id\" not in kwargs:\n kwargs[\"session_id\"] = None\n if \"limit\" not in kwargs:\n kwargs[\"limit\"] = 5\n if \"order\" not in kwargs:\n kwargs[\"order\"] = \"Descending\"\n\n kwargs[\"order\"] = \"DESC\" if kwargs[\"order\"] == \"Descending\" else \"ASC\"\n if kwargs[\"sender\"] == \"Machine and User\":\n kwargs[\"sender\"] = None\n return get_messages(**kwargs)\n\n def build(\n self,\n sender: Optional[str] = \"Machine and User\",\n sender_name: Optional[str] = None,\n session_id: Optional[str] = None,\n n_messages: int = 5,\n order: Optional[str] = \"Descending\",\n record_template: Optional[str] = \"{sender_name}: {text}\",\n ) -> Text:\n messages = self.get_messages(\n sender=sender,\n sender_name=sender_name,\n session_id=session_id,\n limit=n_messages,\n order=order,\n )\n messages_str = messages_to_text(template=record_template or \"\", messages=messages)\n self.status = messages_str\n return messages_str\n" + }, + "n_messages": { + "advanced": false, + "display_name": "Number of Messages", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Number of messages to retrieve.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "n_messages", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 5 + }, + "order": { + "advanced": true, + "display_name": "Order", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Order of the messages.", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "order", + "options": [ + "Ascending", + "Descending" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Descending" + }, + "record_template": { + "advanced": true, + "display_name": "Record Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "record_template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "{sender_name}: {text}" + }, + "sender": { + "advanced": false, + "display_name": "Sender Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "sender", + "options": [ + "Machine", + "User", + "Machine and User" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Machine and User" + }, + "sender_name": { + "advanced": true, + "display_name": "Sender Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "sender_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "session_id": { + "advanced": false, + "display_name": "Session ID", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Session ID of the chat history.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "session_id", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "MySessionID" + } + } }, - "id": "MemoryComponent-cdA1J", - "description": "Retrieves stored chat messages given a specific Session ID.", - "display_name": "Chat Memory" + "type": "MemoryComponent" }, - "selected": false, - "width": 384, - "height": 489, "dragging": false, + "height": 489, + "id": "MemoryComponent-cdA1J", + "position": { + "x": 1289.9606870058817, + "y": 442.16804561053766 + }, "positionAbsolute": { "x": 1289.9606870058817, "y": 442.16804561053766 - } + }, + "selected": false, + "type": "genericNode", + "width": 384 }, { + "data": { + "description": "A component for creating prompt templates using dynamic variables.", + "display_name": "Prompt", + "id": "Prompt-ODkUx", + "node": { + "base_classes": [ + "Text", + "str", + "object" + ], + "beta": false, + "custom_fields": { + "template": [ + "context", + "user_message" + ] + }, + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "documentation": "", + "error": null, + "field_formatters": {}, + "field_order": [], + "frozen": false, + "full_path": null, + "icon": "prompts", + "is_composition": null, + "is_input": null, + "is_output": null, + "name": "", + "output_types": [ + "Prompt" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.custom import CustomComponent\nfrom langflow.field_typing import TemplateField\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def 
build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Prompt:\n prompt = await Prompt.from_template_and_variables(template, kwargs)\n self.status = prompt.format_text()\n return prompt\n" + }, + "context": { + "advanced": false, + "display_name": "context", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "context", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "template": { + "advanced": false, + "display_name": "Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "prompt", + "value": "{context}\n\nUser: {user_message}\nAI: " + }, + "user_message": { + "advanced": false, + "display_name": "user_message", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "user_message", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + } + } + }, + "type": "Prompt" + }, + "dragging": false, + "height": 477, "id": "Prompt-ODkUx", - "type": "genericNode", "position": { "x": 1894.594426342426, "y": 753.3797365481901 }, - "data": { - "type": "Prompt", - "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "template": { - "type": "prompt", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "{context}\n\nUser: {user_message}\nAI: ", - "fileTypes": [], - "file_path": "", - 
"password": false, - "name": "template", - "display_name": "Template", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent", - "context": { - "field_type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "context", - "display_name": "context", - "advanced": false, - "input_types": [ - "Document", - "BaseOutputParser", - "Record", - "Text" - ], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "type": "str" - }, - "user_message": { - "field_type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "user_message", - "display_name": "user_message", - "advanced": false, - "input_types": [ - "Document", - "BaseOutputParser", - "Record", - "Text" - ], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "type": "str" - } - }, - "description": "Create a prompt template with dynamic variables.", - "icon": "prompts", - "is_input": null, - "is_output": null, - "is_composition": null, - "base_classes": ["Text", "str", "object"], - "name": "", - "display_name": "Prompt", - "documentation": "", - "custom_fields": { - "template": ["context", "user_message"] - }, - "output_types": ["Text"], - "full_path": null, - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false, - "error": null - }, - "id": "Prompt-ODkUx", - "description": "A component for creating prompt templates using dynamic variables.", - "display_name": "Prompt" - }, - "selected": false, - "width": 384, - "height": 477, - "dragging": false, "positionAbsolute": { "x": 1894.594426342426, "y": 753.3797365481901 - } + }, + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "OpenAIModel-9RykF", - "type": "genericNode", - "position": { - "x": 2561.5850334731617, - "y": 553.2745131130916 - }, "data": { - "type": "OpenAIModel", + "id": "OpenAIModel-9RykF", "node": { - "template": { - "input_value": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Input", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": 
{\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "max_tokens": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 256, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "max_tokens", - "display_name": "Max Tokens", - "advanced": true, - "dynamic": false, - "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", - "load_from_db": false, - "title_case": false - }, - "model_kwargs": { - "type": "NestedDict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": {}, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "model_kwargs", - "display_name": "Model Kwargs", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "model_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "gpt-4-1106-preview", - "fileTypes": [], - "file_path": "", - "password": false, - "options": [ - "gpt-4o", - "gpt-4-turbo", - "gpt-4-turbo-preview", - "gpt-3.5-turbo", - "gpt-3.5-turbo-0125" - ], - "name": "model_name", - "display_name": "Model Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_api_base": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "openai_api_base", - "display_name": "OpenAI API Base", - "advanced": true, - "dynamic": false, - "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_api_key": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": true, - "name": "openai_api_key", - "display_name": "OpenAI API Key", - "advanced": false, - "dynamic": false, - "info": "The OpenAI API Key to use for the OpenAI model.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"], - "value": "OPENAI_API_KEY" - }, - "stream": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "stream", - "display_name": "Stream", - "advanced": true, - "dynamic": false, - "info": "Stream the response from the model. 
Streaming works only in Chat.", - "load_from_db": false, - "title_case": false - }, - "system_message": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "system_message", - "display_name": "System Message", - "advanced": true, - "dynamic": false, - "info": "System message to pass to the model.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "temperature": { - "type": "float", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "0.2", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "temperature", - "display_name": "Temperature", - "advanced": false, - "dynamic": false, - "info": "", - "rangeSpec": { - "step_type": "float", - "min": -1, - "max": 1, - "step": 0.1 - }, - "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent" - }, - "description": "Generates text using OpenAI LLMs.", - "icon": "OpenAI", - "base_classes": ["str", "object", "Text"], - "display_name": "OpenAI", - "documentation": "", + "base_classes": [ + "str", + "object", + "Text" + ], + "beta": false, "custom_fields": { "input_value": null, - "openai_api_key": null, - "temperature": null, - "model_name": null, "max_tokens": null, "model_kwargs": null, + "model_name": null, "openai_api_base": null, + "openai_api_key": null, "stream": null, - "system_message": null + "system_message": null, + "temperature": null }, - "output_types": ["Text"], + "description": "Generates text using OpenAI LLMs.", + "display_name": "OpenAI", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [ "max_tokens", "model_kwargs", @@ -889,239 +869,355 @@ "system_message", "stream" ], - "beta": false + "frozen": false, + "icon": "OpenAI", + "output_types": [ + "Text" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Input", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text", + "Record", + "Prompt" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str" + }, + "max_tokens": { + "advanced": true, + "display_name": "Max Tokens", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "max_tokens", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 256 + }, + "model_kwargs": { + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "model_kwargs", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "NestedDict", + "value": {} + }, + "model_name": { + "advanced": false, + "display_name": "Model Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "model_name", + "options": [ + "gpt-4o", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "gpt-4-1106-preview" + }, + "openai_api_base": { + "advanced": true, + "display_name": "OpenAI API Base", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "openai_api_base", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "openai_api_key": { + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The OpenAI API Key to use for the OpenAI model.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": true, + "multiline": false, + "name": "openai_api_key", + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": "OPENAI_API_KEY" + }, + "stream": { + "advanced": true, + "display_name": "Stream", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Stream the response from the model. 
Streaming works only in Chat.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "stream", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false + }, + "system_message": { + "advanced": true, + "display_name": "System Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "System message to pass to the model.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "system_message", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "temperature": { + "advanced": false, + "display_name": "Temperature", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "temperature", + "password": false, + "placeholder": "", + "rangeSpec": { + "max": 1, + "min": -1, + "step": 0.1, + "step_type": "float" + }, + "required": false, + "show": true, + "title_case": false, + "type": "float", + "value": "0.2" + } + } }, - "id": "OpenAIModel-9RykF" + "type": "OpenAIModel" }, - "selected": false, - "width": 384, + "dragging": false, "height": 563, + "id": "OpenAIModel-9RykF", + "position": { + "x": 2561.5850334731617, + "y": 553.2745131130916 + }, "positionAbsolute": { "x": 2561.5850334731617, "y": 553.2745131130916 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "TextOutput-vrs6T", - "type": "genericNode", - "position": { - "x": 1911.4785906252087, - "y": 247.39079954376987 - }, "data": { - "type": "TextOutput", + "id": "TextOutput-vrs6T", "node": { - "template": { - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Value", - "advanced": false, - "input_types": ["Record", "Text"], - "dynamic": false, - "info": "Text or Record to be passed as output.", - "load_from_db": false, - "title_case": false - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextOutput(TextComponent):\n display_name = \"Text Output\"\n description = \"Display a text output in the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as output.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(self, input_value: Optional[Text] = \"\", record_template: str = \"\") -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "record_template": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "record_template", - "display_name": "Record Template", - "advanced": true, - "dynamic": false, - "info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Display a text output in the Playground.", - "icon": "type", - "base_classes": ["str", "object", "Text"], - "display_name": "Inspect Memory", - "documentation": "", + "base_classes": [ + "str", + "object", + "Text" + ], + "beta": false, "custom_fields": { "input_value": null, "record_template": null }, - "output_types": ["Text"], + "description": "Display a text output in the Playground.", + "display_name": "Inspect Memory", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [], - "beta": false + "frozen": false, + "icon": "type", + "output_types": [ + "Text" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextOutput(TextComponent):\n display_name = \"Text Output\"\n description = \"Display a text output in the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as output.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(self, input_value: Optional[Text] = \"\", record_template: str = \"\") -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Value", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Text or Record to be passed as output.", + "input_types": [ + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "record_template": { + "advanced": true, + "display_name": "Record Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "record_template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + } + } }, - "id": "TextOutput-vrs6T" + "type": "TextOutput" }, - "selected": false, - "width": 384, + "dragging": false, "height": 289, + "id": "TextOutput-vrs6T", + "position": { + "x": 1911.4785906252087, + "y": 247.39079954376987 + }, "positionAbsolute": { "x": 1911.4785906252087, "y": 247.39079954376987 }, - "dragging": false - } - ], - "edges": [ - { - "source": "MemoryComponent-cdA1J", - "sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œMemoryComponentœ,œidœ:œMemoryComponent-cdA1Jœ}", - "target": "Prompt-ODkUx", - "targetHandle": "{œfieldNameœ:œcontextœ,œidœ:œPrompt-ODkUxœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "context", - "type": "str", - "id": "Prompt-ODkUx", - "inputTypes": ["Document", "BaseOutputParser", "Record", "Text"] - }, - "sourceHandle": { - "baseClasses": ["str", "Text", "object"], - "dataType": "MemoryComponent", - "id": "MemoryComponent-cdA1J" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-MemoryComponent-cdA1J{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œMemoryComponentœ,œidœ:œMemoryComponent-cdA1Jœ}-Prompt-ODkUx{œfieldNameœ:œcontextœ,œidœ:œPrompt-ODkUxœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "selected": false - }, - { - "source": "ChatInput-t7F8v", - "sourceHandle": "{œbaseClassesœ:[œTextœ,œobjectœ,œRecordœ,œstrœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-t7F8vœ}", - "target": "Prompt-ODkUx", - "targetHandle": "{œfieldNameœ:œuser_messageœ,œidœ:œPrompt-ODkUxœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "user_message", - "type": "str", - "id": "Prompt-ODkUx", - "inputTypes": ["Document", "BaseOutputParser", "Record", "Text"] - }, - "sourceHandle": { - "baseClasses": ["Text", "object", "Record", "str"], - "dataType": "ChatInput", - "id": "ChatInput-t7F8v" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-ChatInput-t7F8v{œbaseClassesœ:[œTextœ,œobjectœ,œRecordœ,œstrœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-t7F8vœ}-Prompt-ODkUx{œfieldNameœ:œuser_messageœ,œidœ:œPrompt-ODkUxœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "selected": false - }, - { - "source": "Prompt-ODkUx", - "sourceHandle": "{œbaseClassesœ:[œTextœ,œstrœ,œobjectœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-ODkUxœ}", - "target": "OpenAIModel-9RykF", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-9RykFœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "OpenAIModel-9RykF", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["Text", "str", "object"], - "dataType": "Prompt", - "id": "Prompt-ODkUx" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": 
"reactflow__edge-Prompt-ODkUx{œbaseClassesœ:[œTextœ,œstrœ,œobjectœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-ODkUxœ}-OpenAIModel-9RykF{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-9RykFœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "OpenAIModel-9RykF", - "sourceHandle": "{œbaseClassesœ:[œstrœ,œobjectœ,œTextœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-9RykFœ}", - "target": "ChatOutput-P1jEe", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-P1jEeœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "ChatOutput-P1jEe", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["str", "object", "Text"], - "dataType": "OpenAIModel", - "id": "OpenAIModel-9RykF" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-OpenAIModel-9RykF{œbaseClassesœ:[œstrœ,œobjectœ,œTextœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-9RykFœ}-ChatOutput-P1jEe{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-P1jEeœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "MemoryComponent-cdA1J", - "sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œMemoryComponentœ,œidœ:œMemoryComponent-cdA1Jœ}", - "target": "TextOutput-vrs6T", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-vrs6Tœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "TextOutput-vrs6T", - "inputTypes": ["Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["str", "Text", "object"], - "dataType": "MemoryComponent", - "id": "MemoryComponent-cdA1J" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-foreground stroke-connection", - "id": "reactflow__edge-MemoryComponent-cdA1J{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œMemoryComponentœ,œidœ:œMemoryComponent-cdA1Jœ}-TextOutput-vrs6T{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-vrs6Tœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}" + "selected": false, + "type": "genericNode", + "width": 384 } ], "viewport": { @@ -1131,7 +1227,10 @@ } }, "description": "This project can be used as a starting point for building a Chat experience with user specific memory. 
You can set a different Session ID to start a new message history.", - "name": "Memory Chatbot", + "icon": "🤖", + "icon_bg_color": "#FFD700", + "id": "08d5cccf-d098-4367-b14b-1078429c9ed9", + "is_component": false, "last_tested_version": "1.0.0a0", - "is_component": false -} + "name": "Memory Chatbot" +} \ No newline at end of file diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Prompt Chaining.json b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Prompt Chaining.json index d49eba054..e43723236 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Prompt Chaining.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Prompt Chaining.json @@ -1,863 +1,1155 @@ { - "id": "85392e54-20f3-4ab5-a179-cb4bef16f639", "data": { + "edges": [ + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "str", + "Text", + "object" + ], + "dataType": "TextInput", + "id": "TextInput-sptaH" + }, + "targetHandle": { + "fieldName": "document", + "id": "Prompt-amqBu", + "inputTypes": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-TextInput-sptaH{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-sptaHœ}-Prompt-amqBu{œfieldNameœ:œdocumentœ,œidœ:œPrompt-amqBuœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", + "source": "TextInput-sptaH", + "sourceHandle": "{œbaseClassesœ: [œstrœ, œTextœ, œobjectœ], œdataTypeœ: œTextInputœ, œidœ: œTextInput-sptaHœ}", + "style": { + "stroke": "#555" + }, + "target": "Prompt-amqBu", + "targetHandle": "{œfieldNameœ: œdocumentœ, œidœ: œPrompt-amqBuœ, œinputTypesœ: [œDocumentœ, œBaseOutputParserœ, œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "str", + "Text" + ], + "dataType": "Prompt", + "id": "Prompt-amqBu" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "TextOutput-2MS4a", + "inputTypes": [ + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-Prompt-amqBu{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}-TextOutput-2MS4a{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-2MS4aœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}", + "source": "Prompt-amqBu", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œstrœ, œTextœ], œdataTypeœ: œPromptœ, œidœ: œPrompt-amqBuœ}", + "style": { + "stroke": "#555" + }, + "target": "TextOutput-2MS4a", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œTextOutput-2MS4aœ, œinputTypesœ: [œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "str", + "Text" + ], + "dataType": "Prompt", + "id": "Prompt-amqBu" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "OpenAIModel-uYXZJ", + "inputTypes": [ + "Text", + "Record", + "Prompt" + ], + "type": "str" + } + }, + "id": "reactflow__edge-Prompt-amqBu{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}-OpenAIModel-uYXZJ{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-uYXZJœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "source": "Prompt-amqBu", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œstrœ, œTextœ], œdataTypeœ: œPromptœ, œidœ: œPrompt-amqBuœ}", + "style": { + "stroke": "#555" + }, + "target": "OpenAIModel-uYXZJ", + "targetHandle": 
"{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-uYXZJœ, œinputTypesœ: [œTextœ, œRecordœ, œPromptœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "str", + "Text", + "object" + ], + "dataType": "OpenAIModel", + "id": "OpenAIModel-uYXZJ" + }, + "targetHandle": { + "fieldName": "summary", + "id": "Prompt-gTNiz", + "inputTypes": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-OpenAIModel-uYXZJ{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}-Prompt-gTNiz{œfieldNameœ:œsummaryœ,œidœ:œPrompt-gTNizœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", + "source": "OpenAIModel-uYXZJ", + "sourceHandle": "{œbaseClassesœ: [œstrœ, œTextœ, œobjectœ], œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-uYXZJœ}", + "style": { + "stroke": "#555" + }, + "target": "Prompt-gTNiz", + "targetHandle": "{œfieldNameœ: œsummaryœ, œidœ: œPrompt-gTNizœ, œinputTypesœ: [œDocumentœ, œBaseOutputParserœ, œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "str", + "Text", + "object" + ], + "dataType": "OpenAIModel", + "id": "OpenAIModel-uYXZJ" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-EJkG3", + "inputTypes": [ + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-OpenAIModel-uYXZJ{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}-ChatOutput-EJkG3{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-EJkG3œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "source": "OpenAIModel-uYXZJ", + "sourceHandle": "{œbaseClassesœ: [œstrœ, œTextœ, œobjectœ], œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-uYXZJœ}", + "style": { + "stroke": "#555" + }, + "target": "ChatOutput-EJkG3", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-EJkG3œ, œinputTypesœ: [œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "str", + "Text" + ], + "dataType": "Prompt", + "id": "Prompt-gTNiz" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "TextOutput-MUDOR", + "inputTypes": [ + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-Prompt-gTNiz{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}-TextOutput-MUDOR{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-MUDORœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}", + "source": "Prompt-gTNiz", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œstrœ, œTextœ], œdataTypeœ: œPromptœ, œidœ: œPrompt-gTNizœ}", + "style": { + "stroke": "#555" + }, + "target": "TextOutput-MUDOR", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œTextOutput-MUDORœ, œinputTypesœ: [œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "str", + "Text" + ], + "dataType": "Prompt", + "id": "Prompt-gTNiz" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "OpenAIModel-XawYB", + "inputTypes": [ + "Text", + "Record", + "Prompt" + ], + "type": "str" + } + }, + "id": "reactflow__edge-Prompt-gTNiz{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}-OpenAIModel-XawYB{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-XawYBœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "source": 
"Prompt-gTNiz", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œstrœ, œTextœ], œdataTypeœ: œPromptœ, œidœ: œPrompt-gTNizœ}", + "style": { + "stroke": "#555" + }, + "target": "OpenAIModel-XawYB", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-XawYBœ, œinputTypesœ: [œTextœ, œRecordœ, œPromptœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "str", + "Text", + "object" + ], + "dataType": "OpenAIModel", + "id": "OpenAIModel-XawYB" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-DNmvg", + "inputTypes": [ + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-OpenAIModel-XawYB{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-XawYBœ}-ChatOutput-DNmvg{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-DNmvgœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "source": "OpenAIModel-XawYB", + "sourceHandle": "{œbaseClassesœ: [œstrœ, œTextœ, œobjectœ], œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-XawYBœ}", + "style": { + "stroke": "#555" + }, + "target": "ChatOutput-DNmvg", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-DNmvgœ, œinputTypesœ: [œTextœ], œtypeœ: œstrœ}" + } + ], "nodes": [ { - "id": "Prompt-amqBu", - "type": "genericNode", - "position": { - "x": 2191.5837146441663, - "y": 1047.9307944451873 - }, "data": { - "type": "Prompt", + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "id": "Prompt-amqBu", "node": { + "base_classes": [ + "object", + "str", + "Text" + ], + "beta": false, + "custom_fields": { + "template": [ + "document" + ] + }, + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "documentation": "", + "error": null, + "field_formatters": {}, + "field_order": [], + "frozen": false, + "full_path": null, + "icon": "prompts", + "is_composition": null, + "is_input": null, + "is_output": null, + "name": "", + "output_types": [ + "Prompt" + ], "template": { + "_type": "CustomComponent", "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", "advanced": true, "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "template": { - "type": "prompt", - "required": false, - "placeholder": "", - "list": false, - "show": true, - 
"multiline": false, - "value": "You are a helpful assistant. Given a long document, your task is to create a concise summary that captures the main points and key details. The summary should be clear, accurate, and succinct. Please provide the summary in the format below:\n####\n{document}\n####\n", "fileTypes": [], "file_path": "", - "password": false, - "name": "template", - "display_name": "Template", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, "info": "", - "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent", - "document": { - "field_type": "str", - "required": false, - "placeholder": "", "list": false, - "show": true, + "load_from_db": false, "multiline": true, - "value": "", + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.custom import CustomComponent\nfrom langflow.field_typing import TemplateField\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Prompt:\n prompt = await Prompt.from_template_and_variables(template, kwargs)\n self.status = prompt.format_text()\n return prompt\n" + }, + "document": { + "advanced": false, + "display_name": "document", + "dynamic": false, + "field_type": "str", "fileTypes": [], "file_path": "", - "password": false, - "name": "document", - "display_name": "document", - "advanced": false, + "info": "", "input_types": [ "Document", "BaseOutputParser", "Record", "Text" ], - "dynamic": false, - "info": "", + "list": false, "load_from_db": false, + "multiline": true, + "name": "document", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "type": "str" + "type": "str", + "value": "" + }, + "template": { + "advanced": false, + "display_name": "Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "prompt", + "value": "You are a helpful assistant. Given a long document, your task is to create a concise summary that captures the main points and key details. The summary should be clear, accurate, and succinct. 
Please provide the summary in the format below:\n####\n{document}\n####\n" } - }, - "description": "Create a prompt template with dynamic variables.", - "icon": "prompts", - "is_input": null, - "is_output": null, - "is_composition": null, - "base_classes": ["object", "str", "Text"], - "name": "", - "display_name": "Prompt", - "documentation": "", - "custom_fields": { - "template": ["document"] - }, - "output_types": ["Text"], - "full_path": null, - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false, - "error": null + } }, - "id": "Prompt-amqBu", - "description": "Create a prompt template with dynamic variables.", - "display_name": "Prompt" + "type": "Prompt" }, - "selected": false, - "width": 384, + "dragging": false, "height": 385, + "id": "Prompt-amqBu", + "position": { + "x": 2191.5837146441663, + "y": 1047.9307944451873 + }, "positionAbsolute": { "x": 2191.5837146441663, "y": 1047.9307944451873 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { + "data": { + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "id": "Prompt-gTNiz", + "node": { + "base_classes": [ + "object", + "str", + "Text" + ], + "beta": false, + "custom_fields": { + "template": [ + "summary" + ] + }, + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "documentation": "", + "error": null, + "field_formatters": {}, + "field_order": [], + "frozen": false, + "full_path": null, + "icon": "prompts", + "is_composition": null, + "is_input": null, + "is_output": null, + "name": "", + "output_types": [ + "Prompt" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.custom import CustomComponent\nfrom langflow.field_typing import TemplateField\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Prompt:\n prompt = await Prompt.from_template_and_variables(template, kwargs)\n self.status = prompt.format_text()\n return prompt\n" + }, + "summary": { + "advanced": false, + "display_name": "summary", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "summary", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "template": { + "advanced": false, + "display_name": "Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "prompt", + "value": "Given 
a summary of an article, please create two multiple-choice questions that cover the key points and details mentioned. Ensure the questions are clear and provide three options (A, B, C), with one correct answer.\n####\n{summary}\n####" + } + } + }, + "type": "Prompt" + }, + "dragging": false, + "height": 385, "id": "Prompt-gTNiz", - "type": "genericNode", "position": { "x": 3731.0813766902447, "y": 799.631909121391 }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { "data": { - "type": "Prompt", + "id": "ChatOutput-EJkG3", "node": { + "base_classes": [ + "object", + "Record", + "Text", + "str" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "record_template": null, + "return_record": null, + "sender": null, + "sender_name": null, + "session_id": null + }, + "description": "Display a chat message in the Playground.", + "display_name": "Chat Output", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "icon": "ChatOutput", + "output_types": [ + "Message" + ], "template": { + "_type": "CustomComponent", "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", "advanced": true, "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "template": { - "type": "prompt", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "Given a summary of an article, please create two multiple-choice questions that cover the key points and details mentioned. 
Ensure the questions are clear and provide three options (A, B, C), with one correct answer.\n####\n{summary}\n####", "fileTypes": [], "file_path": "", - "password": false, - "name": "template", - "display_name": "Template", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, "info": "", - "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent", - "summary": { - "field_type": "str", - "required": false, - "placeholder": "", "list": false, - "show": true, + "load_from_db": false, "multiline": true, - "value": "", + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.schema.message import Message\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n files: Optional[list[str]] = None,\n ) -> Message:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n files=files,\n )\n" + }, + "input_value": { + "advanced": false, + "display_name": "Message", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "summary", - "display_name": "summary", - "advanced": false, + "info": "", "input_types": [ - "Document", - "BaseOutputParser", - "Record", "Text" ], - "dynamic": false, - "info": "", + "list": false, "load_from_db": false, + "multiline": true, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "sender": { + "advanced": true, + "display_name": "Sender Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "sender", + "options": [ + "Machine", + "User" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Machine" + }, + "sender_name": { + "advanced": false, + "display_name": "Sender Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "sender_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Summarizer" + }, + "session_id": { + "advanced": true, + "display_name": "Session ID", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "If provided, the message will be stored in the memory.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "session_id", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, "type": "str" } - }, - "description": "Create a prompt template with dynamic variables.", - "icon": "prompts", - "is_input": null, - "is_output": null, - "is_composition": null, - "base_classes": ["object", "str", "Text"], - "name": "", - "display_name": "Prompt", - "documentation": "", - "custom_fields": { - "template": ["summary"] - }, - "output_types": 
["Text"], - "full_path": null, - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false, - "error": null + } }, - "id": "Prompt-gTNiz", - "description": "Create a prompt template with dynamic variables.", - "display_name": "Prompt" + "type": "ChatOutput" }, - "selected": false, - "width": 384, + "dragging": false, "height": 385, - "dragging": false - }, - { "id": "ChatOutput-EJkG3", - "type": "genericNode", "position": { "x": 3722.1747844849388, "y": 1283.413553222214 }, - "data": { - "type": "ChatOutput", - "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Message", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "record_template": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "{text}", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "record_template", - "display_name": "Record Template", - "advanced": true, - "dynamic": false, - "info": "In case of Message being a Record, this template will be used to convert it to text.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "return_record": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "return_record", - "display_name": "Return Record", - "advanced": true, - "dynamic": false, - "info": "Return the message as a record containing the sender, sender_name, and session_id.", - "load_from_db": false, - "title_case": false - }, - "sender": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "Machine", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Machine", "User"], - "name": "sender", - "display_name": "Sender Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender_name": { - "type": "str", - "required": false, - "placeholder": "", 
- "list": false, - "show": true, - "multiline": false, - "value": "Summarizer", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "sender_name", - "display_name": "Sender Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "session_id": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "session_id", - "display_name": "Session ID", - "advanced": true, - "dynamic": false, - "info": "If provided, the message will be stored in the memory.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Display a chat message in the Playground.", - "icon": "ChatOutput", - "base_classes": ["object", "Record", "Text", "str"], - "display_name": "Chat Output", - "documentation": "", - "custom_fields": { - "sender": null, - "sender_name": null, - "input_value": null, - "session_id": null, - "return_record": null, - "record_template": null - }, - "output_types": ["Text", "Record"], - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false - }, - "id": "ChatOutput-EJkG3" - }, "selected": false, - "width": 384, - "height": 385, - "dragging": false + "type": "genericNode", + "width": 384 }, { + "data": { + "id": "ChatOutput-DNmvg", + "node": { + "base_classes": [ + "object", + "Record", + "Text", + "str" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "record_template": null, + "return_record": null, + "sender": null, + "sender_name": null, + "session_id": null + }, + "description": "Display a chat message in the Playground.", + "display_name": "Chat Output", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "icon": "ChatOutput", + "output_types": [ + "Message" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.schema.message import Message\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n files: Optional[list[str]] = None,\n ) -> Message:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n files=files,\n )\n" + }, + "input_value": { + "advanced": false, + "display_name": "Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "sender": { + "advanced": true, + "display_name": "Sender Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + 
"Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "sender", + "options": [ + "Machine", + "User" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Machine" + }, + "sender_name": { + "advanced": false, + "display_name": "Sender Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "sender_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Question Generator" + }, + "session_id": { + "advanced": true, + "display_name": "Session ID", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "If provided, the message will be stored in the memory.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "session_id", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + } + } + }, + "type": "ChatOutput" + }, + "height": 385, "id": "ChatOutput-DNmvg", - "type": "genericNode", "position": { "x": 5077.71285886074, "y": 1232.9152769735522 }, - "data": { - "type": "ChatOutput", - "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Message", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "record_template": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "{text}", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "record_template", - "display_name": "Record Template", - "advanced": true, - "dynamic": false, - "info": "In case of Message being a Record, this template will be used to convert it to text.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "return_record": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - 
"fileTypes": [], - "file_path": "", - "password": false, - "name": "return_record", - "display_name": "Return Record", - "advanced": true, - "dynamic": false, - "info": "Return the message as a record containing the sender, sender_name, and session_id.", - "load_from_db": false, - "title_case": false - }, - "sender": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "Machine", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Machine", "User"], - "name": "sender", - "display_name": "Sender Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "Question Generator", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "sender_name", - "display_name": "Sender Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "session_id": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "session_id", - "display_name": "Session ID", - "advanced": true, - "dynamic": false, - "info": "If provided, the message will be stored in the memory.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Display a chat message in the Playground.", - "icon": "ChatOutput", - "base_classes": ["object", "Record", "Text", "str"], - "display_name": "Chat Output", - "documentation": "", - "custom_fields": { - "sender": null, - "sender_name": null, - "input_value": null, - "session_id": null, - "return_record": null, - "record_template": null - }, - "output_types": ["Text", "Record"], - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false - }, - "id": "ChatOutput-DNmvg" - }, "selected": false, - "width": 384, - "height": 385 + "type": "genericNode", + "width": 384 }, { - "id": "TextInput-sptaH", - "type": "genericNode", - "position": { - "x": 1700.5624822024752, - "y": 1039.603088937466 - }, "data": { - "type": "TextInput", + "id": "TextInput-sptaH", "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextInput(TextComponent):\n display_name = \"Text Input\"\n description = \"Get text inputs from the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as input.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Optional[Text] = \"\",\n record_template: Optional[str] = \"\",\n ) -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "Revolutionary Nano-Battery Technology Unveiled In a groundbreaking announcement yesterday, researchers from the fictional Tech Innovations Institute revealed the development of a new nano-battery technology that promises to revolutionize energy storage. The new battery, dubbed the \"EnerGCell\", uses advanced nanomaterials to achieve unprecedented efficiency and storage capacities. According to lead researcher Dr. Ada Byron, the EnerGCell can store up to ten times more energy than the best lithium-ion batteries available today, while charging in just a fraction of the time. \"We're talking about charging your electric vehicle in just five minutes for a range of over 1,000 miles,\" Dr. Byron stated during the press conference. The technology behind the EnerGCell involves a complex arrangement of nanostructured electrodes that allow for rapid ion transfer and extremely high energy density. This breakthrough was achieved after a decade of research into nanomaterials and their applications in energy storage. The implications of this technology are vast, promising to accelerate the adoption of renewable energy by making it more practical and affordable to store wind and solar power. It could also lead to significant advancements in electric vehicles, mobile devices, and any other technology that relies on batteries. Despite the excitement, some experts are calling for patience, noting that the EnerGCell is still in its early stages of development and may take several years before it's commercially available. However, the potential impact of such a technology on the environment and the global economy is undeniable. Tech Innovations Institute plans to continue refining the EnerGCell and begin pilot projects with select partners in the coming year. If successful, this nano-battery technology could indeed be the breakthrough needed to usher in a new era of clean energy and technology.", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Value", - "advanced": false, - "input_types": ["Record", "Text"], - "dynamic": false, - "info": "Text or Record to be passed as input.", - "load_from_db": false, - "title_case": false - }, - "record_template": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "record_template", - "display_name": "Record Template", - "advanced": true, - "dynamic": false, - "info": "Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" + "base_classes": [ + "str", + "Text", + "object" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "record_template": null }, "description": "Get text inputs from the Playground.", - "icon": "type", - "base_classes": ["str", "Text", "object"], "display_name": "Text Input", "documentation": "", - "custom_fields": { - "input_value": null, - "record_template": null - }, - "output_types": ["Text"], "field_formatters": {}, - "frozen": false, "field_order": [], - "beta": false + "frozen": false, + "icon": "type", + "output_types": [ + "Text" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextInput(TextComponent):\n display_name = \"Text Input\"\n description = \"Get text inputs from the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as input.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Optional[Text] = \"\",\n record_template: Optional[str] = \"\",\n ) -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Value", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Text or Record to be passed as input.", + "input_types": [ + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Revolutionary Nano-Battery Technology Unveiled In a groundbreaking announcement yesterday, researchers from the fictional Tech Innovations Institute revealed the development of a new nano-battery technology that promises to revolutionize energy storage. The new battery, dubbed the \"EnerGCell\", uses advanced nanomaterials to achieve unprecedented efficiency and storage capacities. According to lead researcher Dr. Ada Byron, the EnerGCell can store up to ten times more energy than the best lithium-ion batteries available today, while charging in just a fraction of the time. \"We're talking about charging your electric vehicle in just five minutes for a range of over 1,000 miles,\" Dr. Byron stated during the press conference. The technology behind the EnerGCell involves a complex arrangement of nanostructured electrodes that allow for rapid ion transfer and extremely high energy density. This breakthrough was achieved after a decade of research into nanomaterials and their applications in energy storage. 
The implications of this technology are vast, promising to accelerate the adoption of renewable energy by making it more practical and affordable to store wind and solar power. It could also lead to significant advancements in electric vehicles, mobile devices, and any other technology that relies on batteries. Despite the excitement, some experts are calling for patience, noting that the EnerGCell is still in its early stages of development and may take several years before it's commercially available. However, the potential impact of such a technology on the environment and the global economy is undeniable. Tech Innovations Institute plans to continue refining the EnerGCell and begin pilot projects with select partners in the coming year. If successful, this nano-battery technology could indeed be the breakthrough needed to usher in a new era of clean energy and technology." + }, + "record_template": { + "advanced": true, + "display_name": "Record Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "record_template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + } + } }, - "id": "TextInput-sptaH" + "type": "TextInput" }, - "selected": false, - "width": 384, + "dragging": false, "height": 290, + "id": "TextInput-sptaH", + "position": { + "x": 1700.5624822024752, + "y": 1039.603088937466 + }, "positionAbsolute": { "x": 1700.5624822024752, "y": 1039.603088937466 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "TextOutput-2MS4a", - "type": "genericNode", - "position": { - "x": 2917.216113690115, - "y": 513.0058511435552 - }, "data": { - "type": "TextOutput", + "id": "TextOutput-2MS4a", "node": { - "template": { - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Value", - "advanced": false, - "input_types": ["Record", "Text"], - "dynamic": false, - "info": "Text or Record to be passed as output.", - "load_from_db": false, - "title_case": false - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextOutput(TextComponent):\n display_name = \"Text Output\"\n description = \"Display a text output in the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as output.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(self, input_value: Optional[Text] = \"\", record_template: str = \"\") -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "record_template": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "record_template", - "display_name": "Record Template", - "advanced": true, - "dynamic": false, - "info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Display a text output in the Playground.", - "icon": "type", - "base_classes": ["str", "Text", "object"], - "display_name": "First Prompt", - "documentation": "", + "base_classes": [ + "str", + "Text", + "object" + ], + "beta": false, "custom_fields": { "input_value": null, "record_template": null }, - "output_types": ["Text"], + "description": "Display a text output in the Playground.", + "display_name": "First Prompt", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [], - "beta": false + "frozen": false, + "icon": "type", + "output_types": [ + "Text" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextOutput(TextComponent):\n display_name = \"Text Output\"\n description = \"Display a text output in the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as output.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(self, input_value: Optional[Text] = \"\", record_template: str = \"\") -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Value", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Text or Record to be passed as output.", + "input_types": [ + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "record_template": { + "advanced": true, + "display_name": "Record Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "record_template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + } + } }, - "id": "TextOutput-2MS4a" + "type": "TextOutput" }, - "selected": false, - "width": 384, + "dragging": false, "height": 290, + "id": "TextOutput-2MS4a", + "position": { + "x": 2917.216113690115, + "y": 513.0058511435552 + }, "positionAbsolute": { "x": 2917.216113690115, "y": 513.0058511435552 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "OpenAIModel-uYXZJ", - "type": "genericNode", - "position": { - "x": 2925.784767523062, - "y": 933.6465680967775 - }, "data": { - "type": "OpenAIModel", + "id": "OpenAIModel-uYXZJ", "node": { + "base_classes": [ + "str", + "Text", + "object" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "max_tokens": null, + "model_kwargs": null, + "model_name": null, + "openai_api_base": null, + "openai_api_key": null, + "stream": null, + "system_message": null, + "temperature": null + }, + "description": "Generates text using OpenAI LLMs.", + "display_name": "OpenAI", + "documentation": "", + "field_formatters": {}, + "field_order": [ + "max_tokens", + "model_kwargs", + "model_name", + "openai_api_base", + "openai_api_key", + "temperature", + "input_value", + "system_message", + "stream" + ], + "frozen": false, + "icon": "OpenAI", + "output_types": [ + "Text" + ], "template": { - "input_value": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Input", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, + "_type": "CustomComponent", "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", "advanced": true, "dynamic": true, + "fileTypes": [], + "file_path": "", "info": "", + "list": false, "load_from_db": false, - "title_case": false + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Input", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text", + "Record", + "Prompt" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str" }, "max_tokens": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 256, + "advanced": true, + "display_name": "Max Tokens", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "max_tokens", - "display_name": "Max Tokens", - "advanced": true, - "dynamic": false, "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "max_tokens", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 256 }, "model_kwargs": { - "type": "NestedDict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": {}, + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "model_kwargs", - "display_name": "Model Kwargs", - "advanced": true, - "dynamic": false, "info": "", + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "model_kwargs", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "NestedDict", + "value": {} }, "model_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "gpt-4-turbo-preview", + "advanced": false, + "display_name": "Model Name", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "model_name", "options": [ "gpt-4o", "gpt-4-turbo", @@ -865,138 +1157,274 @@ "gpt-3.5-turbo", "gpt-3.5-turbo-0125" ], - "name": "model_name", - "display_name": "Model Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "str", + "value": "gpt-4-turbo-preview" }, "openai_api_base": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "OpenAI API Base", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "openai_api_base", - "display_name": "OpenAI API Base", - "advanced": true, - "dynamic": false, "info": "The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": false, + "multiline": false, + "name": "openai_api_base", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "str" }, "openai_api_key": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": true, - "name": "openai_api_key", - "display_name": "OpenAI API Key", - "advanced": false, - "dynamic": false, "info": "The OpenAI API Key to use for the OpenAI model.", - "load_from_db": false, + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": true, + "multiline": false, + "name": "openai_api_key", + "password": true, + "placeholder": "", + "required": true, + "show": true, "title_case": false, - "input_types": ["Text"], + "type": "str", "value": "OPENAI_API_KEY" }, "stream": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, + "advanced": true, + "display_name": "Stream", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "stream", - "display_name": "Stream", - "advanced": true, - "dynamic": false, "info": "Stream the response from the model. Streaming works only in Chat.", + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "stream", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false }, "system_message": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "System Message", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "system_message", - "display_name": "System Message", - "advanced": true, - "dynamic": false, "info": "System message to pass to the model.", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": false, + "multiline": false, + "name": "system_message", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "str" }, "temperature": { - "type": "float", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 0.1, + "advanced": false, + "display_name": "Temperature", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "temperature", - "display_name": "Temperature", - "advanced": false, - "dynamic": false, "info": "", - "rangeSpec": { - "step_type": "float", - "min": -1, - "max": 1, - "step": 0.1 - }, + "list": false, "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent" - }, - "description": "Generates text using OpenAI LLMs.", - "icon": "OpenAI", - "base_classes": ["str", "Text", "object"], - "display_name": "OpenAI", - "documentation": "", + "multiline": false, + "name": "temperature", + "password": false, + "placeholder": "", + "rangeSpec": { + "max": 1, + "min": -1, + "step": 0.1, + "step_type": "float" + }, + "required": false, + "show": true, + "title_case": false, + "type": "float", + "value": 0.1 + } + } + }, + "type": "OpenAIModel" + }, + 
"dragging": false, + "height": 565, + "id": "OpenAIModel-uYXZJ", + "position": { + "x": 2925.784767523062, + "y": 933.6465680967775 + }, + "positionAbsolute": { + "x": 2925.784767523062, + "y": 933.6465680967775 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { + "data": { + "id": "TextOutput-MUDOR", + "node": { + "base_classes": [ + "str", + "Text", + "object" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "record_template": null + }, + "description": "Display a text output in the Playground.", + "display_name": "Second Prompt", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "icon": "type", + "output_types": [ + "Text" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextOutput(TextComponent):\n display_name = \"Text Output\"\n description = \"Display a text output in the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as output.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(self, input_value: Optional[Text] = \"\", record_template: str = \"\") -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Value", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Text or Record to be passed as output.", + "input_types": [ + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "record_template": { + "advanced": true, + "display_name": "Record Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "record_template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + } + } + }, + "type": "TextOutput" + }, + "dragging": false, + "height": 290, + "id": "TextOutput-MUDOR", + "position": { + "x": 4446.064323520379, + "y": 633.833297518702 + }, + "positionAbsolute": { + "x": 4446.064323520379, + "y": 633.833297518702 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { + "data": { + "id": "OpenAIModel-XawYB", + "node": { + "base_classes": [ + "str", + "Text", + "object" + ], + "beta": false, "custom_fields": { "input_value": null, - "openai_api_key": null, - "temperature": null, - "model_name": null, "max_tokens": null, "model_kwargs": null, + "model_name": null, "openai_api_base": null, + "openai_api_key": null, "stream": null, - "system_message": null + "system_message": null, + "temperature": null }, - "output_types": ["Text"], + "description": "Generates text using OpenAI LLMs.", + "display_name": "OpenAI", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [ "max_tokens", "model_kwargs", @@ -1008,213 +1436,106 @@ "system_message", "stream" ], - "beta": false - }, - "id": "OpenAIModel-uYXZJ" - }, - "selected": false, - "width": 384, - "height": 565, - "positionAbsolute": { - "x": 2925.784767523062, - "y": 933.6465680967775 - }, - "dragging": false - }, - { - "id": "TextOutput-MUDOR", - "type": "genericNode", - "position": { - "x": 4446.064323520379, - "y": 633.833297518702 - }, - "data": { - "type": "TextOutput", - "node": { - "template": { - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Value", - "advanced": false, - "input_types": ["Record", "Text"], - "dynamic": false, - "info": "Text or Record to be passed as output.", - "load_from_db": false, - "title_case": false - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextOutput(TextComponent):\n display_name = \"Text Output\"\n description = \"Display a text output in the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as output.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(self, input_value: Optional[Text] = \"\", record_template: str = \"\") -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "record_template": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "record_template", - "display_name": "Record Template", - "advanced": true, - "dynamic": false, - "info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Display a text output in the Playground.", - "icon": "type", - "base_classes": ["str", "Text", "object"], - "display_name": "Second Prompt", - "documentation": "", - "custom_fields": { - "input_value": null, - "record_template": null - }, - "output_types": ["Text"], - "field_formatters": {}, "frozen": false, - "field_order": [], - "beta": false - }, - "id": "TextOutput-MUDOR" - }, - "selected": false, - "width": 384, - "height": 290, - "dragging": false, - "positionAbsolute": { - "x": 4446.064323520379, - "y": 633.833297518702 - } - }, - { - "id": "OpenAIModel-XawYB", - "type": "genericNode", - "position": { - "x": 4500.152018344182, - "y": 1027.7382026227656 - }, - "data": { - "type": "OpenAIModel", - "node": { + "icon": "OpenAI", + "output_types": [ + "Text" + ], "template": { - "input_value": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Input", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, + "_type": "CustomComponent", "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", "advanced": true, "dynamic": true, + "fileTypes": [], + "file_path": "", "info": "", + "list": false, "load_from_db": false, - "title_case": false + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Input", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text", + "Record", + "Prompt" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str" }, "max_tokens": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 256, + "advanced": true, + "display_name": "Max Tokens", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "max_tokens", - "display_name": "Max Tokens", - "advanced": true, - "dynamic": false, "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "max_tokens", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 256 }, "model_kwargs": { - "type": "NestedDict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": {}, + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "model_kwargs", - "display_name": "Model Kwargs", - "advanced": true, - "dynamic": false, "info": "", + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "model_kwargs", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "NestedDict", + "value": {} }, "model_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "gpt-4-turbo-preview", + "advanced": false, + "display_name": "Model Name", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "model_name", "options": [ "gpt-4o", "gpt-4-turbo", @@ -1222,355 +1543,140 @@ "gpt-3.5-turbo", "gpt-3.5-turbo-0125" ], - "name": "model_name", - "display_name": "Model Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "str", + "value": "gpt-4-turbo-preview" }, "openai_api_base": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "OpenAI API Base", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "openai_api_base", - "display_name": "OpenAI API Base", - "advanced": true, - "dynamic": false, "info": "The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": false, + "multiline": false, + "name": "openai_api_base", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "str" }, "openai_api_key": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": true, - "name": "openai_api_key", - "display_name": "OpenAI API Key", - "advanced": false, - "dynamic": false, "info": "The OpenAI API Key to use for the OpenAI model.", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": false, + "multiline": false, + "name": "openai_api_key", + "password": true, + "placeholder": "", + "required": true, + "show": true, "title_case": false, - "input_types": ["Text"], + "type": "str", "value": "" }, "stream": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, + "advanced": true, + "display_name": "Stream", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "stream", - "display_name": "Stream", - "advanced": true, - "dynamic": false, "info": "Stream the response from the model. Streaming works only in Chat.", + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "stream", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false }, "system_message": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "System Message", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "system_message", - "display_name": "System Message", - "advanced": true, - "dynamic": false, "info": "System message to pass to the model.", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": false, + "multiline": false, + "name": "system_message", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "str" }, "temperature": { - "type": "float", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 0.1, + "advanced": false, + "display_name": "Temperature", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "temperature", - "display_name": "Temperature", - "advanced": false, - "dynamic": false, "info": "", - "rangeSpec": { - "step_type": "float", - "min": -1, - "max": 1, - "step": 0.1 - }, + "list": false, "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent" - }, - "description": "Generates text using OpenAI LLMs.", - "icon": "OpenAI", - "base_classes": ["str", "Text", "object"], - "display_name": "OpenAI", - "documentation": "", - "custom_fields": { - "input_value": null, - "openai_api_key": null, - "temperature": null, - "model_name": null, - "max_tokens": null, - "model_kwargs": null, - "openai_api_base": null, - "stream": null, - "system_message": null - }, - "output_types": ["Text"], - "field_formatters": {}, - "frozen": false, - "field_order": [ - "max_tokens", - 
"model_kwargs", - "model_name", - "openai_api_base", - "openai_api_key", - "temperature", - "input_value", - "system_message", - "stream" - ], - "beta": false + "multiline": false, + "name": "temperature", + "password": false, + "placeholder": "", + "rangeSpec": { + "max": 1, + "min": -1, + "step": 0.1, + "step_type": "float" + }, + "required": false, + "show": true, + "title_case": false, + "type": "float", + "value": 0.1 + } + } }, - "id": "OpenAIModel-XawYB" + "type": "OpenAIModel" }, - "selected": false, - "width": 384, + "dragging": false, "height": 565, + "id": "OpenAIModel-XawYB", + "position": { + "x": 4500.152018344182, + "y": 1027.7382026227656 + }, "positionAbsolute": { "x": 4500.152018344182, "y": 1027.7382026227656 }, - "dragging": false - } - ], - "edges": [ - { - "source": "TextInput-sptaH", - "sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-sptaHœ}", - "target": "Prompt-amqBu", - "targetHandle": "{œfieldNameœ:œdocumentœ,œidœ:œPrompt-amqBuœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "document", - "id": "Prompt-amqBu", - "inputTypes": ["Document", "BaseOutputParser", "Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["str", "Text", "object"], - "dataType": "TextInput", - "id": "TextInput-sptaH" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-TextInput-sptaH{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-sptaHœ}-Prompt-amqBu{œfieldNameœ:œdocumentœ,œidœ:œPrompt-amqBuœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "Prompt-amqBu", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}", - "target": "TextOutput-2MS4a", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-2MS4aœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "TextOutput-2MS4a", - "inputTypes": ["Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "str", "Text"], - "dataType": "Prompt", - "id": "Prompt-amqBu" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-Prompt-amqBu{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}-TextOutput-2MS4a{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-2MS4aœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "Prompt-amqBu", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}", - "target": "OpenAIModel-uYXZJ", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-uYXZJœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "OpenAIModel-uYXZJ", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "str", "Text"], - "dataType": "Prompt", - "id": "Prompt-amqBu" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-Prompt-amqBu{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}-OpenAIModel-uYXZJ{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-uYXZJœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "OpenAIModel-uYXZJ", - "sourceHandle": 
"{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}", - "target": "Prompt-gTNiz", - "targetHandle": "{œfieldNameœ:œsummaryœ,œidœ:œPrompt-gTNizœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "summary", - "id": "Prompt-gTNiz", - "inputTypes": ["Document", "BaseOutputParser", "Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["str", "Text", "object"], - "dataType": "OpenAIModel", - "id": "OpenAIModel-uYXZJ" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-OpenAIModel-uYXZJ{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}-Prompt-gTNiz{œfieldNameœ:œsummaryœ,œidœ:œPrompt-gTNizœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "OpenAIModel-uYXZJ", - "sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}", - "target": "ChatOutput-EJkG3", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-EJkG3œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "ChatOutput-EJkG3", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["str", "Text", "object"], - "dataType": "OpenAIModel", - "id": "OpenAIModel-uYXZJ" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-OpenAIModel-uYXZJ{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}-ChatOutput-EJkG3{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-EJkG3œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "Prompt-gTNiz", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}", - "target": "TextOutput-MUDOR", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-MUDORœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "TextOutput-MUDOR", - "inputTypes": ["Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "str", "Text"], - "dataType": "Prompt", - "id": "Prompt-gTNiz" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-Prompt-gTNiz{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}-TextOutput-MUDOR{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-MUDORœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "Prompt-gTNiz", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}", - "target": "OpenAIModel-XawYB", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-XawYBœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "OpenAIModel-XawYB", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "str", "Text"], - "dataType": "Prompt", - "id": "Prompt-gTNiz" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-Prompt-gTNiz{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}-OpenAIModel-XawYB{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-XawYBœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}" - }, - { - "source": 
"OpenAIModel-XawYB", - "sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-XawYBœ}", - "target": "ChatOutput-DNmvg", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-DNmvgœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "ChatOutput-DNmvg", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["str", "Text", "object"], - "dataType": "OpenAIModel", - "id": "OpenAIModel-XawYB" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-OpenAIModel-XawYB{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-XawYBœ}-ChatOutput-DNmvg{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-DNmvgœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}" + "selected": false, + "type": "genericNode", + "width": 384 } ], "viewport": { @@ -1580,7 +1686,8 @@ } }, "description": "The Prompt Chaining flow chains prompts with LLMs, refining outputs through iterative stages.", - "name": "Prompt Chaining", + "id": "85392e54-20f3-4ab5-a179-cb4bef16f639", + "is_component": false, "last_tested_version": "1.0.0a0", - "is_component": false -} + "name": "Prompt Chaining" +} \ No newline at end of file diff --git a/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json b/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json index bbf59c8b1..57e164986 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json @@ -1,738 +1,592 @@ { - "id": "51e2b78a-199b-4054-9f32-e288eef6924c", "data": { + "edges": [ + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "Text", + "str" + ], + "dataType": "TextOutput", + "id": "TextOutput-BDknO" + }, + "targetHandle": { + "fieldName": "context", + "id": "Prompt-xeI6K", + "inputTypes": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-TextOutput-BDknO{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œTextOutputœ,œidœ:œTextOutput-BDknOœ}-Prompt-xeI6K{œfieldNameœ:œcontextœ,œidœ:œPrompt-xeI6Kœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextOutput-BDknO", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œTextœ, œstrœ], œdataTypeœ: œTextOutputœ, œidœ: œTextOutput-BDknOœ}", + "style": { + "stroke": "#555" + }, + "target": "Prompt-xeI6K", + "targetHandle": "{œfieldNameœ: œcontextœ, œidœ: œPrompt-xeI6Kœ, œinputTypesœ: [œDocumentœ, œBaseOutputParserœ, œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "Text", + "str", + "object", + "Record" + ], + "dataType": "ChatInput", + "id": "ChatInput-yxMKE" + }, + "targetHandle": { + "fieldName": "question", + "id": "Prompt-xeI6K", + "inputTypes": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-ChatInput-yxMKE{œbaseClassesœ:[œTextœ,œstrœ,œobjectœ,œRecordœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-yxMKEœ}-Prompt-xeI6K{œfieldNameœ:œquestionœ,œidœ:œPrompt-xeI6Kœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", + "selected": false, + "source": "ChatInput-yxMKE", + "sourceHandle": 
"{œbaseClassesœ: [œTextœ, œstrœ, œobjectœ, œRecordœ], œdataTypeœ: œChatInputœ, œidœ: œChatInput-yxMKEœ}", + "style": { + "stroke": "#555" + }, + "target": "Prompt-xeI6K", + "targetHandle": "{œfieldNameœ: œquestionœ, œidœ: œPrompt-xeI6Kœ, œinputTypesœ: [œDocumentœ, œBaseOutputParserœ, œRecordœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "Text", + "str" + ], + "dataType": "Prompt", + "id": "Prompt-xeI6K" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "OpenAIModel-EjXlN", + "inputTypes": [ + "Text", + "Record", + "Prompt" + ], + "type": "str" + } + }, + "id": "reactflow__edge-Prompt-xeI6K{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-xeI6Kœ}-OpenAIModel-EjXlN{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-EjXlNœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "selected": false, + "source": "Prompt-xeI6K", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œTextœ, œstrœ], œdataTypeœ: œPromptœ, œidœ: œPrompt-xeI6Kœ}", + "style": { + "stroke": "#555" + }, + "target": "OpenAIModel-EjXlN", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-EjXlNœ, œinputTypesœ: [œTextœ, œRecordœ, œPromptœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "object", + "Text", + "str" + ], + "dataType": "OpenAIModel", + "id": "OpenAIModel-EjXlN" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-Q39I8", + "inputTypes": [ + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-OpenAIModel-EjXlN{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-EjXlNœ}-ChatOutput-Q39I8{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-Q39I8œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "selected": false, + "source": "OpenAIModel-EjXlN", + "sourceHandle": "{œbaseClassesœ: [œobjectœ, œTextœ, œstrœ], œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-EjXlNœ}", + "style": { + "stroke": "#555" + }, + "target": "ChatOutput-Q39I8", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-Q39I8œ, œinputTypesœ: [œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "Record" + ], + "dataType": "File", + "id": "File-t0a6a" + }, + "targetHandle": { + "fieldName": "inputs", + "id": "RecursiveCharacterTextSplitter-tR9QM", + "inputTypes": [ + "Document", + "Record" + ], + "type": "Document" + } + }, + "id": "reactflow__edge-File-t0a6a{œbaseClassesœ:[œRecordœ],œdataTypeœ:œFileœ,œidœ:œFile-t0a6aœ}-RecursiveCharacterTextSplitter-tR9QM{œfieldNameœ:œinputsœ,œidœ:œRecursiveCharacterTextSplitter-tR9QMœ,œinputTypesœ:[œDocumentœ,œRecordœ],œtypeœ:œDocumentœ}", + "selected": false, + "source": "File-t0a6a", + "sourceHandle": "{œbaseClassesœ: [œRecordœ], œdataTypeœ: œFileœ, œidœ: œFile-t0a6aœ}", + "style": { + "stroke": "#555" + }, + "target": "RecursiveCharacterTextSplitter-tR9QM", + "targetHandle": "{œfieldNameœ: œinputsœ, œidœ: œRecursiveCharacterTextSplitter-tR9QMœ, œinputTypesœ: [œDocumentœ, œRecordœ], œtypeœ: œDocumentœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "Embeddings" + ], + "dataType": "OpenAIEmbeddings", + "id": "OpenAIEmbeddings-ZlOk1" + }, + "targetHandle": { + "fieldName": "embedding", + "id": "AstraDBSearch-41nRz", + "inputTypes": null, + "type": "Embeddings" + } + }, + "id": 
"reactflow__edge-OpenAIEmbeddings-ZlOk1{œbaseClassesœ:[œEmbeddingsœ],œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-ZlOk1œ}-AstraDBSearch-41nRz{œfieldNameœ:œembeddingœ,œidœ:œAstraDBSearch-41nRzœ,œinputTypesœ:null,œtypeœ:œEmbeddingsœ}", + "source": "OpenAIEmbeddings-ZlOk1", + "sourceHandle": "{œbaseClassesœ: [œEmbeddingsœ], œdataTypeœ: œOpenAIEmbeddingsœ, œidœ: œOpenAIEmbeddings-ZlOk1œ}", + "style": { + "stroke": "#555" + }, + "target": "AstraDBSearch-41nRz", + "targetHandle": "{œfieldNameœ: œembeddingœ, œidœ: œAstraDBSearch-41nRzœ, œinputTypesœ: null, œtypeœ: œEmbeddingsœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "Text", + "str", + "object", + "Record" + ], + "dataType": "ChatInput", + "id": "ChatInput-yxMKE" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "AstraDBSearch-41nRz", + "inputTypes": [ + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-ChatInput-yxMKE{œbaseClassesœ:[œTextœ,œstrœ,œobjectœ,œRecordœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-yxMKEœ}-AstraDBSearch-41nRz{œfieldNameœ:œinput_valueœ,œidœ:œAstraDBSearch-41nRzœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", + "source": "ChatInput-yxMKE", + "sourceHandle": "{œbaseClassesœ: [œTextœ, œstrœ, œobjectœ, œRecordœ], œdataTypeœ: œChatInputœ, œidœ: œChatInput-yxMKEœ}", + "style": { + "stroke": "#555" + }, + "target": "AstraDBSearch-41nRz", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œAstraDBSearch-41nRzœ, œinputTypesœ: [œTextœ], œtypeœ: œstrœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "Record" + ], + "dataType": "RecursiveCharacterTextSplitter", + "id": "RecursiveCharacterTextSplitter-tR9QM" + }, + "targetHandle": { + "fieldName": "inputs", + "id": "AstraDB-eUCSS", + "inputTypes": null, + "type": "Record" + } + }, + "id": "reactflow__edge-RecursiveCharacterTextSplitter-tR9QM{œbaseClassesœ:[œRecordœ],œdataTypeœ:œRecursiveCharacterTextSplitterœ,œidœ:œRecursiveCharacterTextSplitter-tR9QMœ}-AstraDB-eUCSS{œfieldNameœ:œinputsœ,œidœ:œAstraDB-eUCSSœ,œinputTypesœ:null,œtypeœ:œRecordœ}", + "selected": false, + "source": "RecursiveCharacterTextSplitter-tR9QM", + "sourceHandle": "{œbaseClassesœ: [œRecordœ], œdataTypeœ: œRecursiveCharacterTextSplitterœ, œidœ: œRecursiveCharacterTextSplitter-tR9QMœ}", + "style": { + "stroke": "#555" + }, + "target": "AstraDB-eUCSS", + "targetHandle": "{œfieldNameœ: œinputsœ, œidœ: œAstraDB-eUCSSœ, œinputTypesœ: null, œtypeœ: œRecordœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + "baseClasses": [ + "Embeddings" + ], + "dataType": "OpenAIEmbeddings", + "id": "OpenAIEmbeddings-9TPjc" + }, + "targetHandle": { + "fieldName": "embedding", + "id": "AstraDB-eUCSS", + "inputTypes": null, + "type": "Embeddings" + } + }, + "id": "reactflow__edge-OpenAIEmbeddings-9TPjc{œbaseClassesœ:[œEmbeddingsœ],œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-9TPjcœ}-AstraDB-eUCSS{œfieldNameœ:œembeddingœ,œidœ:œAstraDB-eUCSSœ,œinputTypesœ:null,œtypeœ:œEmbeddingsœ}", + "selected": false, + "source": "OpenAIEmbeddings-9TPjc", + "sourceHandle": "{œbaseClassesœ: [œEmbeddingsœ], œdataTypeœ: œOpenAIEmbeddingsœ, œidœ: œOpenAIEmbeddings-9TPjcœ}", + "style": { + "stroke": "#555" + }, + "target": "AstraDB-eUCSS", + "targetHandle": "{œfieldNameœ: œembeddingœ, œidœ: œAstraDB-eUCSSœ, œinputTypesœ: null, œtypeœ: œEmbeddingsœ}" + }, + { + "className": "stroke-gray-900 stroke-connection", + "data": { + "sourceHandle": { + 
"baseClasses": [ + "Record" + ], + "dataType": "AstraDBSearch", + "id": "AstraDBSearch-41nRz" + }, + "targetHandle": { + "fieldName": "input_value", + "id": "TextOutput-BDknO", + "inputTypes": [ + "Record", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-AstraDBSearch-41nRz{œbaseClassesœ:[œRecordœ],œdataTypeœ:œAstraDBSearchœ,œidœ:œAstraDBSearch-41nRzœ}-TextOutput-BDknO{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-BDknOœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}", + "source": "AstraDBSearch-41nRz", + "sourceHandle": "{œbaseClassesœ: [œRecordœ], œdataTypeœ: œAstraDBSearchœ, œidœ: œAstraDBSearch-41nRzœ}", + "style": { + "stroke": "#555" + }, + "target": "TextOutput-BDknO", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œTextOutput-BDknOœ, œinputTypesœ: [œRecordœ, œTextœ], œtypeœ: œstrœ}" + } + ], "nodes": [ { + "data": { + "id": "ChatInput-yxMKE", + "node": { + "base_classes": [ + "Text", + "str", + "object", + "Record" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "return_record": null, + "sender": null, + "sender_name": null, + "session_id": null + }, + "description": "Get chat inputs from the Playground.", + "display_name": "Chat Input", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "icon": "ChatInput", + "output_types": [ + "Message" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.schema.message import Message\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n files: Optional[list[str]] = None,\n session_id: Optional[str] = None,\n ) -> Message:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n files=files,\n session_id=session_id,\n )\n" + }, + "input_value": { + "advanced": false, + "display_name": "Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "what is a line" + }, + "sender": { + "advanced": true, + "display_name": "Sender Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "sender", + "options": [ + "Machine", + "User" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "User" + }, + "sender_name": { + "advanced": false, + "display_name": "Sender Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + 
"info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "sender_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "User" + }, + "session_id": { + "advanced": true, + "display_name": "Session ID", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "If provided, the message will be stored in the memory.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "session_id", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + } + } + }, + "type": "ChatInput" + }, + "height": 383, "id": "ChatInput-yxMKE", - "type": "genericNode", "position": { "x": 1195.5276981160775, "y": 209.421875 }, - "data": { - "type": "ChatInput", - "node": { - "template": { - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build_no_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Message", - "advanced": false, - "input_types": [], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "value": "what is a line" - }, - "return_record": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "return_record", - "display_name": "Return Record", - "advanced": true, - "dynamic": false, - "info": "Return the message as a record containing the sender, sender_name, and session_id.", - "load_from_db": false, - "title_case": false - }, - "sender": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "User", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Machine", "User"], - "name": "sender", - "display_name": "Sender Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender_name": { - "type": "str", - "required": false, - "placeholder": 
"", - "list": false, - "show": true, - "multiline": false, - "value": "User", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "sender_name", - "display_name": "Sender Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "session_id": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "session_id", - "display_name": "Session ID", - "advanced": true, - "dynamic": false, - "info": "If provided, the message will be stored in the memory.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Get chat inputs from the Playground.", - "icon": "ChatInput", - "base_classes": ["Text", "str", "object", "Record"], - "display_name": "Chat Input", - "documentation": "", - "custom_fields": { - "sender": null, - "sender_name": null, - "input_value": null, - "session_id": null, - "return_record": null - }, - "output_types": ["Text", "Record"], - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false - }, - "id": "ChatInput-yxMKE" - }, "selected": false, - "width": 384, - "height": 383 + "type": "genericNode", + "width": 384 }, { - "id": "TextOutput-BDknO", - "type": "genericNode", - "position": { - "x": 2322.600672827879, - "y": 604.9467307442569 - }, "data": { - "type": "TextOutput", + "id": "TextOutput-BDknO", "node": { - "template": { - "input_value": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Value", - "advanced": false, - "input_types": ["Record", "Text"], - "dynamic": false, - "info": "Text or Record to be passed as output.", - "load_from_db": false, - "title_case": false - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextOutput(TextComponent):\n display_name = \"Text Output\"\n description = \"Display a text output in the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as output.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(self, input_value: Optional[Text] = \"\", record_template: str = \"\") -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "record_template": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "{text}", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "record_template", - "display_name": "Record Template", - "advanced": true, - "dynamic": false, - "info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Display a text output in the Playground.", - "icon": "type", - "base_classes": ["object", "Text", "str"], - "display_name": "Extracted Chunks", - "documentation": "", + "base_classes": [ + "object", + "Text", + "str" + ], + "beta": false, "custom_fields": { "input_value": null, "record_template": null }, - "output_types": ["Text"], + "description": "Display a text output in the Playground.", + "display_name": "Extracted Chunks", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [], - "beta": false + "frozen": false, + "icon": "type", + "output_types": [ + "Text" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextOutput(TextComponent):\n display_name = \"Text Output\"\n description = \"Display a text output in the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as output.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(self, input_value: Optional[Text] = \"\", record_template: str = \"\") -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Value", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Text or Record to be passed as output.", + "input_types": [ + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "record_template": { + "advanced": true, + "display_name": "Record Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Template to convert Record to Text. 
If left empty, it will be dynamically set to the Record's text key.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "record_template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "{text}" + } + } }, - "id": "TextOutput-BDknO" + "type": "TextOutput" }, - "selected": false, - "width": 384, + "dragging": false, "height": 289, + "id": "TextOutput-BDknO", + "position": { + "x": 2322.600672827879, + "y": 604.9467307442569 + }, "positionAbsolute": { "x": 2322.600672827879, "y": 604.9467307442569 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "OpenAIEmbeddings-ZlOk1", - "type": "genericNode", - "position": { - "x": 1183.667250865064, - "y": 687.3171828430261 - }, "data": { - "type": "OpenAIEmbeddings", + "id": "OpenAIEmbeddings-ZlOk1", "node": { - "template": { - "allowed_special": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": [], - "fileTypes": [], - "file_path": "", - "password": false, - "name": "allowed_special", - "display_name": "Allowed Special", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "chunk_size": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 1000, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "chunk_size", - "display_name": "Chunk Size", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "client": { - "type": "Any", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "client", - "display_name": "Client", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Dict, List, Optional\n\nfrom langchain_openai.embeddings.base import OpenAIEmbeddings\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Embeddings, NestedDict\n\n\nclass OpenAIEmbeddingsComponent(CustomComponent):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n\n def build_config(self):\n return {\n \"allowed_special\": {\n \"display_name\": \"Allowed Special\",\n \"advanced\": True,\n \"field_type\": \"str\",\n \"is_list\": True,\n },\n \"default_headers\": {\n \"display_name\": \"Default Headers\",\n \"advanced\": True,\n \"field_type\": \"dict\",\n },\n \"default_query\": {\n \"display_name\": \"Default Query\",\n \"advanced\": True,\n \"field_type\": \"NestedDict\",\n },\n \"disallowed_special\": {\n \"display_name\": \"Disallowed Special\",\n \"advanced\": True,\n \"field_type\": \"str\",\n \"is_list\": True,\n },\n \"chunk_size\": {\"display_name\": \"Chunk Size\", \"advanced\": True},\n \"client\": {\"display_name\": \"Client\", \"advanced\": True},\n \"deployment\": {\"display_name\": \"Deployment\", \"advanced\": True},\n \"embedding_ctx_length\": {\n \"display_name\": \"Embedding Context Length\",\n \"advanced\": True,\n },\n \"max_retries\": {\"display_name\": \"Max 
Retries\", \"advanced\": True},\n \"model\": {\n \"display_name\": \"Model\",\n \"advanced\": False,\n \"options\": [\n \"text-embedding-3-small\",\n \"text-embedding-3-large\",\n \"text-embedding-ada-002\",\n ],\n },\n \"model_kwargs\": {\"display_name\": \"Model Kwargs\", \"advanced\": True},\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"password\": True,\n \"advanced\": True,\n },\n \"openai_api_key\": {\"display_name\": \"OpenAI API Key\", \"password\": True},\n \"openai_api_type\": {\n \"display_name\": \"OpenAI API Type\",\n \"advanced\": True,\n \"password\": True,\n },\n \"openai_api_version\": {\n \"display_name\": \"OpenAI API Version\",\n \"advanced\": True,\n },\n \"openai_organization\": {\n \"display_name\": \"OpenAI Organization\",\n \"advanced\": True,\n },\n \"openai_proxy\": {\"display_name\": \"OpenAI Proxy\", \"advanced\": True},\n \"request_timeout\": {\"display_name\": \"Request Timeout\", \"advanced\": True},\n \"show_progress_bar\": {\n \"display_name\": \"Show Progress Bar\",\n \"advanced\": True,\n },\n \"skip_empty\": {\"display_name\": \"Skip Empty\", \"advanced\": True},\n \"tiktoken_model_name\": {\n \"display_name\": \"TikToken Model Name\",\n \"advanced\": True,\n },\n \"tiktoken_enable\": {\"display_name\": \"TikToken Enable\", \"advanced\": True},\n }\n\n def build(\n self,\n openai_api_key: str,\n default_headers: Optional[Dict[str, str]] = None,\n default_query: Optional[NestedDict] = {},\n allowed_special: List[str] = [],\n disallowed_special: List[str] = [\"all\"],\n chunk_size: int = 1000,\n deployment: str = \"text-embedding-ada-002\",\n embedding_ctx_length: int = 8191,\n max_retries: int = 6,\n model: str = \"text-embedding-ada-002\",\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n openai_api_type: Optional[str] = None,\n openai_api_version: Optional[str] = None,\n openai_organization: Optional[str] = None,\n openai_proxy: Optional[str] = None,\n request_timeout: Optional[float] = None,\n show_progress_bar: bool = False,\n skip_empty: bool = False,\n tiktoken_enable: bool = True,\n tiktoken_model_name: Optional[str] = None,\n ) -> Embeddings:\n # This is to avoid errors with Vector Stores (e.g Chroma)\n if disallowed_special == [\"all\"]:\n disallowed_special = \"all\" # type: ignore\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n return OpenAIEmbeddings(\n tiktoken_enabled=tiktoken_enable,\n default_headers=default_headers,\n default_query=default_query,\n allowed_special=set(allowed_special),\n disallowed_special=\"all\",\n chunk_size=chunk_size,\n deployment=deployment,\n embedding_ctx_length=embedding_ctx_length,\n max_retries=max_retries,\n model=model,\n model_kwargs=model_kwargs,\n base_url=openai_api_base,\n api_key=api_key,\n openai_api_type=openai_api_type,\n api_version=openai_api_version,\n organization=openai_organization,\n openai_proxy=openai_proxy,\n timeout=request_timeout,\n show_progress_bar=show_progress_bar,\n skip_empty=skip_empty,\n tiktoken_model_name=tiktoken_model_name,\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "default_headers": { - "type": "dict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "default_headers", - "display_name": "Default Headers", - "advanced": 
true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "default_query": { - "type": "NestedDict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": {}, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "default_query", - "display_name": "Default Query", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "deployment": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "text-embedding-ada-002", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "deployment", - "display_name": "Deployment", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "disallowed_special": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": ["all"], - "fileTypes": [], - "file_path": "", - "password": false, - "name": "disallowed_special", - "display_name": "Disallowed Special", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "embedding_ctx_length": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 8191, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "embedding_ctx_length", - "display_name": "Embedding Context Length", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "max_retries": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 6, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "max_retries", - "display_name": "Max Retries", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "model": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "text-embedding-ada-002", - "fileTypes": [], - "file_path": "", - "password": false, - "options": [ - "text-embedding-3-small", - "text-embedding-3-large", - "text-embedding-ada-002" - ], - "name": "model", - "display_name": "Model", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "model_kwargs": { - "type": "NestedDict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": {}, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "model_kwargs", - "display_name": "Model Kwargs", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "openai_api_base": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": true, - "name": "openai_api_base", - "display_name": "OpenAI API Base", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_api_key": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - 
"password": true, - "name": "openai_api_key", - "display_name": "OpenAI API Key", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": true, - "title_case": false, - "input_types": ["Text"], - "value": "OPENAI_API_KEY" - }, - "openai_api_type": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": true, - "name": "openai_api_type", - "display_name": "OpenAI API Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_api_version": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "openai_api_version", - "display_name": "OpenAI API Version", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_organization": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "openai_organization", - "display_name": "OpenAI Organization", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_proxy": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "openai_proxy", - "display_name": "OpenAI Proxy", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "request_timeout": { - "type": "float", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "request_timeout", - "display_name": "Request Timeout", - "advanced": true, - "dynamic": false, - "info": "", - "rangeSpec": { - "step_type": "float", - "min": -1, - "max": 1, - "step": 0.1 - }, - "load_from_db": false, - "title_case": false - }, - "show_progress_bar": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "show_progress_bar", - "display_name": "Show Progress Bar", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "skip_empty": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "skip_empty", - "display_name": "Skip Empty", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "tiktoken_enable": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "tiktoken_enable", - "display_name": "TikToken Enable", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "tiktoken_model_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": 
true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "tiktoken_model_name", - "display_name": "TikToken Model Name", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Generate embeddings using OpenAI models.", - "base_classes": ["Embeddings"], - "display_name": "OpenAI Embeddings", - "documentation": "", + "base_classes": [ + "Embeddings" + ], + "beta": false, "custom_fields": { - "openai_api_key": null, - "default_headers": null, - "default_query": null, "allowed_special": null, - "disallowed_special": null, "chunk_size": null, "client": null, + "default_headers": null, + "default_query": null, "deployment": null, + "disallowed_special": null, "embedding_ctx_length": null, "max_retries": null, "model": null, "model_kwargs": null, "openai_api_base": null, + "openai_api_key": null, "openai_api_type": null, "openai_api_version": null, "openai_organization": null, @@ -743,255 +597,512 @@ "tiktoken_enable": null, "tiktoken_model_name": null }, - "output_types": ["Embeddings"], + "description": "Generate embeddings using OpenAI models.", + "display_name": "OpenAI Embeddings", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [], - "beta": false - }, - "id": "OpenAIEmbeddings-ZlOk1" - }, - "selected": false, - "width": 384, - "height": 383, - "dragging": false - }, - { - "id": "OpenAIModel-EjXlN", - "type": "genericNode", - "position": { - "x": 3410.117202077183, - "y": 431.2038048137648 - }, - "data": { - "type": "OpenAIModel", - "node": { + "frozen": false, + "output_types": [ + "Embeddings" + ], "template": { - "input_value": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "_type": "CustomComponent", + "allowed_special": { + "advanced": true, + "display_name": "Allowed Special", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "input_value", - "display_name": "Input", - "advanced": false, - "dynamic": false, "info": "", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": false, + "multiline": false, + "name": "allowed_special", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "str", + "value": [] + }, + "chunk_size": { + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "chunk_size", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 1000 }, "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n 
\"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", "advanced": true, "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "max_tokens": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 256, "fileTypes": [], "file_path": "", - "password": false, - "name": "max_tokens", - "display_name": "Max Tokens", - "advanced": true, - "dynamic": false, - "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", + "info": "", + "list": false, "load_from_db": false, - "title_case": false + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Dict, List, Optional\n\nfrom langchain_openai.embeddings.base import OpenAIEmbeddings\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Embeddings, NestedDict\n\n\nclass OpenAIEmbeddingsComponent(CustomComponent):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n\n def build_config(self):\n return {\n \"allowed_special\": {\n \"display_name\": \"Allowed Special\",\n \"advanced\": True,\n \"field_type\": \"str\",\n \"is_list\": True,\n },\n \"default_headers\": {\n \"display_name\": \"Default Headers\",\n \"advanced\": True,\n \"field_type\": \"dict\",\n },\n \"default_query\": {\n \"display_name\": \"Default Query\",\n \"advanced\": True,\n \"field_type\": \"NestedDict\",\n },\n \"disallowed_special\": {\n \"display_name\": \"Disallowed Special\",\n \"advanced\": True,\n \"field_type\": \"str\",\n \"is_list\": True,\n },\n \"chunk_size\": {\"display_name\": \"Chunk Size\", \"advanced\": True},\n \"client\": {\"display_name\": \"Client\", \"advanced\": True},\n \"deployment\": {\"display_name\": \"Deployment\", \"advanced\": True},\n \"embedding_ctx_length\": {\n \"display_name\": \"Embedding Context Length\",\n \"advanced\": True,\n },\n \"max_retries\": {\"display_name\": \"Max Retries\", \"advanced\": True},\n \"model\": {\n \"display_name\": \"Model\",\n \"advanced\": False,\n \"options\": [\n \"text-embedding-3-small\",\n \"text-embedding-3-large\",\n \"text-embedding-ada-002\",\n ],\n },\n \"model_kwargs\": {\"display_name\": \"Model Kwargs\", \"advanced\": True},\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"password\": True,\n \"advanced\": True,\n },\n \"openai_api_key\": {\"display_name\": \"OpenAI API Key\", \"password\": True},\n \"openai_api_type\": {\n \"display_name\": \"OpenAI API Type\",\n \"advanced\": True,\n \"password\": True,\n },\n \"openai_api_version\": {\n \"display_name\": \"OpenAI API Version\",\n \"advanced\": True,\n },\n \"openai_organization\": {\n \"display_name\": \"OpenAI Organization\",\n \"advanced\": True,\n },\n \"openai_proxy\": {\"display_name\": \"OpenAI Proxy\", \"advanced\": True},\n \"request_timeout\": {\"display_name\": \"Request Timeout\", \"advanced\": True},\n \"show_progress_bar\": {\n \"display_name\": \"Show Progress Bar\",\n \"advanced\": True,\n },\n \"skip_empty\": {\"display_name\": \"Skip Empty\", \"advanced\": True},\n \"tiktoken_model_name\": {\n \"display_name\": \"TikToken Model Name\",\n \"advanced\": True,\n },\n \"tiktoken_enable\": {\"display_name\": \"TikToken Enable\", \"advanced\": True},\n }\n\n def build(\n self,\n openai_api_key: str,\n default_headers: Optional[Dict[str, str]] = None,\n default_query: Optional[NestedDict] = {},\n allowed_special: List[str] = [],\n disallowed_special: List[str] = [\"all\"],\n chunk_size: int = 1000,\n deployment: str = \"text-embedding-ada-002\",\n embedding_ctx_length: int = 8191,\n max_retries: int = 6,\n model: str = \"text-embedding-ada-002\",\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n openai_api_type: Optional[str] = None,\n openai_api_version: Optional[str] = None,\n openai_organization: Optional[str] = None,\n 
openai_proxy: Optional[str] = None,\n request_timeout: Optional[float] = None,\n show_progress_bar: bool = False,\n skip_empty: bool = False,\n tiktoken_enable: bool = True,\n tiktoken_model_name: Optional[str] = None,\n ) -> Embeddings:\n # This is to avoid errors with Vector Stores (e.g Chroma)\n if disallowed_special == [\"all\"]:\n disallowed_special = \"all\" # type: ignore\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n return OpenAIEmbeddings(\n tiktoken_enabled=tiktoken_enable,\n default_headers=default_headers,\n default_query=default_query,\n allowed_special=set(allowed_special),\n disallowed_special=\"all\",\n chunk_size=chunk_size,\n deployment=deployment,\n embedding_ctx_length=embedding_ctx_length,\n max_retries=max_retries,\n model=model,\n model_kwargs=model_kwargs,\n base_url=openai_api_base,\n api_key=api_key,\n openai_api_type=openai_api_type,\n api_version=openai_api_version,\n organization=openai_organization,\n openai_proxy=openai_proxy,\n timeout=request_timeout,\n show_progress_bar=show_progress_bar,\n skip_empty=skip_empty,\n tiktoken_model_name=tiktoken_model_name,\n )\n" + }, + "default_headers": { + "advanced": true, + "display_name": "Default Headers", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "default_headers", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "dict" + }, + "default_query": { + "advanced": true, + "display_name": "Default Query", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "default_query", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "NestedDict", + "value": {} + }, + "deployment": { + "advanced": true, + "display_name": "Deployment", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "deployment", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "text-embedding-ada-002" + }, + "disallowed_special": { + "advanced": true, + "display_name": "Disallowed Special", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "disallowed_special", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": [ + "all" + ] + }, + "embedding_ctx_length": { + "advanced": true, + "display_name": "Embedding Context Length", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "embedding_ctx_length", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 8191 + }, + "max_retries": { + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "max_retries", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 6 + }, + 
"model": { + "advanced": false, + "display_name": "Model", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "model", + "options": [ + "text-embedding-3-small", + "text-embedding-3-large", + "text-embedding-ada-002" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "text-embedding-ada-002" }, "model_kwargs": { - "type": "NestedDict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": {}, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "model_kwargs", - "display_name": "Model Kwargs", "advanced": true, + "display_name": "Model Kwargs", "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "model_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "gpt-3.5-turbo", "fileTypes": [], "file_path": "", - "password": false, - "options": [ - "gpt-4o", - "gpt-4-turbo", - "gpt-4-turbo-preview", - "gpt-3.5-turbo", - "gpt-3.5-turbo-0125" - ], - "name": "model_name", - "display_name": "Model Name", - "advanced": false, - "dynamic": false, "info": "", + "list": false, "load_from_db": false, + "multiline": false, + "name": "model_kwargs", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "NestedDict", + "value": {} }, "openai_api_base": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "OpenAI API Base", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "openai_api_base", - "display_name": "OpenAI API Base", - "advanced": true, - "dynamic": false, - "info": "The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.", + "info": "", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": false, + "multiline": false, + "name": "openai_api_base", + "password": true, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "str" }, "openai_api_key": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": true, - "name": "openai_api_key", - "display_name": "OpenAI API Key", - "advanced": false, - "dynamic": false, - "info": "The OpenAI API Key to use for the OpenAI model.", + "info": "", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": true, + "multiline": false, + "name": "openai_api_key", + "password": true, + "placeholder": "", + "required": true, + "show": true, "title_case": false, - "input_types": ["Text"], + "type": "str", "value": "OPENAI_API_KEY" }, - "stream": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "stream", - "display_name": "Stream", + "openai_api_type": { "advanced": true, + "display_name": "OpenAI API Type", "dynamic": false, - "info": "Stream the response from the model. Streaming works only in Chat.", - "load_from_db": false, - "title_case": false - }, - "system_message": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "system_message", - "display_name": "System Message", - "advanced": true, - "dynamic": false, - "info": "System message to pass to the model.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "temperature": { - "type": "float", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 0.1, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "temperature", - "display_name": "Temperature", - "advanced": false, - "dynamic": false, "info": "", - "rangeSpec": { - "step_type": "float", - "min": -1, - "max": 1, - "step": 0.1 - }, + "input_types": [ + "Text" + ], + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "openai_api_type", + "password": true, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" }, - "_type": "CustomComponent" - }, - "description": "Generates text using OpenAI LLMs.", - "icon": "OpenAI", - "base_classes": ["object", "Text", "str"], - "display_name": "OpenAI", - "documentation": "", + "openai_api_version": { + "advanced": true, + "display_name": "OpenAI API Version", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "openai_api_version", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "openai_organization": { + "advanced": true, + "display_name": "OpenAI Organization", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": 
false, + "multiline": false, + "name": "openai_organization", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "openai_proxy": { + "advanced": true, + "display_name": "OpenAI Proxy", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "openai_proxy", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "request_timeout": { + "advanced": true, + "display_name": "Request Timeout", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "request_timeout", + "password": false, + "placeholder": "", + "rangeSpec": { + "max": 1, + "min": -1, + "step": 0.1, + "step_type": "float" + }, + "required": false, + "show": true, + "title_case": false, + "type": "float" + }, + "show_progress_bar": { + "advanced": true, + "display_name": "Show Progress Bar", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "show_progress_bar", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false + }, + "skip_empty": { + "advanced": true, + "display_name": "Skip Empty", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "skip_empty", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false + }, + "tiktoken_enable": { + "advanced": true, + "display_name": "TikToken Enable", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "tiktoken_enable", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": true + }, + "tiktoken_model_name": { + "advanced": true, + "display_name": "TikToken Model Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "tiktoken_model_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + } + } + }, + "type": "OpenAIEmbeddings" + }, + "dragging": false, + "height": 383, + "id": "OpenAIEmbeddings-ZlOk1", + "position": { + "x": 1183.667250865064, + "y": 687.3171828430261 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { + "data": { + "id": "OpenAIModel-EjXlN", + "node": { + "base_classes": [ + "object", + "Text", + "str" + ], + "beta": false, "custom_fields": { "input_value": null, - "openai_api_key": null, - "temperature": null, - "model_name": null, "max_tokens": null, "model_kwargs": null, + "model_name": null, "openai_api_base": null, + "openai_api_key": null, "stream": null, - "system_message": null + "system_message": null, + "temperature": null }, - "output_types": ["Text"], + "description": "Generates text using OpenAI LLMs.", + "display_name": "OpenAI", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [ "max_tokens", "model_kwargs", @@ -1003,352 +1114,599 @@ 
"system_message", "stream" ], - "beta": false + "frozen": false, + "icon": "OpenAI", + "output_types": [ + "Text" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n" + }, + "input_value": { + "advanced": false, + "display_name": "Input", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text", + "Record", + "Prompt" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str" + }, + "max_tokens": { + "advanced": true, + "display_name": "Max Tokens", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "max_tokens", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 256 + }, + "model_kwargs": { + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "model_kwargs", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "NestedDict", + "value": {} + }, + "model_name": { + "advanced": false, + "display_name": "Model Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "model_name", + "options": [ + "gpt-4o", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "gpt-3.5-turbo" + }, + "openai_api_base": { + "advanced": true, + "display_name": "OpenAI API Base", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The base URL of the OpenAI API. 
Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "openai_api_base", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "openai_api_key": { + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The OpenAI API Key to use for the OpenAI model.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": true, + "multiline": false, + "name": "openai_api_key", + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": "OPENAI_API_KEY" + }, + "stream": { + "advanced": true, + "display_name": "Stream", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Stream the response from the model. Streaming works only in Chat.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "stream", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false + }, + "system_message": { + "advanced": true, + "display_name": "System Message", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "System message to pass to the model.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "system_message", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "temperature": { + "advanced": false, + "display_name": "Temperature", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "temperature", + "password": false, + "placeholder": "", + "rangeSpec": { + "max": 1, + "min": -1, + "step": 0.1, + "step_type": "float" + }, + "required": false, + "show": true, + "title_case": false, + "type": "float", + "value": 0.1 + } + } }, - "id": "OpenAIModel-EjXlN" + "type": "OpenAIModel" }, - "selected": true, - "width": 384, + "dragging": false, "height": 563, + "id": "OpenAIModel-EjXlN", + "position": { + "x": 3410.117202077183, + "y": 431.2038048137648 + }, "positionAbsolute": { "x": 3410.117202077183, "y": 431.2038048137648 }, - "dragging": false + "selected": true, + "type": "genericNode", + "width": 384 }, { + "data": { + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "id": "Prompt-xeI6K", + "node": { + "base_classes": [ + "object", + "Text", + "str" + ], + "beta": false, + "custom_fields": { + "template": [ + "context", + "question" + ] + }, + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "documentation": "", + "error": null, + "field_formatters": {}, + "field_order": [], + "frozen": false, + "full_path": null, + "icon": "prompts", + "is_composition": null, + "is_input": null, + "is_output": null, + "name": "", + "output_types": [ + "Prompt" + ], + "template": { + "_type": "CustomComponent", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + 
"value": "from langflow.custom import CustomComponent\nfrom langflow.field_typing import TemplateField\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n async def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Prompt:\n prompt = await Prompt.from_template_and_variables(template, kwargs)\n self.status = prompt.format_text()\n return prompt\n" + }, + "context": { + "advanced": false, + "display_name": "context", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "context", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "question": { + "advanced": false, + "display_name": "question", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Document", + "BaseOutputParser", + "Record", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "question", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "template": { + "advanced": false, + "display_name": "Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "template", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "prompt", + "value": "{context}\n\n---\n\nGiven the context above, answer the question as best as possible.\n\nQuestion: {question}\n\nAnswer: " + } + } + }, + "type": "Prompt" + }, + "dragging": false, + "height": 477, "id": "Prompt-xeI6K", - "type": "genericNode", "position": { "x": 2969.0261961391298, "y": 442.1613649809069 }, + "positionAbsolute": { + "x": 2969.0261961391298, + "y": 442.1613649809069 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { "data": { - "type": "Prompt", + "id": "ChatOutput-Q39I8", "node": { + "base_classes": [ + "object", + "Text", + "Record", + "str" + ], + "beta": false, + "custom_fields": { + "input_value": null, + "record_template": null, + "return_record": null, + "sender": null, + "sender_name": null, + "session_id": null + }, + "description": "Display a chat message in the Playground.", + "display_name": "Chat Output", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "icon": "ChatOutput", + "output_types": [ + "Message" + ], "template": { + "_type": "CustomComponent", "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from langchain_core.prompts import PromptTemplate\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Prompt, TemplateField, Text\n\n\nclass PromptComponent(CustomComponent):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def 
build_config(self):\n return {\n \"template\": TemplateField(display_name=\"Template\"),\n \"code\": TemplateField(advanced=True),\n }\n\n def build(\n self,\n template: Prompt,\n **kwargs,\n ) -> Text:\n from langflow.base.prompts.utils import dict_values_to_string\n\n prompt_template = PromptTemplate.from_template(Text(template))\n kwargs = dict_values_to_string(kwargs)\n kwargs = {k: \"\\n\".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n try:\n formated_prompt = prompt_template.format(**kwargs)\n except Exception as exc:\n raise ValueError(f\"Error formatting prompt: {exc}\") from exc\n self.status = f'Prompt:\\n\"{formated_prompt}\"'\n return formated_prompt\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", "advanced": true, "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "template": { - "type": "prompt", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "{context}\n\n---\n\nGiven the context above, answer the question as best as possible.\n\nQuestion: {question}\n\nAnswer: ", "fileTypes": [], "file_path": "", - "password": false, - "name": "template", - "display_name": "Template", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, "info": "", - "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent", - "context": { - "field_type": "str", - "required": false, - "placeholder": "", "list": false, - "show": true, + "load_from_db": false, "multiline": true, - "value": "", + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.schema.message import Message\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n files: Optional[list[str]] = None,\n ) -> Message:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n files=files,\n )\n" + }, + "input_value": { + "advanced": false, + "display_name": "Message", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "context", - "display_name": "context", - "advanced": false, + "info": "", "input_types": [ - "Document", - "BaseOutputParser", - "Record", "Text" ], - "dynamic": false, - "info": "", + "list": false, "load_from_db": false, + "multiline": true, + "name": "input_value", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, "type": "str" }, - "question": { - "field_type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "", + "sender": { + "advanced": true, + "display_name": "Sender Type", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "question", - "display_name": "question", - "advanced": false, + "info": "", "input_types": [ - "Document", - "BaseOutputParser", - "Record", "Text" ], - "dynamic": false, - "info": "", + "list": true, "load_from_db": false, + "multiline": false, + "name": "sender", + "options": [ + "Machine", + "User" + ], + 
"password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Machine" + }, + "sender_name": { + "advanced": false, + "display_name": "Sender Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "sender_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "AI" + }, + "session_id": { + "advanced": true, + "display_name": "Session ID", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "If provided, the message will be stored in the memory.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "session_id", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, "type": "str" } - }, - "description": "Create a prompt template with dynamic variables.", - "icon": "prompts", - "is_input": null, - "is_output": null, - "is_composition": null, - "base_classes": ["object", "Text", "str"], - "name": "", - "display_name": "Prompt", - "documentation": "", - "custom_fields": { - "template": ["context", "question"] - }, - "output_types": ["Text"], - "full_path": null, - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false, - "error": null + } }, - "id": "Prompt-xeI6K", - "description": "Create a prompt template with dynamic variables.", - "display_name": "Prompt" + "type": "ChatOutput" }, - "selected": false, - "width": 384, - "height": 477, - "positionAbsolute": { - "x": 2969.0261961391298, - "y": 442.1613649809069 - }, - "dragging": false - }, - { + "dragging": false, + "height": 383, "id": "ChatOutput-Q39I8", - "type": "genericNode", "position": { "x": 3887.2073667611485, "y": 588.4801225794856 }, + "positionAbsolute": { + "x": 3887.2073667611485, + "y": 588.4801225794856 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { "data": { - "type": "ChatOutput", + "id": "File-t0a6a", "node": { + "base_classes": [ + "Record" + ], + "beta": false, + "custom_fields": { + "path": null, + "silent_errors": null + }, + "description": "A generic file loader.", + "display_name": "File", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "icon": "file-text", + "output_types": [ + "Record" + ], "template": { + "_type": "CustomComponent", "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build_with_record(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template or \"\",\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - 
"name": "code", "advanced": true, "dynamic": true, + "fileTypes": [], + "file_path": "", "info": "", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": false, - "placeholder": "", "list": false, - "show": true, + "load_from_db": false, "multiline": true, - "fileTypes": [], - "file_path": "", + "name": "code", "password": false, - "name": "input_value", - "display_name": "Message", - "advanced": false, - "input_types": ["Text"], - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "record_template": { - "type": "str", - "required": false, "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "{text}", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "record_template", - "display_name": "Record Template", - "advanced": true, - "dynamic": false, - "info": "In case of Message being a Record, this template will be used to convert it to text.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "return_record": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "return_record", - "display_name": "Return Record", - "advanced": true, - "dynamic": false, - "info": "Return the message as a record containing the sender, sender_name, and session_id.", - "load_from_db": false, - "title_case": false - }, - "sender": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "Machine", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Machine", "User"], - "name": "sender", - "display_name": "Sender Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "sender_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "AI", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "sender_name", - "display_name": "Sender Name", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "session_id": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "session_id", - "display_name": "Session ID", - "advanced": true, - "dynamic": false, - "info": "If provided, the message will be stored in the memory.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Display a chat message in the Playground.", - "icon": "ChatOutput", - "base_classes": ["object", "Text", "Record", "str"], - "display_name": "Chat Output", - "documentation": "", - "custom_fields": { - "sender": null, - "sender_name": null, - "input_value": null, - "session_id": null, - "return_record": null, - "record_template": null - }, - "output_types": ["Text", "Record"], - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false - }, - "id": "ChatOutput-Q39I8" - }, - "selected": false, - "width": 384, - "height": 383, - "positionAbsolute": { - "x": 3887.2073667611485, - "y": 588.4801225794856 - }, - "dragging": false - }, - { - "id": "File-t0a6a", - "type": "genericNode", 
- "position": { - "x": 2257.233450682836, - "y": 1747.5389618367233 - }, - "data": { - "type": "File", - "node": { - "template": { - "path": { - "type": "file", "required": true, - "placeholder": "", - "list": false, "show": true, - "multiline": false, + "title_case": false, + "type": "code", + "value": "from pathlib import Path\nfrom typing import Any, Dict\n\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parse_text_file_to_record\nfrom langflow.custom import CustomComponent\nfrom langflow.schema import Record\n\n\nclass FileComponent(CustomComponent):\n display_name = \"File\"\n description = \"A generic file loader.\"\n icon = \"file-text\"\n\n def build_config(self) -> Dict[str, Any]:\n return {\n \"path\": {\n \"display_name\": \"Path\",\n \"field_type\": \"file\",\n \"file_types\": TEXT_FILE_TYPES,\n \"info\": f\"Supported file types: {', '.join(TEXT_FILE_TYPES)}\",\n },\n \"silent_errors\": {\n \"display_name\": \"Silent Errors\",\n \"advanced\": True,\n \"info\": \"If true, errors will not raise an exception.\",\n },\n }\n\n def load_file(self, path: str, silent_errors: bool = False) -> Record:\n resolved_path = self.resolve_path(path)\n path_obj = Path(resolved_path)\n extension = path_obj.suffix[1:].lower()\n if extension == \"doc\":\n raise ValueError(\"doc files are not supported. Please save as .docx\")\n if extension not in TEXT_FILE_TYPES:\n raise ValueError(f\"Unsupported file type: {extension}\")\n record = parse_text_file_to_record(resolved_path, silent_errors)\n self.status = record if record else \"No data\"\n return record or Record()\n\n def build(\n self,\n path: str,\n silent_errors: bool = False,\n ) -> Record:\n record = self.load_file(path, silent_errors)\n self.status = record\n return record\n" + }, + "path": { + "advanced": false, + "display_name": "Path", + "dynamic": false, "fileTypes": [ ".txt", ".md", @@ -1370,618 +1728,235 @@ ".tsx" ], "file_path": "51e2b78a-199b-4054-9f32-e288eef6924c/Langflow conversation.pdf", - "password": false, - "name": "path", - "display_name": "Path", - "advanced": false, - "dynamic": false, "info": "Supported file types: txt, md, mdx, csv, json, yaml, yml, xml, html, htm, pdf, docx, py, sh, sql, js, ts, tsx", + "list": false, "load_from_db": false, + "multiline": false, + "name": "path", + "password": false, + "placeholder": "", + "required": true, + "show": true, "title_case": false, + "type": "file", "value": "" }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from pathlib import Path\nfrom typing import Any, Dict\n\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parse_text_file_to_record\nfrom langflow.custom import CustomComponent\nfrom langflow.schema import Record\n\n\nclass FileComponent(CustomComponent):\n display_name = \"File\"\n description = \"A generic file loader.\"\n icon = \"file-text\"\n\n def build_config(self) -> Dict[str, Any]:\n return {\n \"path\": {\n \"display_name\": \"Path\",\n \"field_type\": \"file\",\n \"file_types\": TEXT_FILE_TYPES,\n \"info\": f\"Supported file types: {', '.join(TEXT_FILE_TYPES)}\",\n },\n \"silent_errors\": {\n \"display_name\": \"Silent Errors\",\n \"advanced\": True,\n \"info\": \"If true, errors will not raise an exception.\",\n },\n }\n\n def load_file(self, path: str, silent_errors: bool = False) -> Record:\n resolved_path = self.resolve_path(path)\n path_obj = Path(resolved_path)\n extension = path_obj.suffix[1:].lower()\n if extension == \"doc\":\n raise 
ValueError(\"doc files are not supported. Please save as .docx\")\n if extension not in TEXT_FILE_TYPES:\n raise ValueError(f\"Unsupported file type: {extension}\")\n record = parse_text_file_to_record(resolved_path, silent_errors)\n self.status = record if record else \"No data\"\n return record or Record()\n\n def build(\n self,\n path: str,\n silent_errors: bool = False,\n ) -> Record:\n record = self.load_file(path, silent_errors)\n self.status = record\n return record\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, "silent_errors": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, + "advanced": true, + "display_name": "Silent Errors", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "silent_errors", - "display_name": "Silent Errors", - "advanced": true, - "dynamic": false, "info": "If true, errors will not raise an exception.", + "list": false, "load_from_db": false, - "title_case": false - }, - "_type": "CustomComponent" - }, - "description": "A generic file loader.", - "icon": "file-text", - "base_classes": ["Record"], - "display_name": "File", - "documentation": "", - "custom_fields": { - "path": null, - "silent_errors": null - }, - "output_types": ["Record"], - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false + "multiline": false, + "name": "silent_errors", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false + } + } }, - "id": "File-t0a6a" + "type": "File" }, - "selected": false, - "width": 384, + "dragging": false, "height": 281, + "id": "File-t0a6a", + "position": { + "x": 2257.233450682836, + "y": 1747.5389618367233 + }, "positionAbsolute": { "x": 2257.233450682836, "y": 1747.5389618367233 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { + "data": { + "id": "RecursiveCharacterTextSplitter-tR9QM", + "node": { + "base_classes": [ + "Record" + ], + "beta": false, + "custom_fields": { + "chunk_overlap": null, + "chunk_size": null, + "inputs": null, + "separators": null + }, + "description": "Split text into chunks of a specified length.", + "display_name": "Recursive Character Text Splitter", + "documentation": "https://docs.langflow.org/components/text-splitters#recursivecharactertextsplitter", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "output_types": [ + "Record" + ], + "template": { + "_type": "CustomComponent", + "chunk_overlap": { + "advanced": false, + "display_name": "Chunk Overlap", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The amount of overlap between chunks.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "chunk_overlap", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 200 + }, + "chunk_size": { + "advanced": false, + "display_name": "Chunk Size", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The maximum length of each chunk.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "chunk_size", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 1000 + }, + "code": { + "advanced": 
true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Optional\n\nfrom langchain_core.documents import Document\nfrom langchain_text_splitters import RecursiveCharacterTextSplitter\n\nfrom langflow.custom import CustomComponent\nfrom langflow.schema import Record\nfrom langflow.utils.util import build_loader_repr_from_records, unescape_string\n\n\nclass RecursiveCharacterTextSplitterComponent(CustomComponent):\n display_name: str = \"Recursive Character Text Splitter\"\n description: str = \"Split text into chunks of a specified length.\"\n documentation: str = \"https://docs.langflow.org/components/text-splitters#recursivecharactertextsplitter\"\n\n def build_config(self):\n return {\n \"inputs\": {\n \"display_name\": \"Input\",\n \"info\": \"The texts to split.\",\n \"input_types\": [\"Document\", \"Record\"],\n },\n \"separators\": {\n \"display_name\": \"Separators\",\n \"info\": 'The characters to split on.\\nIf left empty defaults to [\"\\\\n\\\\n\", \"\\\\n\", \" \", \"\"].',\n \"is_list\": True,\n },\n \"chunk_size\": {\n \"display_name\": \"Chunk Size\",\n \"info\": \"The maximum length of each chunk.\",\n \"field_type\": \"int\",\n \"value\": 1000,\n },\n \"chunk_overlap\": {\n \"display_name\": \"Chunk Overlap\",\n \"info\": \"The amount of overlap between chunks.\",\n \"field_type\": \"int\",\n \"value\": 200,\n },\n \"code\": {\"show\": False},\n }\n\n def build(\n self,\n inputs: list[Document],\n separators: Optional[list[str]] = None,\n chunk_size: Optional[int] = 1000,\n chunk_overlap: Optional[int] = 200,\n ) -> list[Record]:\n \"\"\"\n Split text into chunks of a specified length.\n\n Args:\n separators (list[str]): The characters to split on.\n chunk_size (int): The maximum length of each chunk.\n chunk_overlap (int): The amount of overlap between chunks.\n length_function (function): The function to use to calculate the length of the text.\n\n Returns:\n list[str]: The chunks of text.\n \"\"\"\n\n if separators == \"\":\n separators = None\n elif separators:\n # check if the separators list has escaped characters\n # if there are escaped characters, unescape them\n separators = [unescape_string(x) for x in separators]\n\n # Make sure chunk_size and chunk_overlap are ints\n if isinstance(chunk_size, str):\n chunk_size = int(chunk_size)\n if isinstance(chunk_overlap, str):\n chunk_overlap = int(chunk_overlap)\n splitter = RecursiveCharacterTextSplitter(\n separators=separators,\n chunk_size=chunk_size,\n chunk_overlap=chunk_overlap,\n )\n documents = []\n for _input in inputs:\n if isinstance(_input, Record):\n documents.append(_input.to_lc_document())\n else:\n documents.append(_input)\n docs = splitter.split_documents(documents)\n records = self.to_records(docs)\n self.repr_value = build_loader_repr_from_records(records)\n return records\n" + }, + "inputs": { + "advanced": false, + "display_name": "Input", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The texts to split.", + "input_types": [ + "Document", + "Record" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "inputs", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "Document" + }, + "separators": { + "advanced": false, + "display_name": "Separators", 
+ "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The characters to split on.\nIf left empty defaults to [\"\\n\\n\", \"\\n\", \" \", \"\"].", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "separators", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": [ + "" + ] + } + } + }, + "type": "RecursiveCharacterTextSplitter" + }, + "dragging": false, + "height": 501, "id": "RecursiveCharacterTextSplitter-tR9QM", - "type": "genericNode", "position": { "x": 2791.013514133929, "y": 1462.9588953494142 }, - "data": { - "type": "RecursiveCharacterTextSplitter", - "node": { - "template": { - "inputs": { - "type": "Document", - "required": true, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "inputs", - "display_name": "Input", - "advanced": false, - "input_types": ["Document", "Record"], - "dynamic": false, - "info": "The texts to split.", - "load_from_db": false, - "title_case": false - }, - "chunk_overlap": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 200, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "chunk_overlap", - "display_name": "Chunk Overlap", - "advanced": false, - "dynamic": false, - "info": "The amount of overlap between chunks.", - "load_from_db": false, - "title_case": false - }, - "chunk_size": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 1000, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "chunk_size", - "display_name": "Chunk Size", - "advanced": false, - "dynamic": false, - "info": "The maximum length of each chunk.", - "load_from_db": false, - "title_case": false - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Optional\n\nfrom langchain_core.documents import Document\nfrom langchain_text_splitters import RecursiveCharacterTextSplitter\n\nfrom langflow.custom import CustomComponent\nfrom langflow.schema import Record\nfrom langflow.utils.util import build_loader_repr_from_records, unescape_string\n\n\nclass RecursiveCharacterTextSplitterComponent(CustomComponent):\n display_name: str = \"Recursive Character Text Splitter\"\n description: str = \"Split text into chunks of a specified length.\"\n documentation: str = \"https://docs.langflow.org/components/text-splitters#recursivecharactertextsplitter\"\n\n def build_config(self):\n return {\n \"inputs\": {\n \"display_name\": \"Input\",\n \"info\": \"The texts to split.\",\n \"input_types\": [\"Document\", \"Record\"],\n },\n \"separators\": {\n \"display_name\": \"Separators\",\n \"info\": 'The characters to split on.\\nIf left empty defaults to [\"\\\\n\\\\n\", \"\\\\n\", \" \", \"\"].',\n \"is_list\": True,\n },\n \"chunk_size\": {\n \"display_name\": \"Chunk Size\",\n \"info\": \"The maximum length of each chunk.\",\n \"field_type\": \"int\",\n \"value\": 1000,\n },\n \"chunk_overlap\": {\n \"display_name\": \"Chunk Overlap\",\n \"info\": \"The amount of overlap between chunks.\",\n \"field_type\": \"int\",\n \"value\": 200,\n },\n \"code\": {\"show\": False},\n }\n\n def build(\n self,\n inputs: list[Document],\n separators: Optional[list[str]] = None,\n 
chunk_size: Optional[int] = 1000,\n chunk_overlap: Optional[int] = 200,\n ) -> list[Record]:\n \"\"\"\n Split text into chunks of a specified length.\n\n Args:\n separators (list[str]): The characters to split on.\n chunk_size (int): The maximum length of each chunk.\n chunk_overlap (int): The amount of overlap between chunks.\n length_function (function): The function to use to calculate the length of the text.\n\n Returns:\n list[str]: The chunks of text.\n \"\"\"\n\n if separators == \"\":\n separators = None\n elif separators:\n # check if the separators list has escaped characters\n # if there are escaped characters, unescape them\n separators = [unescape_string(x) for x in separators]\n\n # Make sure chunk_size and chunk_overlap are ints\n if isinstance(chunk_size, str):\n chunk_size = int(chunk_size)\n if isinstance(chunk_overlap, str):\n chunk_overlap = int(chunk_overlap)\n splitter = RecursiveCharacterTextSplitter(\n separators=separators,\n chunk_size=chunk_size,\n chunk_overlap=chunk_overlap,\n )\n documents = []\n for _input in inputs:\n if isinstance(_input, Record):\n documents.append(_input.to_lc_document())\n else:\n documents.append(_input)\n docs = splitter.split_documents(documents)\n records = self.to_records(docs)\n self.repr_value = build_loader_repr_from_records(records)\n return records\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "separators": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "separators", - "display_name": "Separators", - "advanced": false, - "dynamic": false, - "info": "The characters to split on.\nIf left empty defaults to [\"\\n\\n\", \"\\n\", \" \", \"\"].", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"], - "value": [""] - }, - "_type": "CustomComponent" - }, - "description": "Split text into chunks of a specified length.", - "base_classes": ["Record"], - "display_name": "Recursive Character Text Splitter", - "documentation": "https://docs.langflow.org/components/text-splitters#recursivecharactertextsplitter", - "custom_fields": { - "inputs": null, - "separators": null, - "chunk_size": null, - "chunk_overlap": null - }, - "output_types": ["Record"], - "field_formatters": {}, - "frozen": false, - "field_order": [], - "beta": false - }, - "id": "RecursiveCharacterTextSplitter-tR9QM" - }, - "selected": false, - "width": 384, - "height": 501, "positionAbsolute": { "x": 2791.013514133929, "y": 1462.9588953494142 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "AstraDBSearch-41nRz", - "type": "genericNode", - "position": { - "x": 1723.976434815103, - "y": 277.03317407245913 - }, "data": { - "type": "AstraDBSearch", + "id": "AstraDBSearch-41nRz", "node": { - "template": { - "embedding": { - "type": "Embeddings", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "embedding", - "display_name": "Embedding", - "advanced": false, - "dynamic": false, - "info": "Embedding to use", - "load_from_db": false, - "title_case": false - }, - "input_value": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", 
- "password": false, - "name": "input_value", - "display_name": "Input Value", - "advanced": false, - "dynamic": false, - "info": "Input value to search", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "api_endpoint": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "api_endpoint", - "display_name": "API Endpoint", - "advanced": false, - "dynamic": false, - "info": "API endpoint URL for the Astra DB service.", - "load_from_db": true, - "title_case": false, - "input_types": ["Text"], - "value": "ASTRA_DB_API_ENDPOINT" - }, - "batch_size": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "batch_size", - "display_name": "Batch Size", - "advanced": true, - "dynamic": false, - "info": "Optional number of records to process in a single batch.", - "load_from_db": false, - "title_case": false - }, - "bulk_delete_concurrency": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "bulk_delete_concurrency", - "display_name": "Bulk Delete Concurrency", - "advanced": true, - "dynamic": false, - "info": "Optional concurrency level for bulk delete operations.", - "load_from_db": false, - "title_case": false - }, - "bulk_insert_batch_concurrency": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "bulk_insert_batch_concurrency", - "display_name": "Bulk Insert Batch Concurrency", - "advanced": true, - "dynamic": false, - "info": "Optional concurrency level for bulk insert operations.", - "load_from_db": false, - "title_case": false - }, - "bulk_insert_overwrite_concurrency": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "bulk_insert_overwrite_concurrency", - "display_name": "Bulk Insert Overwrite Concurrency", - "advanced": true, - "dynamic": false, - "info": "Optional concurrency level for bulk insert operations that overwrite existing records.", - "load_from_db": false, - "title_case": false - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import List, Optional\n\nfrom langflow.components.vectorstores.AstraDB import AstraDBVectorStoreComponent\nfrom langflow.components.vectorstores.base.model import LCVectorStoreComponent\nfrom langflow.field_typing import Embeddings, Text\nfrom langflow.schema import Record\n\n\nclass AstraDBSearchComponent(LCVectorStoreComponent):\n display_name = \"Astra DB Search\"\n description = \"Searches an existing Astra DB Vector Store.\"\n icon = \"AstraDB\"\n field_order = [\"token\", \"api_endpoint\", \"collection_name\", \"input_value\", \"embedding\"]\n\n def build_config(self):\n return {\n \"search_type\": {\n \"display_name\": \"Search Type\",\n \"options\": [\"Similarity\", \"MMR\"],\n },\n \"input_value\": {\n \"display_name\": \"Input Value\",\n \"info\": \"Input value to search\",\n },\n \"embedding\": {\"display_name\": \"Embedding\", \"info\": \"Embedding to 
use\"},\n \"collection_name\": {\n \"display_name\": \"Collection Name\",\n \"info\": \"The name of the collection within Astra DB where the vectors will be stored.\",\n },\n \"token\": {\n \"display_name\": \"Token\",\n \"info\": \"Authentication token for accessing Astra DB.\",\n \"password\": True,\n },\n \"api_endpoint\": {\n \"display_name\": \"API Endpoint\",\n \"info\": \"API endpoint URL for the Astra DB service.\",\n },\n \"namespace\": {\n \"display_name\": \"Namespace\",\n \"info\": \"Optional namespace within Astra DB to use for the collection.\",\n \"advanced\": True,\n },\n \"metric\": {\n \"display_name\": \"Metric\",\n \"info\": \"Optional distance metric for vector comparisons in the vector store.\",\n \"advanced\": True,\n },\n \"batch_size\": {\n \"display_name\": \"Batch Size\",\n \"info\": \"Optional number of records to process in a single batch.\",\n \"advanced\": True,\n },\n \"bulk_insert_batch_concurrency\": {\n \"display_name\": \"Bulk Insert Batch Concurrency\",\n \"info\": \"Optional concurrency level for bulk insert operations.\",\n \"advanced\": True,\n },\n \"bulk_insert_overwrite_concurrency\": {\n \"display_name\": \"Bulk Insert Overwrite Concurrency\",\n \"info\": \"Optional concurrency level for bulk insert operations that overwrite existing records.\",\n \"advanced\": True,\n },\n \"bulk_delete_concurrency\": {\n \"display_name\": \"Bulk Delete Concurrency\",\n \"info\": \"Optional concurrency level for bulk delete operations.\",\n \"advanced\": True,\n },\n \"setup_mode\": {\n \"display_name\": \"Setup Mode\",\n \"info\": \"Configuration mode for setting up the vector store, with options like “Sync”, “Async”, or “Off”.\",\n \"options\": [\"Sync\", \"Async\", \"Off\"],\n \"advanced\": True,\n },\n \"pre_delete_collection\": {\n \"display_name\": \"Pre Delete Collection\",\n \"info\": \"Boolean flag to determine whether to delete the collection before creating a new one.\",\n \"advanced\": True,\n },\n \"metadata_indexing_include\": {\n \"display_name\": \"Metadata Indexing Include\",\n \"info\": \"Optional list of metadata fields to include in the indexing.\",\n \"advanced\": True,\n },\n \"metadata_indexing_exclude\": {\n \"display_name\": \"Metadata Indexing Exclude\",\n \"info\": \"Optional list of metadata fields to exclude from the indexing.\",\n \"advanced\": True,\n },\n \"collection_indexing_policy\": {\n \"display_name\": \"Collection Indexing Policy\",\n \"info\": \"Optional dictionary defining the indexing policy for the collection.\",\n \"advanced\": True,\n },\n \"number_of_results\": {\n \"display_name\": \"Number of Results\",\n \"info\": \"Number of results to return.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n embedding: Embeddings,\n collection_name: str,\n input_value: Text,\n token: str,\n api_endpoint: str,\n search_type: str = \"Similarity\",\n number_of_results: int = 4,\n namespace: Optional[str] = None,\n metric: Optional[str] = None,\n batch_size: Optional[int] = None,\n bulk_insert_batch_concurrency: Optional[int] = None,\n bulk_insert_overwrite_concurrency: Optional[int] = None,\n bulk_delete_concurrency: Optional[int] = None,\n setup_mode: str = \"Sync\",\n pre_delete_collection: bool = False,\n metadata_indexing_include: Optional[List[str]] = None,\n metadata_indexing_exclude: Optional[List[str]] = None,\n collection_indexing_policy: Optional[dict] = None,\n ) -> List[Record]:\n vector_store = AstraDBVectorStoreComponent().build(\n embedding=embedding,\n collection_name=collection_name,\n token=token,\n 
api_endpoint=api_endpoint,\n namespace=namespace,\n metric=metric,\n batch_size=batch_size,\n bulk_insert_batch_concurrency=bulk_insert_batch_concurrency,\n bulk_insert_overwrite_concurrency=bulk_insert_overwrite_concurrency,\n bulk_delete_concurrency=bulk_delete_concurrency,\n setup_mode=setup_mode,\n pre_delete_collection=pre_delete_collection,\n metadata_indexing_include=metadata_indexing_include,\n metadata_indexing_exclude=metadata_indexing_exclude,\n collection_indexing_policy=collection_indexing_policy,\n )\n try:\n return self.search_with_vector_store(input_value, search_type, vector_store, k=number_of_results)\n except KeyError as e:\n if \"content\" in str(e):\n raise ValueError(\n \"You should ingest data through Langflow (or LangChain) to query it in Langflow. Your collection does not contain a field name 'content'.\"\n )\n else:\n raise e\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "collection_indexing_policy": { - "type": "dict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "collection_indexing_policy", - "display_name": "Collection Indexing Policy", - "advanced": true, - "dynamic": false, - "info": "Optional dictionary defining the indexing policy for the collection.", - "load_from_db": false, - "title_case": false - }, - "collection_name": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "collection_name", - "display_name": "Collection Name", - "advanced": false, - "dynamic": false, - "info": "The name of the collection within Astra DB where the vectors will be stored.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"], - "value": "langflow" - }, - "metadata_indexing_exclude": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "metadata_indexing_exclude", - "display_name": "Metadata Indexing Exclude", - "advanced": true, - "dynamic": false, - "info": "Optional list of metadata fields to exclude from the indexing.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "metadata_indexing_include": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "metadata_indexing_include", - "display_name": "Metadata Indexing Include", - "advanced": true, - "dynamic": false, - "info": "Optional list of metadata fields to include in the indexing.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "metric": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "metric", - "display_name": "Metric", - "advanced": true, - "dynamic": false, - "info": "Optional distance metric for vector comparisons in the vector store.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "namespace": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - 
"fileTypes": [], - "file_path": "", - "password": false, - "name": "namespace", - "display_name": "Namespace", - "advanced": true, - "dynamic": false, - "info": "Optional namespace within Astra DB to use for the collection.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "number_of_results": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 4, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "number_of_results", - "display_name": "Number of Results", - "advanced": true, - "dynamic": false, - "info": "Number of results to return.", - "load_from_db": false, - "title_case": false - }, - "pre_delete_collection": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "pre_delete_collection", - "display_name": "Pre Delete Collection", - "advanced": true, - "dynamic": false, - "info": "Boolean flag to determine whether to delete the collection before creating a new one.", - "load_from_db": false, - "title_case": false - }, - "search_type": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "Similarity", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Similarity", "MMR"], - "name": "search_type", - "display_name": "Search Type", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "setup_mode": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "Sync", - "fileTypes": [], - "file_path": "", - "password": false, - "options": ["Sync", "Async", "Off"], - "name": "setup_mode", - "display_name": "Setup Mode", - "advanced": true, - "dynamic": false, - "info": "Configuration mode for setting up the vector store, with options like “Sync”, “Async”, or “Off”.", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "token": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": true, - "name": "token", - "display_name": "Token", - "advanced": false, - "dynamic": false, - "info": "Authentication token for accessing Astra DB.", - "load_from_db": true, - "title_case": false, - "input_types": ["Text"], - "value": "ASTRA_DB_APPLICATION_TOKEN" - }, - "_type": "CustomComponent" - }, - "description": "Searches an existing Astra DB Vector Store.", - "icon": "AstraDB", - "base_classes": ["Record"], - "display_name": "Astra DB Search", - "documentation": "", + "base_classes": [ + "Record" + ], + "beta": false, "custom_fields": { - "embedding": null, - "collection_name": null, - "input_value": null, - "token": null, "api_endpoint": null, - "search_type": null, - "number_of_results": null, - "namespace": null, - "metric": null, "batch_size": null, + "bulk_delete_concurrency": null, "bulk_insert_batch_concurrency": null, "bulk_insert_overwrite_concurrency": null, - "bulk_delete_concurrency": null, - "setup_mode": null, - "pre_delete_collection": null, - "metadata_indexing_include": null, + "collection_indexing_policy": null, + "collection_name": null, + "embedding": null, + "input_value": null, "metadata_indexing_exclude": null, - 
"collection_indexing_policy": null + "metadata_indexing_include": null, + "metric": null, + "namespace": null, + "number_of_results": null, + "pre_delete_collection": null, + "search_type": null, + "setup_mode": null, + "token": null }, - "output_types": ["Record"], + "description": "Searches an existing Astra DB Vector Store.", + "display_name": "Astra DB Search", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [ "token", "api_endpoint", @@ -1989,378 +1964,450 @@ "input_value", "embedding" ], - "beta": false - }, - "id": "AstraDBSearch-41nRz" - }, - "selected": false, - "width": 384, - "height": 713, - "dragging": false, - "positionAbsolute": { - "x": 1723.976434815103, - "y": 277.03317407245913 - } - }, - { - "id": "AstraDB-eUCSS", - "type": "genericNode", - "position": { - "x": 3372.04958055989, - "y": 1611.0742035495277 - }, - "data": { - "type": "AstraDB", - "node": { + "frozen": false, + "icon": "AstraDB", + "output_types": [ + "Record" + ], "template": { - "embedding": { - "type": "Embeddings", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "embedding", - "display_name": "Embedding", - "advanced": false, - "dynamic": false, - "info": "Embedding to use", - "load_from_db": false, - "title_case": false - }, - "inputs": { - "type": "Record", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "inputs", - "display_name": "Inputs", - "advanced": false, - "dynamic": false, - "info": "Optional list of records to be processed and stored in the vector store.", - "load_from_db": false, - "title_case": false - }, + "_type": "CustomComponent", "api_endpoint": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": false, + "display_name": "API Endpoint", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "api_endpoint", - "display_name": "API Endpoint", - "advanced": false, - "dynamic": false, "info": "API endpoint URL for the Astra DB service.", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": true, + "multiline": false, + "name": "api_endpoint", + "password": false, + "placeholder": "", + "required": true, + "show": true, "title_case": false, - "input_types": ["Text"], + "type": "str", "value": "ASTRA_DB_API_ENDPOINT" }, "batch_size": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "Batch Size", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "batch_size", - "display_name": "Batch Size", - "advanced": true, - "dynamic": false, "info": "Optional number of records to process in a single batch.", + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "batch_size", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int" }, "bulk_delete_concurrency": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "Bulk Delete Concurrency", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "bulk_delete_concurrency", - "display_name": "Bulk Delete 
Concurrency", - "advanced": true, - "dynamic": false, "info": "Optional concurrency level for bulk delete operations.", + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "bulk_delete_concurrency", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int" }, "bulk_insert_batch_concurrency": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "Bulk Insert Batch Concurrency", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "bulk_insert_batch_concurrency", - "display_name": "Bulk Insert Batch Concurrency", - "advanced": true, - "dynamic": false, "info": "Optional concurrency level for bulk insert operations.", + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "bulk_insert_batch_concurrency", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int" }, "bulk_insert_overwrite_concurrency": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "Bulk Insert Overwrite Concurrency", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "bulk_insert_overwrite_concurrency", - "display_name": "Bulk Insert Overwrite Concurrency", - "advanced": true, - "dynamic": false, "info": "Optional concurrency level for bulk insert operations that overwrite existing records.", + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "bulk_insert_overwrite_concurrency", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int" }, "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import List, Optional, Union\nfrom langchain_astradb import AstraDBVectorStore\nfrom langchain_astradb.utils.astradb import SetupMode\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Embeddings, VectorStore\nfrom langflow.schema import Record\nfrom langchain_core.retrievers import BaseRetriever\n\n\nclass AstraDBVectorStoreComponent(CustomComponent):\n display_name = \"Astra DB\"\n description = \"Builds or loads an Astra DB Vector Store.\"\n icon = \"AstraDB\"\n field_order = [\"token\", \"api_endpoint\", \"collection_name\", \"inputs\", \"embedding\"]\n\n def build_config(self):\n return {\n \"inputs\": {\n \"display_name\": \"Inputs\",\n \"info\": \"Optional list of records to be processed and stored in the vector store.\",\n },\n \"embedding\": {\"display_name\": \"Embedding\", \"info\": \"Embedding to use\"},\n \"collection_name\": {\n \"display_name\": \"Collection Name\",\n \"info\": \"The name of the collection within Astra DB where the vectors will be stored.\",\n },\n \"token\": {\n \"display_name\": \"Token\",\n \"info\": \"Authentication token for accessing Astra DB.\",\n \"password\": True,\n },\n \"api_endpoint\": {\n \"display_name\": \"API Endpoint\",\n \"info\": \"API endpoint URL for the Astra DB service.\",\n },\n \"namespace\": {\n \"display_name\": \"Namespace\",\n \"info\": \"Optional namespace within Astra DB to use for the collection.\",\n \"advanced\": True,\n },\n \"metric\": {\n \"display_name\": \"Metric\",\n \"info\": 
\"Optional distance metric for vector comparisons in the vector store.\",\n \"advanced\": True,\n },\n \"batch_size\": {\n \"display_name\": \"Batch Size\",\n \"info\": \"Optional number of records to process in a single batch.\",\n \"advanced\": True,\n },\n \"bulk_insert_batch_concurrency\": {\n \"display_name\": \"Bulk Insert Batch Concurrency\",\n \"info\": \"Optional concurrency level for bulk insert operations.\",\n \"advanced\": True,\n },\n \"bulk_insert_overwrite_concurrency\": {\n \"display_name\": \"Bulk Insert Overwrite Concurrency\",\n \"info\": \"Optional concurrency level for bulk insert operations that overwrite existing records.\",\n \"advanced\": True,\n },\n \"bulk_delete_concurrency\": {\n \"display_name\": \"Bulk Delete Concurrency\",\n \"info\": \"Optional concurrency level for bulk delete operations.\",\n \"advanced\": True,\n },\n \"setup_mode\": {\n \"display_name\": \"Setup Mode\",\n \"info\": \"Configuration mode for setting up the vector store, with options like “Sync”, “Async”, or “Off”.\",\n \"options\": [\"Sync\", \"Async\", \"Off\"],\n \"advanced\": True,\n },\n \"pre_delete_collection\": {\n \"display_name\": \"Pre Delete Collection\",\n \"info\": \"Boolean flag to determine whether to delete the collection before creating a new one.\",\n \"advanced\": True,\n },\n \"metadata_indexing_include\": {\n \"display_name\": \"Metadata Indexing Include\",\n \"info\": \"Optional list of metadata fields to include in the indexing.\",\n \"advanced\": True,\n },\n \"metadata_indexing_exclude\": {\n \"display_name\": \"Metadata Indexing Exclude\",\n \"info\": \"Optional list of metadata fields to exclude from the indexing.\",\n \"advanced\": True,\n },\n \"collection_indexing_policy\": {\n \"display_name\": \"Collection Indexing Policy\",\n \"info\": \"Optional dictionary defining the indexing policy for the collection.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n embedding: Embeddings,\n token: str,\n api_endpoint: str,\n collection_name: str,\n inputs: Optional[List[Record]] = None,\n namespace: Optional[str] = None,\n metric: Optional[str] = None,\n batch_size: Optional[int] = None,\n bulk_insert_batch_concurrency: Optional[int] = None,\n bulk_insert_overwrite_concurrency: Optional[int] = None,\n bulk_delete_concurrency: Optional[int] = None,\n setup_mode: str = \"Sync\",\n pre_delete_collection: bool = False,\n metadata_indexing_include: Optional[List[str]] = None,\n metadata_indexing_exclude: Optional[List[str]] = None,\n collection_indexing_policy: Optional[dict] = None,\n ) -> Union[VectorStore, BaseRetriever]:\n try:\n setup_mode_value = SetupMode[setup_mode.upper()]\n except KeyError:\n raise ValueError(f\"Invalid setup mode: {setup_mode}\")\n if inputs:\n documents = [_input.to_lc_document() for _input in inputs]\n\n vector_store = AstraDBVectorStore.from_documents(\n documents=documents,\n embedding=embedding,\n collection_name=collection_name,\n token=token,\n api_endpoint=api_endpoint,\n namespace=namespace,\n metric=metric,\n batch_size=batch_size,\n bulk_insert_batch_concurrency=bulk_insert_batch_concurrency,\n bulk_insert_overwrite_concurrency=bulk_insert_overwrite_concurrency,\n bulk_delete_concurrency=bulk_delete_concurrency,\n setup_mode=setup_mode_value,\n pre_delete_collection=pre_delete_collection,\n metadata_indexing_include=metadata_indexing_include,\n metadata_indexing_exclude=metadata_indexing_exclude,\n collection_indexing_policy=collection_indexing_policy,\n )\n else:\n vector_store = AstraDBVectorStore(\n embedding=embedding,\n 
collection_name=collection_name,\n token=token,\n api_endpoint=api_endpoint,\n namespace=namespace,\n metric=metric,\n batch_size=batch_size,\n bulk_insert_batch_concurrency=bulk_insert_batch_concurrency,\n bulk_insert_overwrite_concurrency=bulk_insert_overwrite_concurrency,\n bulk_delete_concurrency=bulk_delete_concurrency,\n setup_mode=setup_mode_value,\n pre_delete_collection=pre_delete_collection,\n metadata_indexing_include=metadata_indexing_include,\n metadata_indexing_exclude=metadata_indexing_exclude,\n collection_indexing_policy=collection_indexing_policy,\n )\n\n return vector_store\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", "advanced": true, "dynamic": true, + "fileTypes": [], + "file_path": "", "info": "", + "list": false, "load_from_db": false, - "title_case": false + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import List, Optional\n\nfrom langflow.components.vectorstores.AstraDB import AstraDBVectorStoreComponent\nfrom langflow.components.vectorstores.base.model import LCVectorStoreComponent\nfrom langflow.field_typing import Embeddings, Text\nfrom langflow.schema import Record\n\n\nclass AstraDBSearchComponent(LCVectorStoreComponent):\n display_name = \"Astra DB Search\"\n description = \"Searches an existing Astra DB Vector Store.\"\n icon = \"AstraDB\"\n field_order = [\"token\", \"api_endpoint\", \"collection_name\", \"input_value\", \"embedding\"]\n\n def build_config(self):\n return {\n \"search_type\": {\n \"display_name\": \"Search Type\",\n \"options\": [\"Similarity\", \"MMR\"],\n },\n \"input_value\": {\n \"display_name\": \"Input Value\",\n \"info\": \"Input value to search\",\n },\n \"embedding\": {\"display_name\": \"Embedding\", \"info\": \"Embedding to use\"},\n \"collection_name\": {\n \"display_name\": \"Collection Name\",\n \"info\": \"The name of the collection within Astra DB where the vectors will be stored.\",\n },\n \"token\": {\n \"display_name\": \"Token\",\n \"info\": \"Authentication token for accessing Astra DB.\",\n \"password\": True,\n },\n \"api_endpoint\": {\n \"display_name\": \"API Endpoint\",\n \"info\": \"API endpoint URL for the Astra DB service.\",\n },\n \"namespace\": {\n \"display_name\": \"Namespace\",\n \"info\": \"Optional namespace within Astra DB to use for the collection.\",\n \"advanced\": True,\n },\n \"metric\": {\n \"display_name\": \"Metric\",\n \"info\": \"Optional distance metric for vector comparisons in the vector store.\",\n \"advanced\": True,\n },\n \"batch_size\": {\n \"display_name\": \"Batch Size\",\n \"info\": \"Optional number of records to process in a single batch.\",\n \"advanced\": True,\n },\n \"bulk_insert_batch_concurrency\": {\n \"display_name\": \"Bulk Insert Batch Concurrency\",\n \"info\": \"Optional concurrency level for bulk insert operations.\",\n \"advanced\": True,\n },\n \"bulk_insert_overwrite_concurrency\": {\n \"display_name\": \"Bulk Insert Overwrite Concurrency\",\n \"info\": \"Optional concurrency level for bulk insert operations that overwrite existing records.\",\n \"advanced\": True,\n },\n \"bulk_delete_concurrency\": {\n \"display_name\": \"Bulk Delete Concurrency\",\n \"info\": \"Optional concurrency level for bulk delete operations.\",\n \"advanced\": True,\n },\n \"setup_mode\": {\n \"display_name\": \"Setup Mode\",\n \"info\": \"Configuration mode for setting up the vector store, with options like “Sync”, 
“Async”, or “Off”.\",\n \"options\": [\"Sync\", \"Async\", \"Off\"],\n \"advanced\": True,\n },\n \"pre_delete_collection\": {\n \"display_name\": \"Pre Delete Collection\",\n \"info\": \"Boolean flag to determine whether to delete the collection before creating a new one.\",\n \"advanced\": True,\n },\n \"metadata_indexing_include\": {\n \"display_name\": \"Metadata Indexing Include\",\n \"info\": \"Optional list of metadata fields to include in the indexing.\",\n \"advanced\": True,\n },\n \"metadata_indexing_exclude\": {\n \"display_name\": \"Metadata Indexing Exclude\",\n \"info\": \"Optional list of metadata fields to exclude from the indexing.\",\n \"advanced\": True,\n },\n \"collection_indexing_policy\": {\n \"display_name\": \"Collection Indexing Policy\",\n \"info\": \"Optional dictionary defining the indexing policy for the collection.\",\n \"advanced\": True,\n },\n \"number_of_results\": {\n \"display_name\": \"Number of Results\",\n \"info\": \"Number of results to return.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n embedding: Embeddings,\n collection_name: str,\n input_value: Text,\n token: str,\n api_endpoint: str,\n search_type: str = \"Similarity\",\n number_of_results: int = 4,\n namespace: Optional[str] = None,\n metric: Optional[str] = None,\n batch_size: Optional[int] = None,\n bulk_insert_batch_concurrency: Optional[int] = None,\n bulk_insert_overwrite_concurrency: Optional[int] = None,\n bulk_delete_concurrency: Optional[int] = None,\n setup_mode: str = \"Sync\",\n pre_delete_collection: bool = False,\n metadata_indexing_include: Optional[List[str]] = None,\n metadata_indexing_exclude: Optional[List[str]] = None,\n collection_indexing_policy: Optional[dict] = None,\n ) -> List[Record]:\n vector_store = AstraDBVectorStoreComponent().build(\n embedding=embedding,\n collection_name=collection_name,\n token=token,\n api_endpoint=api_endpoint,\n namespace=namespace,\n metric=metric,\n batch_size=batch_size,\n bulk_insert_batch_concurrency=bulk_insert_batch_concurrency,\n bulk_insert_overwrite_concurrency=bulk_insert_overwrite_concurrency,\n bulk_delete_concurrency=bulk_delete_concurrency,\n setup_mode=setup_mode,\n pre_delete_collection=pre_delete_collection,\n metadata_indexing_include=metadata_indexing_include,\n metadata_indexing_exclude=metadata_indexing_exclude,\n collection_indexing_policy=collection_indexing_policy,\n )\n try:\n return self.search_with_vector_store(input_value, search_type, vector_store, k=number_of_results)\n except KeyError as e:\n if \"content\" in str(e):\n raise ValueError(\n \"You should ingest data through Langflow (or LangChain) to query it in Langflow. 
Your collection does not contain a field name 'content'.\"\n )\n else:\n raise e\n" }, "collection_indexing_policy": { - "type": "dict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "Collection Indexing Policy", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "collection_indexing_policy", - "display_name": "Collection Indexing Policy", - "advanced": true, - "dynamic": false, "info": "Optional dictionary defining the indexing policy for the collection.", + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "collection_indexing_policy", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "dict" }, "collection_name": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": false, + "display_name": "Collection Name", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "collection_name", - "display_name": "Collection Name", - "advanced": false, - "dynamic": false, "info": "The name of the collection within Astra DB where the vectors will be stored.", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": false, + "multiline": false, + "name": "collection_name", + "password": false, + "placeholder": "", + "required": true, + "show": true, "title_case": false, - "input_types": ["Text"], + "type": "str", "value": "langflow" }, - "metadata_indexing_exclude": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, + "embedding": { + "advanced": false, + "display_name": "Embedding", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "metadata_indexing_exclude", - "display_name": "Metadata Indexing Exclude", - "advanced": true, - "dynamic": false, - "info": "Optional list of metadata fields to exclude from the indexing.", + "info": "Embedding to use", + "list": false, "load_from_db": false, + "multiline": false, + "name": "embedding", + "password": false, + "placeholder": "", + "required": true, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "Embeddings" + }, + "input_value": { + "advanced": false, + "display_name": "Input Value", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Input value to search", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "input_value", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str" + }, + "metadata_indexing_exclude": { + "advanced": true, + "display_name": "Metadata Indexing Exclude", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Optional list of metadata fields to exclude from the indexing.", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "metadata_indexing_exclude", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" }, "metadata_indexing_include": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "Metadata Indexing Include", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": 
"metadata_indexing_include", - "display_name": "Metadata Indexing Include", - "advanced": true, - "dynamic": false, "info": "Optional list of metadata fields to include in the indexing.", + "input_types": [ + "Text" + ], + "list": true, "load_from_db": false, + "multiline": false, + "name": "metadata_indexing_include", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "str" }, "metric": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "Metric", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "metric", - "display_name": "Metric", - "advanced": true, - "dynamic": false, "info": "Optional distance metric for vector comparisons in the vector store.", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": false, + "multiline": false, + "name": "metric", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "str" }, "namespace": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": true, + "display_name": "Namespace", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "namespace", - "display_name": "Namespace", - "advanced": true, - "dynamic": false, "info": "Optional namespace within Astra DB to use for the collection.", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": false, + "multiline": false, + "name": "namespace", + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "str" + }, + "number_of_results": { + "advanced": true, + "display_name": "Number of Results", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Number of results to return.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "number_of_results", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 4 }, "pre_delete_collection": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, + "advanced": true, + "display_name": "Pre Delete Collection", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "name": "pre_delete_collection", - "display_name": "Pre Delete Collection", - "advanced": true, - "dynamic": false, "info": "Boolean flag to determine whether to delete the collection before creating a new one.", + "list": false, "load_from_db": false, - "title_case": false + "multiline": false, + "name": "pre_delete_collection", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false + }, + "search_type": { + "advanced": false, + "display_name": "Search Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "search_type", + "options": [ + "Similarity", + "MMR" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Similarity" }, "setup_mode": { - "type": "str", - "required": false, - "placeholder": "", - 
"list": true, - "show": true, - "multiline": false, - "value": "Sync", + "advanced": true, + "display_name": "Setup Mode", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": false, - "options": ["Sync", "Async", "Off"], - "name": "setup_mode", - "display_name": "Setup Mode", - "advanced": true, - "dynamic": false, "info": "Configuration mode for setting up the vector store, with options like “Sync”, “Async”, or “Off”.", + "input_types": [ + "Text" + ], + "list": true, "load_from_db": false, + "multiline": false, + "name": "setup_mode", + "options": [ + "Sync", + "Async", + "Off" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, "title_case": false, - "input_types": ["Text"] + "type": "str", + "value": "Sync" }, "token": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, + "advanced": false, + "display_name": "Token", + "dynamic": false, "fileTypes": [], "file_path": "", - "password": true, - "name": "token", - "display_name": "Token", - "advanced": false, - "dynamic": false, "info": "Authentication token for accessing Astra DB.", + "input_types": [ + "Text" + ], + "list": false, "load_from_db": true, + "multiline": false, + "name": "token", + "password": true, + "placeholder": "", + "required": true, + "show": true, "title_case": false, - "input_types": ["Text"], + "type": "str", "value": "ASTRA_DB_APPLICATION_TOKEN" - }, - "_type": "CustomComponent" - }, - "description": "Builds or loads an Astra DB Vector Store.", - "icon": "AstraDB", - "base_classes": ["VectorStore"], - "display_name": "Astra DB", - "documentation": "", + } + } + }, + "type": "AstraDBSearch" + }, + "dragging": false, + "height": 713, + "id": "AstraDBSearch-41nRz", + "position": { + "x": 1723.976434815103, + "y": 277.03317407245913 + }, + "positionAbsolute": { + "x": 1723.976434815103, + "y": 277.03317407245913 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { + "data": { + "id": "AstraDB-eUCSS", + "node": { + "base_classes": [ + "VectorStore" + ], + "beta": false, "custom_fields": { - "embedding": null, - "token": null, "api_endpoint": null, - "collection_name": null, - "inputs": null, - "namespace": null, - "metric": null, "batch_size": null, + "bulk_delete_concurrency": null, "bulk_insert_batch_concurrency": null, "bulk_insert_overwrite_concurrency": null, - "bulk_delete_concurrency": null, - "setup_mode": null, - "pre_delete_collection": null, - "metadata_indexing_include": null, + "collection_indexing_policy": null, + "collection_name": null, + "embedding": null, + "inputs": null, "metadata_indexing_exclude": null, - "collection_indexing_policy": null + "metadata_indexing_include": null, + "metric": null, + "namespace": null, + "pre_delete_collection": null, + "setup_mode": null, + "token": null }, - "output_types": ["VectorStore"], + "description": "Builds or loads an Astra DB Vector Store.", + "display_name": "Astra DB", + "documentation": "", "field_formatters": {}, - "frozen": false, "field_order": [ "token", "api_endpoint", @@ -2368,499 +2415,395 @@ "inputs", "embedding" ], - "beta": false + "frozen": false, + "icon": "AstraDB", + "output_types": [ + "VectorStore", + "BaseRetriever" + ], + "template": { + "_type": "CustomComponent", + "api_endpoint": { + "advanced": false, + "display_name": "API Endpoint", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "API endpoint URL for the Astra DB service.", + "input_types": [ + "Text" + ], + "list": false, + 
"load_from_db": true, + "multiline": false, + "name": "api_endpoint", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": "ASTRA_DB_API_ENDPOINT" + }, + "batch_size": { + "advanced": true, + "display_name": "Batch Size", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Optional number of records to process in a single batch.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "batch_size", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int" + }, + "bulk_delete_concurrency": { + "advanced": true, + "display_name": "Bulk Delete Concurrency", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Optional concurrency level for bulk delete operations.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "bulk_delete_concurrency", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int" + }, + "bulk_insert_batch_concurrency": { + "advanced": true, + "display_name": "Bulk Insert Batch Concurrency", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Optional concurrency level for bulk insert operations.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "bulk_insert_batch_concurrency", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int" + }, + "bulk_insert_overwrite_concurrency": { + "advanced": true, + "display_name": "Bulk Insert Overwrite Concurrency", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Optional concurrency level for bulk insert operations that overwrite existing records.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "bulk_insert_overwrite_concurrency", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int" + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import List, Optional, Union\n\nfrom langchain_astradb import AstraDBVectorStore\nfrom langchain_astradb.utils.astradb import SetupMode\nfrom langchain_core.retrievers import BaseRetriever\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Embeddings, VectorStore\nfrom langflow.schema import Record\n\n\nclass AstraDBVectorStoreComponent(CustomComponent):\n display_name = \"Astra DB\"\n description = \"Builds or loads an Astra DB Vector Store.\"\n icon = \"AstraDB\"\n field_order = [\"token\", \"api_endpoint\", \"collection_name\", \"inputs\", \"embedding\"]\n\n def build_config(self):\n return {\n \"inputs\": {\n \"display_name\": \"Inputs\",\n \"info\": \"Optional list of records to be processed and stored in the vector store.\",\n },\n \"embedding\": {\"display_name\": \"Embedding\", \"info\": \"Embedding to use\"},\n \"collection_name\": {\n \"display_name\": \"Collection Name\",\n \"info\": \"The name of the collection within Astra DB where the vectors will be stored.\",\n },\n \"token\": {\n \"display_name\": \"Token\",\n \"info\": \"Authentication token for accessing Astra DB.\",\n \"password\": True,\n },\n 
\"api_endpoint\": {\n \"display_name\": \"API Endpoint\",\n \"info\": \"API endpoint URL for the Astra DB service.\",\n },\n \"namespace\": {\n \"display_name\": \"Namespace\",\n \"info\": \"Optional namespace within Astra DB to use for the collection.\",\n \"advanced\": True,\n },\n \"metric\": {\n \"display_name\": \"Metric\",\n \"info\": \"Optional distance metric for vector comparisons in the vector store.\",\n \"advanced\": True,\n },\n \"batch_size\": {\n \"display_name\": \"Batch Size\",\n \"info\": \"Optional number of records to process in a single batch.\",\n \"advanced\": True,\n },\n \"bulk_insert_batch_concurrency\": {\n \"display_name\": \"Bulk Insert Batch Concurrency\",\n \"info\": \"Optional concurrency level for bulk insert operations.\",\n \"advanced\": True,\n },\n \"bulk_insert_overwrite_concurrency\": {\n \"display_name\": \"Bulk Insert Overwrite Concurrency\",\n \"info\": \"Optional concurrency level for bulk insert operations that overwrite existing records.\",\n \"advanced\": True,\n },\n \"bulk_delete_concurrency\": {\n \"display_name\": \"Bulk Delete Concurrency\",\n \"info\": \"Optional concurrency level for bulk delete operations.\",\n \"advanced\": True,\n },\n \"setup_mode\": {\n \"display_name\": \"Setup Mode\",\n \"info\": \"Configuration mode for setting up the vector store, with options like “Sync”, “Async”, or “Off”.\",\n \"options\": [\"Sync\", \"Async\", \"Off\"],\n \"advanced\": True,\n },\n \"pre_delete_collection\": {\n \"display_name\": \"Pre Delete Collection\",\n \"info\": \"Boolean flag to determine whether to delete the collection before creating a new one.\",\n \"advanced\": True,\n },\n \"metadata_indexing_include\": {\n \"display_name\": \"Metadata Indexing Include\",\n \"info\": \"Optional list of metadata fields to include in the indexing.\",\n \"advanced\": True,\n },\n \"metadata_indexing_exclude\": {\n \"display_name\": \"Metadata Indexing Exclude\",\n \"info\": \"Optional list of metadata fields to exclude from the indexing.\",\n \"advanced\": True,\n },\n \"collection_indexing_policy\": {\n \"display_name\": \"Collection Indexing Policy\",\n \"info\": \"Optional dictionary defining the indexing policy for the collection.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n embedding: Embeddings,\n token: str,\n api_endpoint: str,\n collection_name: str,\n inputs: Optional[List[Record]] = None,\n namespace: Optional[str] = None,\n metric: Optional[str] = None,\n batch_size: Optional[int] = None,\n bulk_insert_batch_concurrency: Optional[int] = None,\n bulk_insert_overwrite_concurrency: Optional[int] = None,\n bulk_delete_concurrency: Optional[int] = None,\n setup_mode: str = \"Sync\",\n pre_delete_collection: bool = False,\n metadata_indexing_include: Optional[List[str]] = None,\n metadata_indexing_exclude: Optional[List[str]] = None,\n collection_indexing_policy: Optional[dict] = None,\n ) -> Union[VectorStore, BaseRetriever]:\n try:\n setup_mode_value = SetupMode[setup_mode.upper()]\n except KeyError:\n raise ValueError(f\"Invalid setup mode: {setup_mode}\")\n if inputs:\n documents = [_input.to_lc_document() for _input in inputs]\n\n vector_store = AstraDBVectorStore.from_documents(\n documents=documents,\n embedding=embedding,\n collection_name=collection_name,\n token=token,\n api_endpoint=api_endpoint,\n namespace=namespace,\n metric=metric,\n batch_size=batch_size,\n bulk_insert_batch_concurrency=bulk_insert_batch_concurrency,\n bulk_insert_overwrite_concurrency=bulk_insert_overwrite_concurrency,\n 
bulk_delete_concurrency=bulk_delete_concurrency,\n setup_mode=setup_mode_value,\n pre_delete_collection=pre_delete_collection,\n metadata_indexing_include=metadata_indexing_include,\n metadata_indexing_exclude=metadata_indexing_exclude,\n collection_indexing_policy=collection_indexing_policy,\n )\n else:\n vector_store = AstraDBVectorStore(\n embedding=embedding,\n collection_name=collection_name,\n token=token,\n api_endpoint=api_endpoint,\n namespace=namespace,\n metric=metric,\n batch_size=batch_size,\n bulk_insert_batch_concurrency=bulk_insert_batch_concurrency,\n bulk_insert_overwrite_concurrency=bulk_insert_overwrite_concurrency,\n bulk_delete_concurrency=bulk_delete_concurrency,\n setup_mode=setup_mode_value,\n pre_delete_collection=pre_delete_collection,\n metadata_indexing_include=metadata_indexing_include,\n metadata_indexing_exclude=metadata_indexing_exclude,\n collection_indexing_policy=collection_indexing_policy,\n )\n\n return vector_store\n" + }, + "collection_indexing_policy": { + "advanced": true, + "display_name": "Collection Indexing Policy", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Optional dictionary defining the indexing policy for the collection.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "collection_indexing_policy", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "dict" + }, + "collection_name": { + "advanced": false, + "display_name": "Collection Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "The name of the collection within Astra DB where the vectors will be stored.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "collection_name", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": "langflow" + }, + "embedding": { + "advanced": false, + "display_name": "Embedding", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Embedding to use", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "embedding", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "Embeddings" + }, + "inputs": { + "advanced": false, + "display_name": "Inputs", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Optional list of records to be processed and stored in the vector store.", + "list": true, + "load_from_db": false, + "multiline": false, + "name": "inputs", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "Record" + }, + "metadata_indexing_exclude": { + "advanced": true, + "display_name": "Metadata Indexing Exclude", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Optional list of metadata fields to exclude from the indexing.", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "metadata_indexing_exclude", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "metadata_indexing_include": { + "advanced": true, + "display_name": "Metadata Indexing Include", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Optional list of metadata fields to include in the indexing.", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false,
+ "multiline": false, + "name": "metadata_indexing_include", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "metric": { + "advanced": true, + "display_name": "Metric", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Optional distance metric for vector comparisons in the vector store.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "metric", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "namespace": { + "advanced": true, + "display_name": "Namespace", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Optional namespace within Astra DB to use for the collection.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "namespace", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "pre_delete_collection": { + "advanced": true, + "display_name": "Pre Delete Collection", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Boolean flag to determine whether to delete the collection before creating a new one.", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "pre_delete_collection", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false + }, + "setup_mode": { + "advanced": true, + "display_name": "Setup Mode", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Configuration mode for setting up the vector store, with options like “Sync”, “Async”, or “Off”.", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "setup_mode", + "options": [ + "Sync", + "Async", + "Off" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Sync" + }, + "token": { + "advanced": false, + "display_name": "Token", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "Authentication token for accessing Astra DB.", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": true, + "multiline": false, + "name": "token", + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": "ASTRA_DB_APPLICATION_TOKEN" + } + } }, - "id": "AstraDB-eUCSS" + "type": "AstraDB" }, - "selected": false, - "width": 384, + "dragging": false, "height": 573, + "id": "AstraDB-eUCSS", + "position": { + "x": 3372.04958055989, + "y": 1611.0742035495277 + }, "positionAbsolute": { "x": 3372.04958055989, "y": 1611.0742035495277 }, - "dragging": false + "selected": false, + "type": "genericNode", + "width": 384 }, { - "id": "OpenAIEmbeddings-9TPjc", - "type": "genericNode", - "position": { - "x": 2814.0402191223047, - "y": 1955.9268168273086 - }, "data": { - "type": "OpenAIEmbeddings", + "id": "OpenAIEmbeddings-9TPjc", "node": { - "template": { - "allowed_special": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": [], - "fileTypes": [], - "file_path": "", - "password": false, - "name": "allowed_special", - "display_name": "Allowed Special", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": 
false, - "input_types": ["Text"] - }, - "chunk_size": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 1000, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "chunk_size", - "display_name": "Chunk Size", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "client": { - "type": "Any", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "client", - "display_name": "Client", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Dict, List, Optional\n\nfrom langchain_openai.embeddings.base import OpenAIEmbeddings\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Embeddings, NestedDict\n\n\nclass OpenAIEmbeddingsComponent(CustomComponent):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n\n def build_config(self):\n return {\n \"allowed_special\": {\n \"display_name\": \"Allowed Special\",\n \"advanced\": True,\n \"field_type\": \"str\",\n \"is_list\": True,\n },\n \"default_headers\": {\n \"display_name\": \"Default Headers\",\n \"advanced\": True,\n \"field_type\": \"dict\",\n },\n \"default_query\": {\n \"display_name\": \"Default Query\",\n \"advanced\": True,\n \"field_type\": \"NestedDict\",\n },\n \"disallowed_special\": {\n \"display_name\": \"Disallowed Special\",\n \"advanced\": True,\n \"field_type\": \"str\",\n \"is_list\": True,\n },\n \"chunk_size\": {\"display_name\": \"Chunk Size\", \"advanced\": True},\n \"client\": {\"display_name\": \"Client\", \"advanced\": True},\n \"deployment\": {\"display_name\": \"Deployment\", \"advanced\": True},\n \"embedding_ctx_length\": {\n \"display_name\": \"Embedding Context Length\",\n \"advanced\": True,\n },\n \"max_retries\": {\"display_name\": \"Max Retries\", \"advanced\": True},\n \"model\": {\n \"display_name\": \"Model\",\n \"advanced\": False,\n \"options\": [\n \"text-embedding-3-small\",\n \"text-embedding-3-large\",\n \"text-embedding-ada-002\",\n ],\n },\n \"model_kwargs\": {\"display_name\": \"Model Kwargs\", \"advanced\": True},\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"password\": True,\n \"advanced\": True,\n },\n \"openai_api_key\": {\"display_name\": \"OpenAI API Key\", \"password\": True},\n \"openai_api_type\": {\n \"display_name\": \"OpenAI API Type\",\n \"advanced\": True,\n \"password\": True,\n },\n \"openai_api_version\": {\n \"display_name\": \"OpenAI API Version\",\n \"advanced\": True,\n },\n \"openai_organization\": {\n \"display_name\": \"OpenAI Organization\",\n \"advanced\": True,\n },\n \"openai_proxy\": {\"display_name\": \"OpenAI Proxy\", \"advanced\": True},\n \"request_timeout\": {\"display_name\": \"Request Timeout\", \"advanced\": True},\n \"show_progress_bar\": {\n \"display_name\": \"Show Progress Bar\",\n \"advanced\": True,\n },\n \"skip_empty\": {\"display_name\": \"Skip Empty\", \"advanced\": True},\n \"tiktoken_model_name\": {\n \"display_name\": \"TikToken Model Name\",\n \"advanced\": True,\n },\n \"tiktoken_enable\": {\"display_name\": \"TikToken Enable\", \"advanced\": True},\n 
}\n\n def build(\n self,\n openai_api_key: str,\n default_headers: Optional[Dict[str, str]] = None,\n default_query: Optional[NestedDict] = {},\n allowed_special: List[str] = [],\n disallowed_special: List[str] = [\"all\"],\n chunk_size: int = 1000,\n deployment: str = \"text-embedding-ada-002\",\n embedding_ctx_length: int = 8191,\n max_retries: int = 6,\n model: str = \"text-embedding-ada-002\",\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n openai_api_type: Optional[str] = None,\n openai_api_version: Optional[str] = None,\n openai_organization: Optional[str] = None,\n openai_proxy: Optional[str] = None,\n request_timeout: Optional[float] = None,\n show_progress_bar: bool = False,\n skip_empty: bool = False,\n tiktoken_enable: bool = True,\n tiktoken_model_name: Optional[str] = None,\n ) -> Embeddings:\n # This is to avoid errors with Vector Stores (e.g Chroma)\n if disallowed_special == [\"all\"]:\n disallowed_special = \"all\" # type: ignore\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n return OpenAIEmbeddings(\n tiktoken_enabled=tiktoken_enable,\n default_headers=default_headers,\n default_query=default_query,\n allowed_special=set(allowed_special),\n disallowed_special=\"all\",\n chunk_size=chunk_size,\n deployment=deployment,\n embedding_ctx_length=embedding_ctx_length,\n max_retries=max_retries,\n model=model,\n model_kwargs=model_kwargs,\n base_url=openai_api_base,\n api_key=api_key,\n openai_api_type=openai_api_type,\n api_version=openai_api_version,\n organization=openai_organization,\n openai_proxy=openai_proxy,\n timeout=request_timeout,\n show_progress_bar=show_progress_bar,\n skip_empty=skip_empty,\n tiktoken_model_name=tiktoken_model_name,\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "default_headers": { - "type": "dict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "default_headers", - "display_name": "Default Headers", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "default_query": { - "type": "NestedDict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": {}, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "default_query", - "display_name": "Default Query", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "deployment": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": "text-embedding-ada-002", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "deployment", - "display_name": "Deployment", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "disallowed_special": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": ["all"], - "fileTypes": [], - "file_path": "", - "password": false, - "name": "disallowed_special", - "display_name": "Disallowed Special", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - 
"embedding_ctx_length": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 8191, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "embedding_ctx_length", - "display_name": "Embedding Context Length", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "max_retries": { - "type": "int", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": 6, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "max_retries", - "display_name": "Max Retries", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "model": { - "type": "str", - "required": false, - "placeholder": "", - "list": true, - "show": true, - "multiline": false, - "value": "text-embedding-ada-002", - "fileTypes": [], - "file_path": "", - "password": false, - "options": [ - "text-embedding-3-small", - "text-embedding-3-large", - "text-embedding-ada-002" - ], - "name": "model", - "display_name": "Model", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "model_kwargs": { - "type": "NestedDict", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": {}, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "model_kwargs", - "display_name": "Model Kwargs", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "openai_api_base": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": true, - "name": "openai_api_base", - "display_name": "OpenAI API Base", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_api_key": { - "type": "str", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": true, - "name": "openai_api_key", - "display_name": "OpenAI API Key", - "advanced": false, - "dynamic": false, - "info": "", - "load_from_db": true, - "title_case": false, - "input_types": ["Text"], - "value": "OPENAI_API_KEY" - }, - "openai_api_type": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": true, - "name": "openai_api_type", - "display_name": "OpenAI API Type", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_api_version": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "openai_api_version", - "display_name": "OpenAI API Version", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_organization": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "openai_organization", - "display_name": "OpenAI Organization", - 
"advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "openai_proxy": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "openai_proxy", - "display_name": "OpenAI Proxy", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "request_timeout": { - "type": "float", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "request_timeout", - "display_name": "Request Timeout", - "advanced": true, - "dynamic": false, - "info": "", - "rangeSpec": { - "step_type": "float", - "min": -1, - "max": 1, - "step": 0.1 - }, - "load_from_db": false, - "title_case": false - }, - "show_progress_bar": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "show_progress_bar", - "display_name": "Show Progress Bar", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "skip_empty": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "skip_empty", - "display_name": "Skip Empty", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "tiktoken_enable": { - "type": "bool", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "value": true, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "tiktoken_enable", - "display_name": "TikToken Enable", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false - }, - "tiktoken_model_name": { - "type": "str", - "required": false, - "placeholder": "", - "list": false, - "show": true, - "multiline": false, - "fileTypes": [], - "file_path": "", - "password": false, - "name": "tiktoken_model_name", - "display_name": "TikToken Model Name", - "advanced": true, - "dynamic": false, - "info": "", - "load_from_db": false, - "title_case": false, - "input_types": ["Text"] - }, - "_type": "CustomComponent" - }, - "description": "Generate embeddings using OpenAI models.", - "base_classes": ["Embeddings"], - "display_name": "OpenAI Embeddings", - "documentation": "", + "base_classes": [ + "Embeddings" + ], + "beta": false, "custom_fields": { - "openai_api_key": null, - "default_headers": null, - "default_query": null, "allowed_special": null, - "disallowed_special": null, "chunk_size": null, "client": null, + "default_headers": null, + "default_query": null, "deployment": null, + "disallowed_special": null, "embedding_ctx_length": null, "max_retries": null, "model": null, "model_kwargs": null, "openai_api_base": null, + "openai_api_key": null, "openai_api_type": null, "openai_api_version": null, "openai_organization": null, @@ -2871,271 +2814,490 @@ "tiktoken_enable": null, "tiktoken_model_name": null }, - "output_types": ["Embeddings"], + "description": "Generate embeddings using OpenAI models.", + "display_name": "OpenAI Embeddings", + "documentation": "", "field_formatters": {}, - 
"frozen": false, "field_order": [], - "beta": false + "frozen": false, + "output_types": [ + "Embeddings" + ], + "template": { + "_type": "CustomComponent", + "allowed_special": { + "advanced": true, + "display_name": "Allowed Special", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "allowed_special", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": [] + }, + "chunk_size": { + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "chunk_size", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 1000 + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Dict, List, Optional\n\nfrom langchain_openai.embeddings.base import OpenAIEmbeddings\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.custom import CustomComponent\nfrom langflow.field_typing import Embeddings, NestedDict\n\n\nclass OpenAIEmbeddingsComponent(CustomComponent):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n\n def build_config(self):\n return {\n \"allowed_special\": {\n \"display_name\": \"Allowed Special\",\n \"advanced\": True,\n \"field_type\": \"str\",\n \"is_list\": True,\n },\n \"default_headers\": {\n \"display_name\": \"Default Headers\",\n \"advanced\": True,\n \"field_type\": \"dict\",\n },\n \"default_query\": {\n \"display_name\": \"Default Query\",\n \"advanced\": True,\n \"field_type\": \"NestedDict\",\n },\n \"disallowed_special\": {\n \"display_name\": \"Disallowed Special\",\n \"advanced\": True,\n \"field_type\": \"str\",\n \"is_list\": True,\n },\n \"chunk_size\": {\"display_name\": \"Chunk Size\", \"advanced\": True},\n \"client\": {\"display_name\": \"Client\", \"advanced\": True},\n \"deployment\": {\"display_name\": \"Deployment\", \"advanced\": True},\n \"embedding_ctx_length\": {\n \"display_name\": \"Embedding Context Length\",\n \"advanced\": True,\n },\n \"max_retries\": {\"display_name\": \"Max Retries\", \"advanced\": True},\n \"model\": {\n \"display_name\": \"Model\",\n \"advanced\": False,\n \"options\": [\n \"text-embedding-3-small\",\n \"text-embedding-3-large\",\n \"text-embedding-ada-002\",\n ],\n },\n \"model_kwargs\": {\"display_name\": \"Model Kwargs\", \"advanced\": True},\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"password\": True,\n \"advanced\": True,\n },\n \"openai_api_key\": {\"display_name\": \"OpenAI API Key\", \"password\": True},\n \"openai_api_type\": {\n \"display_name\": \"OpenAI API Type\",\n \"advanced\": True,\n \"password\": True,\n },\n \"openai_api_version\": {\n \"display_name\": \"OpenAI API Version\",\n \"advanced\": True,\n },\n \"openai_organization\": {\n \"display_name\": \"OpenAI Organization\",\n \"advanced\": True,\n },\n \"openai_proxy\": {\"display_name\": \"OpenAI Proxy\", \"advanced\": True},\n \"request_timeout\": {\"display_name\": \"Request Timeout\", 
\"advanced\": True},\n \"show_progress_bar\": {\n \"display_name\": \"Show Progress Bar\",\n \"advanced\": True,\n },\n \"skip_empty\": {\"display_name\": \"Skip Empty\", \"advanced\": True},\n \"tiktoken_model_name\": {\n \"display_name\": \"TikToken Model Name\",\n \"advanced\": True,\n },\n \"tiktoken_enable\": {\"display_name\": \"TikToken Enable\", \"advanced\": True},\n }\n\n def build(\n self,\n openai_api_key: str,\n default_headers: Optional[Dict[str, str]] = None,\n default_query: Optional[NestedDict] = {},\n allowed_special: List[str] = [],\n disallowed_special: List[str] = [\"all\"],\n chunk_size: int = 1000,\n deployment: str = \"text-embedding-ada-002\",\n embedding_ctx_length: int = 8191,\n max_retries: int = 6,\n model: str = \"text-embedding-ada-002\",\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n openai_api_type: Optional[str] = None,\n openai_api_version: Optional[str] = None,\n openai_organization: Optional[str] = None,\n openai_proxy: Optional[str] = None,\n request_timeout: Optional[float] = None,\n show_progress_bar: bool = False,\n skip_empty: bool = False,\n tiktoken_enable: bool = True,\n tiktoken_model_name: Optional[str] = None,\n ) -> Embeddings:\n # This is to avoid errors with Vector Stores (e.g Chroma)\n if disallowed_special == [\"all\"]:\n disallowed_special = \"all\" # type: ignore\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n return OpenAIEmbeddings(\n tiktoken_enabled=tiktoken_enable,\n default_headers=default_headers,\n default_query=default_query,\n allowed_special=set(allowed_special),\n disallowed_special=\"all\",\n chunk_size=chunk_size,\n deployment=deployment,\n embedding_ctx_length=embedding_ctx_length,\n max_retries=max_retries,\n model=model,\n model_kwargs=model_kwargs,\n base_url=openai_api_base,\n api_key=api_key,\n openai_api_type=openai_api_type,\n api_version=openai_api_version,\n organization=openai_organization,\n openai_proxy=openai_proxy,\n timeout=request_timeout,\n show_progress_bar=show_progress_bar,\n skip_empty=skip_empty,\n tiktoken_model_name=tiktoken_model_name,\n )\n" + }, + "default_headers": { + "advanced": true, + "display_name": "Default Headers", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "default_headers", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "dict" + }, + "default_query": { + "advanced": true, + "display_name": "Default Query", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "default_query", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "NestedDict", + "value": {} + }, + "deployment": { + "advanced": true, + "display_name": "Deployment", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "deployment", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "text-embedding-ada-002" + }, + "disallowed_special": { + "advanced": true, + "display_name": "Disallowed Special", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, 
+ "multiline": false, + "name": "disallowed_special", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": [ + "all" + ] + }, + "embedding_ctx_length": { + "advanced": true, + "display_name": "Embedding Context Length", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "embedding_ctx_length", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 8191 + }, + "max_retries": { + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "max_retries", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 6 + }, + "model": { + "advanced": false, + "display_name": "Model", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": true, + "load_from_db": false, + "multiline": false, + "name": "model", + "options": [ + "text-embedding-3-small", + "text-embedding-3-large", + "text-embedding-ada-002" + ], + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "text-embedding-ada-002" + }, + "model_kwargs": { + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "model_kwargs", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "NestedDict", + "value": {} + }, + "openai_api_base": { + "advanced": true, + "display_name": "OpenAI API Base", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "openai_api_base", + "password": true, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "openai_api_key": { + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": true, + "multiline": false, + "name": "openai_api_key", + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": "OPENAI_API_KEY" + }, + "openai_api_type": { + "advanced": true, + "display_name": "OpenAI API Type", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "openai_api_type", + "password": true, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "openai_api_version": { + "advanced": true, + "display_name": "OpenAI API Version", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "openai_api_version", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "openai_organization": { + "advanced": true, + 
"display_name": "OpenAI Organization", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "openai_organization", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "openai_proxy": { + "advanced": true, + "display_name": "OpenAI Proxy", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "openai_proxy", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + }, + "request_timeout": { + "advanced": true, + "display_name": "Request Timeout", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "request_timeout", + "password": false, + "placeholder": "", + "rangeSpec": { + "max": 1, + "min": -1, + "step": 0.1, + "step_type": "float" + }, + "required": false, + "show": true, + "title_case": false, + "type": "float" + }, + "show_progress_bar": { + "advanced": true, + "display_name": "Show Progress Bar", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "show_progress_bar", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false + }, + "skip_empty": { + "advanced": true, + "display_name": "Skip Empty", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "skip_empty", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false + }, + "tiktoken_enable": { + "advanced": true, + "display_name": "TikToken Enable", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": false, + "name": "tiktoken_enable", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": true + }, + "tiktoken_model_name": { + "advanced": true, + "display_name": "TikToken Model Name", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "tiktoken_model_name", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str" + } + } }, - "id": "OpenAIEmbeddings-9TPjc" + "type": "OpenAIEmbeddings" }, - "selected": false, - "width": 384, + "dragging": false, "height": 383, + "id": "OpenAIEmbeddings-9TPjc", + "position": { + "x": 2814.0402191223047, + "y": 1955.9268168273086 + }, "positionAbsolute": { "x": 2814.0402191223047, "y": 1955.9268168273086 }, - "dragging": false - } - ], - "edges": [ - { - "source": "TextOutput-BDknO", - "target": "Prompt-xeI6K", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œTextOutputœ,œidœ:œTextOutput-BDknOœ}", - "targetHandle": "{œfieldNameœ:œcontextœ,œidœ:œPrompt-xeI6Kœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "id": 
"reactflow__edge-TextOutput-BDknO{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œTextOutputœ,œidœ:œTextOutput-BDknOœ}-Prompt-xeI6K{œfieldNameœ:œcontextœ,œidœ:œPrompt-xeI6Kœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "context", - "id": "Prompt-xeI6K", - "inputTypes": ["Document", "BaseOutputParser", "Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "Text", "str"], - "dataType": "TextOutput", - "id": "TextOutput-BDknO" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "selected": false - }, - { - "source": "ChatInput-yxMKE", - "target": "Prompt-xeI6K", - "sourceHandle": "{œbaseClassesœ:[œTextœ,œstrœ,œobjectœ,œRecordœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-yxMKEœ}", - "targetHandle": "{œfieldNameœ:œquestionœ,œidœ:œPrompt-xeI6Kœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "id": "reactflow__edge-ChatInput-yxMKE{œbaseClassesœ:[œTextœ,œstrœ,œobjectœ,œRecordœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-yxMKEœ}-Prompt-xeI6K{œfieldNameœ:œquestionœ,œidœ:œPrompt-xeI6Kœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "question", - "id": "Prompt-xeI6K", - "inputTypes": ["Document", "BaseOutputParser", "Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["Text", "str", "object", "Record"], - "dataType": "ChatInput", - "id": "ChatInput-yxMKE" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "selected": false - }, - { - "source": "Prompt-xeI6K", - "target": "OpenAIModel-EjXlN", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-xeI6Kœ}", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-EjXlNœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "id": "reactflow__edge-Prompt-xeI6K{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-xeI6Kœ}-OpenAIModel-EjXlN{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-EjXlNœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "OpenAIModel-EjXlN", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "Text", "str"], - "dataType": "Prompt", - "id": "Prompt-xeI6K" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "selected": false - }, - { - "source": "OpenAIModel-EjXlN", - "target": "ChatOutput-Q39I8", - "sourceHandle": "{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-EjXlNœ}", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-Q39I8œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "id": "reactflow__edge-OpenAIModel-EjXlN{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-EjXlNœ}-ChatOutput-Q39I8{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-Q39I8œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "ChatOutput-Q39I8", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["object", "Text", "str"], - "dataType": "OpenAIModel", - "id": "OpenAIModel-EjXlN" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "selected": false - }, - { - "source": "File-t0a6a", - "target": "RecursiveCharacterTextSplitter-tR9QM", - "sourceHandle": 
"{œbaseClassesœ:[œRecordœ],œdataTypeœ:œFileœ,œidœ:œFile-t0a6aœ}", - "targetHandle": "{œfieldNameœ:œinputsœ,œidœ:œRecursiveCharacterTextSplitter-tR9QMœ,œinputTypesœ:[œDocumentœ,œRecordœ],œtypeœ:œDocumentœ}", - "id": "reactflow__edge-File-t0a6a{œbaseClassesœ:[œRecordœ],œdataTypeœ:œFileœ,œidœ:œFile-t0a6aœ}-RecursiveCharacterTextSplitter-tR9QM{œfieldNameœ:œinputsœ,œidœ:œRecursiveCharacterTextSplitter-tR9QMœ,œinputTypesœ:[œDocumentœ,œRecordœ],œtypeœ:œDocumentœ}", - "data": { - "targetHandle": { - "fieldName": "inputs", - "id": "RecursiveCharacterTextSplitter-tR9QM", - "inputTypes": ["Document", "Record"], - "type": "Document" - }, - "sourceHandle": { - "baseClasses": ["Record"], - "dataType": "File", - "id": "File-t0a6a" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "selected": false - }, - { - "source": "OpenAIEmbeddings-ZlOk1", - "sourceHandle": "{œbaseClassesœ:[œEmbeddingsœ],œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-ZlOk1œ}", - "target": "AstraDBSearch-41nRz", - "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œAstraDBSearch-41nRzœ,œinputTypesœ:null,œtypeœ:œEmbeddingsœ}", - "data": { - "targetHandle": { - "fieldName": "embedding", - "id": "AstraDBSearch-41nRz", - "inputTypes": null, - "type": "Embeddings" - }, - "sourceHandle": { - "baseClasses": ["Embeddings"], - "dataType": "OpenAIEmbeddings", - "id": "OpenAIEmbeddings-ZlOk1" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-OpenAIEmbeddings-ZlOk1{œbaseClassesœ:[œEmbeddingsœ],œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-ZlOk1œ}-AstraDBSearch-41nRz{œfieldNameœ:œembeddingœ,œidœ:œAstraDBSearch-41nRzœ,œinputTypesœ:null,œtypeœ:œEmbeddingsœ}" - }, - { - "source": "ChatInput-yxMKE", - "sourceHandle": "{œbaseClassesœ:[œTextœ,œstrœ,œobjectœ,œRecordœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-yxMKEœ}", - "target": "AstraDBSearch-41nRz", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œAstraDBSearch-41nRzœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "AstraDBSearch-41nRz", - "inputTypes": ["Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["Text", "str", "object", "Record"], - "dataType": "ChatInput", - "id": "ChatInput-yxMKE" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-ChatInput-yxMKE{œbaseClassesœ:[œTextœ,œstrœ,œobjectœ,œRecordœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-yxMKEœ}-AstraDBSearch-41nRz{œfieldNameœ:œinput_valueœ,œidœ:œAstraDBSearch-41nRzœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}" - }, - { - "source": "RecursiveCharacterTextSplitter-tR9QM", - "sourceHandle": "{œbaseClassesœ:[œRecordœ],œdataTypeœ:œRecursiveCharacterTextSplitterœ,œidœ:œRecursiveCharacterTextSplitter-tR9QMœ}", - "target": "AstraDB-eUCSS", - "targetHandle": "{œfieldNameœ:œinputsœ,œidœ:œAstraDB-eUCSSœ,œinputTypesœ:null,œtypeœ:œRecordœ}", - "data": { - "targetHandle": { - "fieldName": "inputs", - "id": "AstraDB-eUCSS", - "inputTypes": null, - "type": "Record" - }, - "sourceHandle": { - "baseClasses": ["Record"], - "dataType": "RecursiveCharacterTextSplitter", - "id": "RecursiveCharacterTextSplitter-tR9QM" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": 
"reactflow__edge-RecursiveCharacterTextSplitter-tR9QM{œbaseClassesœ:[œRecordœ],œdataTypeœ:œRecursiveCharacterTextSplitterœ,œidœ:œRecursiveCharacterTextSplitter-tR9QMœ}-AstraDB-eUCSS{œfieldNameœ:œinputsœ,œidœ:œAstraDB-eUCSSœ,œinputTypesœ:null,œtypeœ:œRecordœ}", - "selected": false - }, - { - "source": "OpenAIEmbeddings-9TPjc", - "sourceHandle": "{œbaseClassesœ:[œEmbeddingsœ],œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-9TPjcœ}", - "target": "AstraDB-eUCSS", - "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œAstraDB-eUCSSœ,œinputTypesœ:null,œtypeœ:œEmbeddingsœ}", - "data": { - "targetHandle": { - "fieldName": "embedding", - "id": "AstraDB-eUCSS", - "inputTypes": null, - "type": "Embeddings" - }, - "sourceHandle": { - "baseClasses": ["Embeddings"], - "dataType": "OpenAIEmbeddings", - "id": "OpenAIEmbeddings-9TPjc" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-OpenAIEmbeddings-9TPjc{œbaseClassesœ:[œEmbeddingsœ],œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-9TPjcœ}-AstraDB-eUCSS{œfieldNameœ:œembeddingœ,œidœ:œAstraDB-eUCSSœ,œinputTypesœ:null,œtypeœ:œEmbeddingsœ}", - "selected": false - }, - { - "source": "AstraDBSearch-41nRz", - "sourceHandle": "{œbaseClassesœ:[œRecordœ],œdataTypeœ:œAstraDBSearchœ,œidœ:œAstraDBSearch-41nRzœ}", - "target": "TextOutput-BDknO", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-BDknOœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}", - "data": { - "targetHandle": { - "fieldName": "input_value", - "id": "TextOutput-BDknO", - "inputTypes": ["Record", "Text"], - "type": "str" - }, - "sourceHandle": { - "baseClasses": ["Record"], - "dataType": "AstraDBSearch", - "id": "AstraDBSearch-41nRz" - } - }, - "style": { - "stroke": "#555" - }, - "className": "stroke-gray-900 stroke-connection", - "id": "reactflow__edge-AstraDBSearch-41nRz{œbaseClassesœ:[œRecordœ],œdataTypeœ:œAstraDBSearchœ,œidœ:œAstraDBSearch-41nRzœ}-TextOutput-BDknO{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-BDknOœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}" + "selected": false, + "type": "genericNode", + "width": 384 } ], "viewport": { @@ -3145,7 +3307,8 @@ } }, "description": "Visit https://pre-release.langflow.org/tutorials/rag-with-astradb for a detailed guide of this project.\nThis project give you both Ingestion and RAG in a single file. You'll need to visit https://astra.datastax.com/ to create an Astra DB instance, your Token and grab an API Endpoint.\nRunning this project requires you to add a file in the Files component, then define a Collection Name and click on the Play icon on the Astra DB component. 
\n\nAfter the ingestion ends you are ready to click on the Run button at the lower left corner and start asking questions about your data.", - "name": "Vector Store RAG", + "id": "51e2b78a-199b-4054-9f32-e288eef6924c", + "is_component": false, "last_tested_version": "1.0.0a0", - "is_component": false -} + "name": "Vector Store RAG" +} \ No newline at end of file diff --git a/src/backend/base/langflow/interface/initialize/loading.py b/src/backend/base/langflow/interface/initialize/loading.py index 03de827b3..afe653e42 100644 --- a/src/backend/base/langflow/interface/initialize/loading.py +++ b/src/backend/base/langflow/interface/initialize/loading.py @@ -7,7 +7,8 @@ import orjson from loguru import logger from langflow.custom.eval import eval_custom_component_code -from langflow.schema.schema import Record +from langflow.graph.utils import get_artifact_type, post_process_raw +from langflow.schema import Record if TYPE_CHECKING: from langflow.custom import CustomComponent @@ -70,7 +71,7 @@ def update_params_with_load_from_db_fields( try: key = None try: - key = custom_component.variables(params[field]) + key = custom_component.variables(params[field], field) except ValueError as e: # check if "User id is not set" is in the error message if "User id is not set" in str(e) and not fallback_to_env_vars: @@ -84,8 +85,12 @@ def update_params_with_load_from_db_fields( logger.info(f"Using environment variable {params[field]} for {field}") if key is None: logger.warning(f"Could not get value for {field}. Setting it to None.") + params[field] = key + except TypeError as exc: + raise exc + except Exception as exc: logger.error(f"Failed to get value for {field} from custom component. Setting it to None. Error: {exc}") @@ -124,4 +129,14 @@ async def instantiate_custom_component(params, user_id, vertex, fallback_to_env_ custom_repr = build_result if not isinstance(custom_repr, str): custom_repr = str(custom_repr) - return custom_component, build_result, {"repr": custom_repr} + raw = custom_component.repr_value + if hasattr(raw, "data") and raw is not None: + raw = raw.data + + elif hasattr(raw, "model_dump") and raw is not None: + raw = raw.model_dump() + + artifact_type = get_artifact_type(custom_component, build_result) + raw = post_process_raw(raw, artifact_type) + artifact = {"repr": custom_repr, "raw": raw, "type": artifact_type} + return custom_component, build_result, artifact diff --git a/src/backend/base/langflow/memory.py b/src/backend/base/langflow/memory.py index f44958fdd..9a07f4bc1 100644 --- a/src/backend/base/langflow/memory.py +++ b/src/backend/base/langflow/memory.py @@ -1,9 +1,9 @@ import warnings -from typing import List, Optional, Union +from typing import List, Optional from loguru import logger -from langflow.schema import Record +from langflow.schema.message import Message from langflow.services.deps import get_monitor_service from langflow.services.monitor.schema import MessageModel @@ -39,54 +39,46 @@ def get_messages( order=order, ) - records: list[Record] = [] + messages: list[Message] = [] # messages_df has a timestamp # it gets the last 5 messages, for example # but now they are ordered from most recent to least recent # so we need to reverse the order messages_df = messages_df[::-1] if order == "DESC" else messages_df for row in messages_df.itertuples(): - record = Record( - data={ - "text": row.message, - "sender": row.sender, - "sender_name": row.sender_name, - "session_id": row.session_id, - "timestamp": row.timestamp, - }, - ) - records.append(record) + msg = 
Message(text=row.text, sender=row.sender, sender_name=row.sender_name, timestamp=row.timestamp) - return records + messages.append(msg) + + return messages -def add_messages(records: Union[list[Record], Record], flow_id: Optional[str] = None): +def add_messages(messages: Message | list[Message], flow_id: Optional[str] = None): """ Add a message to the monitor service. """ try: monitor_service = get_monitor_service() + if not isinstance(messages, list): + messages = [messages] - if isinstance(records, Record): - records = [records] + if not all(isinstance(message, Message) for message in messages): + types = ", ".join([str(type(message)) for message in messages]) + raise ValueError(f"The messages must be instances of Message. Found: {types}") - if not all(isinstance(record, (Record, str)) for record in records): - types = ", ".join([str(type(record)) for record in records]) - raise ValueError(f"The records must be instances of Record. Found: {types}") + messages_models: list[MessageModel] = [] + for msg in messages: + msg.timestamp = monitor_service.get_timestamp() + messages_models.append(MessageModel.from_message(msg, flow_id=flow_id)) - messages: list[MessageModel] = [] - for record in records: - record.timestamp = monitor_service.get_timestamp() - messages.append(MessageModel.from_record(record, flow_id=flow_id)) - - for message in messages: + for message_model in messages_models: try: - monitor_service.add_message(message) + monitor_service.add_message(message_model) except Exception as e: logger.error(f"Error adding message to monitor service: {e}") logger.exception(e) raise e - return records + return messages_models except Exception as e: logger.exception(e) raise e @@ -100,28 +92,22 @@ def delete_messages(session_id: str): session_id (str): The session ID associated with the messages to delete. """ monitor_service = get_monitor_service() - monitor_service.delete_messages(session_id) + monitor_service.delete_messages_session(session_id) def store_message( - message: Union[str, Record], - session_id: Optional[str] = None, - sender: Optional[str] = None, - sender_name: Optional[str] = None, + message: Message, flow_id: Optional[str] = None, -) -> List[Record]: +) -> List[Message]: """ Stores a message in the memory. Args: - message (Union[str, Record]): The message to be stored. It can be either a string or a Record object. - session_id (Optional[str]): The session ID associated with the message. - sender (Optional[str]): The sender ID associated with the message. - sender_name (Optional[str]): The name of the sender associated with the message. + message (Message): The message to store. flow_id (Optional[str]): The flow ID associated with the message. When running from the CustomComponent you can access this using `self.graph.flow_id`. Returns: - List[Record]: A list of records containing the stored message. + List[Message]: A list of records containing the stored message. Raises: ValueError: If any of the required parameters (session_id, sender, sender_name) is not provided. 
@@ -130,26 +116,7 @@ def store_message( warnings.warn("No message provided.") return [] - if not session_id or not sender or not sender_name: + if not message.session_id or not message.sender or not message.sender_name: raise ValueError("All of session_id, sender, and sender_name must be provided.") - if isinstance(message, Record): - record = message - record.data.update( - { - "session_id": session_id, - "sender": sender, - "sender_name": sender_name, - } - ) - elif isinstance(message, str): - record = Record( - data={ - "text": message, - "session_id": session_id, - "sender": sender, - "sender_name": sender_name, - }, - ) - - return add_messages([record], flow_id=flow_id) + return add_messages([message], flow_id=flow_id) diff --git a/src/backend/base/langflow/processing/process.py b/src/backend/base/langflow/processing/process.py index aeff0f1a4..1b54d3f08 100644 --- a/src/backend/base/langflow/processing/process.py +++ b/src/backend/base/langflow/processing/process.py @@ -59,7 +59,7 @@ async def run_graph_internal( outputs or [], stream=stream, session_id=session_id_str or "", - fallback_to_env_vars=fallback_to_env_vars + fallback_to_env_vars=fallback_to_env_vars, ) if session_id_str and session_service: await session_service.update_session(session_id_str, (graph, artifacts)) diff --git a/src/backend/base/langflow/schema/__init__.py b/src/backend/base/langflow/schema/__init__.py index 14230578c..9f7e3b384 100644 --- a/src/backend/base/langflow/schema/__init__.py +++ b/src/backend/base/langflow/schema/__init__.py @@ -1,4 +1,4 @@ from .dotdict import dotdict -from .schema import Record +from .record import Record __all__ = ["Record", "dotdict"] diff --git a/src/backend/base/langflow/schema/image.py b/src/backend/base/langflow/schema/image.py new file mode 100644 index 000000000..552f75b8b --- /dev/null +++ b/src/backend/base/langflow/schema/image.py @@ -0,0 +1,63 @@ +import base64 + +from PIL import Image as PILImage +from pydantic import BaseModel + +from langflow.services.deps import get_storage_service + +IMAGE_ENDPOINT = "/files/images/" + + +def is_image_file(file_path): + try: + with PILImage.open(file_path) as img: + img.verify() # Verify that it is, in fact, an image + return True + except (IOError, SyntaxError): + return False + + +async def get_file_paths(files: list[str]): + storage_service = get_storage_service() + file_paths = [] + for file in files: + flow_id, file_name = file.split("/") + file_paths.append(storage_service.build_full_path(flow_id=flow_id, file_name=file_name)) + return file_paths + + +async def get_files( + file_paths: list[str], + convert_to_base64: bool = False, +): + storage_service = get_storage_service() + file_objects: list[str | bytes] = [] + for file_path in file_paths: + flow_id, file_name = file_path.split("/") + file_object = await storage_service.get_file(flow_id=flow_id, file_name=file_name) + if convert_to_base64: + file_base64 = base64.b64encode(file_object).decode("utf-8") + file_objects.append(file_base64) + else: + file_objects.append(file_object) + return file_objects + + +class Image(BaseModel): + path: str | None = None + url: str | None = None + + def to_base64(self): + if self.path: + files = get_files([self.path], convert_to_base64=True) + return files[0] + raise ValueError("Image path is not set.") + + def to_content_dict(self): + return { + "type": "image_url", + "image_url": self.to_base64(), + } + + def get_url(self): + return f"{IMAGE_ENDPOINT}{self.path}" diff --git a/src/backend/base/langflow/schema/message.py 
b/src/backend/base/langflow/schema/message.py new file mode 100644 index 000000000..865d684bf --- /dev/null +++ b/src/backend/base/langflow/schema/message.py @@ -0,0 +1,111 @@ +from datetime import datetime, timezone +from typing import Annotated, Any, AsyncIterator, Iterator, Optional + +from langchain_core.messages import AIMessage, BaseMessage, HumanMessage +from langchain_core.prompt_values import ImagePromptValue +from langchain_core.prompts.image import ImagePromptTemplate +from pydantic import BaseModel, BeforeValidator, ConfigDict, Field, field_serializer + +from langflow.schema.image import Image, get_file_paths, is_image_file +from langflow.schema.record import Record + + +def _timestamp_to_str(timestamp: datetime) -> str: + return timestamp.strftime("%Y-%m-%d %H:%M:%S") + + +class Message(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + # Helper class to deal with image data + text: Optional[str | AsyncIterator | Iterator] = Field(default="") + sender: str + sender_name: str + files: Optional[list[str | Image]] = Field(default=[]) + session_id: Optional[str] = Field(default="") + timestamp: Annotated[str, BeforeValidator(_timestamp_to_str)] = Field( + default=datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") + ) + flow_id: Optional[str] = None + + def model_post_init(self, __context: Any) -> None: + new_files = [] + for file in self.files or []: + if is_image_file(file): + new_files.append(Image(path=file)) + else: + new_files.append(file) + self.files = new_files + + def to_lc_message( + self, + ) -> BaseMessage: + """ + Converts the Message to a BaseMessage. + + Returns: + BaseMessage: The converted BaseMessage. + """ + # The idea of this function is to be a helper to convert a Message to a BaseMessage + # It will use the "sender" key to determine if the message is Human or AI + # If the key is not present, it will default to AI + # But first we check that the required fields ("text", "sender") are set + if self.text is None or not self.sender: + raise ValueError("Missing required keys ('text', 'sender') in Message.") + + if self.sender == "User": + if self.files: + contents = [{"type": "text", "text": self.text}] + contents.extend(self.get_file_content_dicts()) + human_message = HumanMessage(content=contents) + else: + human_message = HumanMessage( + content=[{"type": "text", "text": self.text}], + ) + + return human_message + + return AIMessage(content=self.text) + + @classmethod + def from_record(cls, record: Record) -> "Message": + """ + Converts a Record to a Message. + + Args: + record (Record): The Record to convert. + + Returns: + Message: The converted Message. 
+ """ + + return cls( + text=record.text, + sender=record.sender, + sender_name=record.sender_name, + files=record.files, + session_id=record.session_id, + timestamp=record.timestamp, + flow_id=record.flow_id, + ) + + @field_serializer("text", mode="plain") + def serialize_text(self, value): + if isinstance(value, AsyncIterator): + return "" + elif isinstance(value, Iterator): + return "" + return value + + async def get_file_content_dicts(self): + content_dicts = [] + files = await get_file_paths(self.files) + + for file in files: + if isinstance(file, Image): + content_dicts.append(file.to_content_dict()) + else: + image_template = ImagePromptTemplate() + image_prompt_value: ImagePromptValue = image_template.invoke(input={"path": file}) + content_dicts.append({"type": "image_url", "image_url": image_prompt_value.image_url}) + return content_dicts diff --git a/src/backend/base/langflow/schema/record.py b/src/backend/base/langflow/schema/record.py new file mode 100644 index 000000000..830f576ba --- /dev/null +++ b/src/backend/base/langflow/schema/record.py @@ -0,0 +1,202 @@ +import copy +import json +from typing import cast, Optional + +from langchain_core.documents import Document +from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage +from langchain_core.prompts.image import ImagePromptTemplate +from pydantic import BaseModel, model_serializer, model_validator +from langchain_core.prompt_values import ImagePromptValue + + +class Record(BaseModel): + """ + Represents a record with text and optional data. + + Attributes: + data (dict, optional): Additional data associated with the record. + """ + + text_key: str = "text" + data: dict = {} + default_value: Optional[str] = "" + + @model_validator(mode="before") + def validate_data(cls, values): + if not values.get("data"): + values["data"] = {} + # Any other keyword should be added to the data dictionary + for key in values: + if key not in values["data"] and key not in {"text_key", "data", "default_value"}: + values["data"][key] = values[key] + return values + + @model_serializer(mode="plain", when_used="json") + def serialize_model(self): + data = {k: v.to_json() if hasattr(v, "to_json") else v for k, v in self.data.items()} + return data + + def get_text(self): + """ + Retrieves the text value from the data dictionary. + + If the text key is present in the data dictionary, the corresponding value is returned. + Otherwise, the default value is returned. + + Returns: + The text value from the data dictionary or the default value. + """ + return self.data.get(self.text_key, self.default_value) + + @classmethod + def from_document(cls, document: Document) -> "Record": + """ + Converts a Document to a Record. + + Args: + document (Document): The Document to convert. + + Returns: + Record: The converted Record. + """ + data = document.metadata + data["text"] = document.page_content + return cls(data=data, text_key="text") + + @classmethod + def from_lc_message(cls, message: BaseMessage) -> "Record": + """ + Converts a BaseMessage to a Record. + + Args: + message (BaseMessage): The BaseMessage to convert. + + Returns: + Record: The converted Record. + """ + data: dict = {"text": message.content} + data["metadata"] = cast(dict, message.to_json()) + return cls(data=data, text_key="text") + + def __add__(self, other: "Record") -> "Record": + """ + Combines the data of two records by attempting to add values for overlapping keys + for all types that support the addition operation. 
Falls back to the value from 'other' + record when addition is not supported. + """ + combined_data = self.data.copy() + for key, value in other.data.items(): + # If the key exists in both records and both values support the addition operation + if key in combined_data: + try: + combined_data[key] += value + except TypeError: + # Fallback: Use the value from 'other' record if addition is not supported + combined_data[key] = value + else: + # If the key is not in the first record, simply add it + combined_data[key] = value + + return Record(data=combined_data) + + def to_lc_document(self) -> Document: + """ + Converts the Record to a Document. + + Returns: + Document: The converted Document. + """ + text = self.data.pop(self.text_key, self.default_value) + return Document(page_content=text, metadata=self.data) + + def to_lc_message( + self, + ) -> HumanMessage | SystemMessage: + """ + Converts the Record to a BaseMessage. + + Returns: + BaseMessage: The converted BaseMessage. + """ + # The idea of this function is to be a helper to convert a Record to a BaseMessage + # It will use the "sender" key to determine if the message is Human or AI + # If the key is not present, it will default to AI + # But first we check if all required keys are present in the data dictionary + # they are: "text", "sender" + if not all(key in self.data for key in ["text", "sender"]): + raise ValueError(f"Missing required keys ('text', 'sender') in Record: {self.data}") + sender = self.data.get("sender", "Machine") + text = self.data.get("text", "") + files = self.data.get("files", []) + if sender == "User": + if files: + contents = [{"type": "text", "text": text}] + for file_path in files: + image_template = ImagePromptTemplate() + image_prompt_value: ImagePromptValue = image_template.invoke(input={"path": file_path}) + contents.append({"type": "image_url", "image_url": image_prompt_value.image_url}) + human_message = HumanMessage(content=contents) + else: + human_message = HumanMessage( + content=[{"type": "text", "text": text}], + ) + + return human_message + + return AIMessage(content=text) + + def __getattr__(self, key): + """ + Allows attribute-like access to the data dictionary. + """ + try: + if key.startswith("__"): + return self.__getattribute__(key) + if key in {"data", "text_key"} or key.startswith("_"): + return super().__getattr__(key) + + return self.data.get(key, self.default_value) + except KeyError: + # Fallback to default behavior to raise AttributeError for undefined attributes + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{key}'") + + def __setattr__(self, key, value): + """ + Allows attribute-like setting of values in the data dictionary, + while still allowing direct assignment to class attributes. + """ + if key in {"data", "text_key"} or key.startswith("_"): + super().__setattr__(key, value) + else: + self.data[key] = value + + def __delattr__(self, key): + """ + Allows attribute-like deletion from the data dictionary. + """ + if key in {"data", "text_key"} or key.startswith("_"): + super().__delattr__(key) + else: + del self.data[key] + + def __deepcopy__(self, memo): + """ + Custom deepcopy implementation to handle copying of the Record object. 
+ """ + # Create a new Record object with a deep copy of the data dictionary + return Record(data=copy.deepcopy(self.data, memo), text_key=self.text_key, default_value=self.default_value) + + # check which attributes the Record has by checking the keys in the data dictionary + def __dir__(self): + return super().__dir__() + list(self.data.keys()) + + def __str__(self) -> str: + # return a JSON string representation of the Record atributes + try: + data = {k: v.to_json() if hasattr(v, "to_json") else v for k, v in self.data.items()} + return json.dumps(data, indent=4) + except Exception: + return str(self.data) + + def __contains__(self, key): + return key in self.data diff --git a/src/backend/base/langflow/schema/schema.py b/src/backend/base/langflow/schema/schema.py index 921bd65b2..5153941a5 100644 --- a/src/backend/base/langflow/schema/schema.py +++ b/src/backend/base/langflow/schema/schema.py @@ -1,179 +1,17 @@ -import copy -import json -from typing import Literal, Optional, cast - -from langchain_core.documents import Document -from langchain_core.messages import AIMessage, BaseMessage, HumanMessage -from pydantic import BaseModel, model_validator - - -class Record(BaseModel): - """ - Represents a record with text and optional data. - - Attributes: - data (dict, optional): Additional data associated with the record. - """ - - text_key: str = "text" - data: dict = {} - default_value: Optional[str] = "" - - @model_validator(mode="before") - def validate_data(cls, values): - if not values.get("data"): - values["data"] = {} - # Any other keyword should be added to the data dictionary - for key in values: - if key not in values["data"] and key not in {"text_key", "data", "default_value"}: - values["data"][key] = values[key] - return values - - def get_text(self): - """ - Retrieves the text value from the data dictionary. - - If the text key is present in the data dictionary, the corresponding value is returned. - Otherwise, the default value is returned. - - Returns: - The text value from the data dictionary or the default value. - """ - return self.data.get(self.text_key, self.default_value) - - @classmethod - def from_document(cls, document: Document) -> "Record": - """ - Converts a Document to a Record. - - Args: - document (Document): The Document to convert. - - Returns: - Record: The converted Record. - """ - data = document.metadata - data["text"] = document.page_content - return cls(data=data, text_key="text") - - @classmethod - def from_lc_message(cls, message: BaseMessage) -> "Record": - """ - Converts a BaseMessage to a Record. - - Args: - message (BaseMessage): The BaseMessage to convert. - - Returns: - Record: The converted Record. - """ - data: dict = {"text": message.content} - data["metadata"] = cast(dict, message.to_json()) - return cls(data=data, text_key="text") - - def __add__(self, other: "Record") -> "Record": - """ - Combines the data of two records by attempting to add values for overlapping keys - for all types that support the addition operation. Falls back to the value from 'other' - record when addition is not supported. 
- """ - combined_data = self.data.copy() - for key, value in other.data.items(): - # If the key exists in both records and both values support the addition operation - if key in combined_data: - try: - combined_data[key] += value - except TypeError: - # Fallback: Use the value from 'other' record if addition is not supported - combined_data[key] = value - else: - # If the key is not in the first record, simply add it - combined_data[key] = value - - return Record(data=combined_data) - - def to_lc_document(self) -> Document: - """ - Converts the Record to a Document. - - Returns: - Document: The converted Document. - """ - text = self.data.pop(self.text_key, self.default_value) - return Document(page_content=text, metadata=self.data) - - def to_lc_message(self) -> BaseMessage: - """ - Converts the Record to a BaseMessage. - - Returns: - BaseMessage: The converted BaseMessage. - """ - # The idea of this function is to be a helper to convert a Record to a BaseMessage - # It will use the "sender" key to determine if the message is Human or AI - # If the key is not present, it will default to AI - # But first we check if all required keys are present in the data dictionary - # they are: "text", "sender" - if not all(key in self.data for key in ["text", "sender"]): - raise ValueError(f"Missing required keys ('text', 'sender') in Record: {self.data}") - sender = self.data.get("sender", "Machine") - text = self.data.get("text", "") - if sender == "User": - return HumanMessage(content=text) - return AIMessage(content=text) - - def __getattr__(self, key): - """ - Allows attribute-like access to the data dictionary. - """ - try: - if key.startswith("__"): - return self.__getattribute__(key) - if key in {"data", "text_key"} or key.startswith("_"): - return super().__getattr__(key) - - return self.data.get(key, self.default_value) - except KeyError: - # Fallback to default behavior to raise AttributeError for undefined attributes - raise AttributeError(f"'{type(self).__name__}' object has no attribute '{key}'") - - def __setattr__(self, key, value): - """ - Allows attribute-like setting of values in the data dictionary, - while still allowing direct assignment to class attributes. - """ - if key in {"data", "text_key"} or key.startswith("_"): - super().__setattr__(key, value) - else: - self.data[key] = value - - def __delattr__(self, key): - """ - Allows attribute-like deletion from the data dictionary. - """ - if key in {"data", "text_key"} or key.startswith("_"): - super().__delattr__(key) - else: - del self.data[key] - - def __deepcopy__(self, memo): - """ - Custom deepcopy implementation to handle copying of the Record object. 
- """ - # Create a new Record object with a deep copy of the data dictionary - return Record(data=copy.deepcopy(self.data, memo), text_key=self.text_key, default_value=self.default_value) - - # check which attributes the Record has by checking the keys in the data dictionary - def __dir__(self): - return super().__dir__() + list(self.data.keys()) - - def __str__(self) -> str: - # return a JSON string representation of the Record atributes - - return json.dumps(self.data) +from typing import Literal +from typing_extensions import TypedDict INPUT_FIELD_NAME = "input_value" InputType = Literal["chat", "text", "any"] OutputType = Literal["chat", "text", "any", "debug"] + + +class StreamURL(TypedDict): + location: str + + +class Log(TypedDict): + message: str | dict | StreamURL + type: str diff --git a/src/backend/base/langflow/services/auth/utils.py b/src/backend/base/langflow/services/auth/utils.py index 0e0aead88..f62d3ce4f 100644 --- a/src/backend/base/langflow/services/auth/utils.py +++ b/src/backend/base/langflow/services/auth/utils.py @@ -215,10 +215,7 @@ def create_user_longterm_token(db: Session = Depends(get_session)) -> tuple[UUID username = settings_service.auth_settings.SUPERUSER super_user = get_user_by_username(db, username) if not super_user: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="Super user hasn't been created" - ) + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Super user hasn't been created") access_token_expires_longterm = timedelta(days=365) access_token = create_token( data={"sub": str(super_user.id)}, diff --git a/src/backend/base/langflow/services/cache/utils.py b/src/backend/base/langflow/services/cache/utils.py index ff19836ef..a89963f56 100644 --- a/src/backend/base/langflow/services/cache/utils.py +++ b/src/backend/base/langflow/services/cache/utils.py @@ -23,6 +23,9 @@ class CacheMiss: def __repr__(self): return "" + def __bool__(self): + return False + def create_cache_folder(func): def wrapper(*args, **kwargs): diff --git a/src/backend/base/langflow/services/database/models/flow/model.py b/src/backend/base/langflow/services/database/models/flow/model.py index 7727c7b86..a3f9a055d 100644 --- a/src/backend/base/langflow/services/database/models/flow/model.py +++ b/src/backend/base/langflow/services/database/models/flow/model.py @@ -13,7 +13,7 @@ from pydantic import field_serializer, field_validator from sqlalchemy import UniqueConstraint from sqlmodel import JSON, Column, Field, Relationship, SQLModel -from langflow.schema.schema import Record +from langflow.schema import Record if TYPE_CHECKING: from langflow.services.database.models.folder import Folder @@ -29,6 +29,7 @@ class FlowBase(SQLModel): is_component: Optional[bool] = Field(default=False, nullable=True) updated_at: Optional[datetime] = Field(default_factory=lambda: datetime.now(timezone.utc), nullable=True) webhook: Optional[bool] = Field(default=False, nullable=True, description="Can be used on the webhook endpoint") + folder_id: Optional[UUID] = Field(default=None, nullable=True) endpoint_name: Optional[str] = Field(default=None, nullable=True, index=True) @field_validator("endpoint_name") diff --git a/src/backend/base/langflow/services/monitor/schema.py b/src/backend/base/langflow/services/monitor/schema.py index e7bc7a963..ca267ac2b 100644 --- a/src/backend/base/langflow/services/monitor/schema.py +++ b/src/backend/base/langflow/services/monitor/schema.py @@ -1,35 +1,35 @@ import json from datetime import datetime -from typing import 
TYPE_CHECKING, Any, Optional +from typing import Any, Optional from pydantic import BaseModel, Field, field_serializer, field_validator -if TYPE_CHECKING: - from langflow.schema import Record +from langflow.schema.message import Message class TransactionModel(BaseModel): index: Optional[int] = Field(default=None) timestamp: Optional[datetime] = Field(default_factory=datetime.now, alias="timestamp") - flow_id: str - source: str - target: str - target_args: dict + vertex_id: str + target_id: str | None = None + inputs: dict + outputs: Optional[dict] = None status: str error: Optional[str] = None + flow_id: Optional[str] = Field(default=None, alias="flow_id") class Config: from_attributes = True populate_by_name = True # validate target_args in case it is a JSON - @field_validator("target_args", mode="before") + @field_validator("outputs", "inputs", mode="before") def validate_target_args(cls, v): if isinstance(v, str): return json.loads(v) return v - @field_serializer("target_args") + @field_serializer("outputs", "inputs") def serialize_target_args(v): if isinstance(v, dict): return json.dumps(v) @@ -39,19 +39,21 @@ class TransactionModel(BaseModel): class TransactionModelResponse(BaseModel): index: Optional[int] = Field(default=None) timestamp: Optional[datetime] = Field(default_factory=datetime.now, alias="timestamp") - flow_id: str - source: str - target: str - target_args: dict + vertex_id: str + inputs: dict + outputs: Optional[dict] = None status: str error: Optional[str] = None + flow_id: Optional[str] = Field(default=None, alias="flow_id") + source: Optional[str] = None + target: Optional[str] = None class Config: from_attributes = True populate_by_name = True # validate target_args in case it is a JSON - @field_validator("target_args", mode="before") + @field_validator("outputs", "inputs", mode="before") def validate_target_args(cls, v): if isinstance(v, str): return json.loads(v) @@ -74,31 +76,31 @@ class MessageModel(BaseModel): sender: str sender_name: str session_id: str - message: str - artifacts: dict + text: str + files: list[str] = [] class Config: from_attributes = True populate_by_name = True - @field_validator("artifacts", mode="before") - def validate_target_args(cls, v): + @field_validator("files", mode="before") + def validate_files(cls, v): if isinstance(v, str): return json.loads(v) return v @classmethod - def from_record(cls, record: "Record", flow_id: Optional[str] = None): + def from_message(cls, message: Message, flow_id: Optional[str] = None): # first check if the record has all the required fields - if not record.data or ("sender" not in record.data and "sender_name" not in record.data): - raise ValueError("The record does not have the required fields 'sender' and 'sender_name' in the data.") + if not message.text or not message.sender or not message.sender_name: + raise ValueError("The message does not have the required fields 'sender' and 'sender_name' in the data.") return cls( - sender=record.sender, - sender_name=record.sender_name, - message=record.text, - session_id=record.session_id, - artifacts=record.artifacts or {}, - timestamp=record.timestamp, + sender=message.sender, + sender_name=message.sender_name, + text=message.text, + session_id=message.session_id, + files=message.files or [], + timestamp=message.timestamp, flow_id=flow_id, ) @@ -106,12 +108,6 @@ class MessageModel(BaseModel): class MessageModelResponse(MessageModel): index: Optional[int] = Field(default=None) - @field_validator("artifacts", mode="before") - def serialize_artifacts(v): - if 
isinstance(v, str): - return json.loads(v) - return v - @field_validator("index", mode="before") def validate_id(cls, v): if isinstance(v, float): @@ -122,6 +118,13 @@ class MessageModelResponse(MessageModel): return v +class MessageModelRequest(MessageModel): + text: str = Field(default="") + sender: str = Field(default="") + sender_name: str = Field(default="") + session_id: str = Field(default="") + + class VertexBuildModel(BaseModel): index: Optional[int] = Field(default=None, alias="index", exclude=True) id: Optional[str] = Field(default=None, alias="id") diff --git a/src/backend/base/langflow/services/monitor/service.py b/src/backend/base/langflow/services/monitor/service.py index 9fce7dd59..2badabc1f 100644 --- a/src/backend/base/langflow/services/monitor/service.py +++ b/src/backend/base/langflow/services/monitor/service.py @@ -32,6 +32,10 @@ class MonitorService(Service): except Exception as e: logger.exception(f"Error initializing monitor service: {e}") + def exec_query(self, query: str): + with duckdb.connect(str(self.db_path)) as conn: + return conn.execute(query).df() + def to_df(self, table_name): return self.load_table_as_dataframe(table_name) @@ -69,7 +73,7 @@ class MonitorService(Service): valid: Optional[bool] = None, order_by: Optional[str] = "timestamp", ): - query = "SELECT index,flow_id, valid, params, data, artifacts, timestamp FROM vertex_builds" + query = "SELECT id, index,flow_id, valid, params, data, artifacts, timestamp FROM vertex_builds" conditions = [] if flow_id: conditions.append(f"flow_id = '{flow_id}'") @@ -98,11 +102,22 @@ class MonitorService(Service): with duckdb.connect(str(self.db_path)) as conn: conn.execute(query) - def delete_messages(self, session_id: str): + def delete_messages_session(self, session_id: str): query = f"DELETE FROM messages WHERE session_id = '{session_id}'" - with duckdb.connect(str(self.db_path)) as conn: - conn.execute(query) + return self.exec_query(query) + + def delete_messages(self, message_ids: list[int]): + query = f"DELETE FROM messages WHERE index IN ({','.join(map(str, message_ids))})" + + return self.exec_query(query) + + def update_message(self, message_id: int, **kwargs): + query = ( + f"""UPDATE messages SET {', '.join(f"{k} = '{v}'" for k, v in kwargs.items())} WHERE index = {message_id}""" + ) + + return self.exec_query(query) def add_message(self, message: MessageModel): self.add_row("messages", message) @@ -117,7 +132,7 @@ class MonitorService(Service): order: Optional[str] = "DESC", limit: Optional[int] = None, ): - query = "SELECT index, flow_id, sender_name, sender, session_id, message, artifacts, timestamp FROM messages" + query = "SELECT index, flow_id, sender_name, sender, session_id, text, timestamp FROM messages" conditions = [] if sender: conditions.append(f"sender = '{sender}'") @@ -151,7 +166,9 @@ class MonitorService(Service): order_by: Optional[str] = "timestamp", flow_id: Optional[str] = None, ): - query = "SELECT index,flow_id, source, target, target_args, status, error, timestamp FROM transactions" + query = ( + "SELECT index,flow_id, status, error, timestamp, vertex_id, inputs, outputs, target_id FROM transactions" + ) conditions = [] if source: conditions.append(f"source = '{source}'") @@ -166,7 +183,7 @@ class MonitorService(Service): query += " WHERE " + " AND ".join(conditions) if order_by: - query += f" ORDER BY {order_by}" + query += f" ORDER BY {order_by} DESC" with duckdb.connect(str(self.db_path)) as conn: df = conn.execute(query).df() diff --git 
a/src/backend/base/langflow/services/monitor/utils.py b/src/backend/base/langflow/services/monitor/utils.py index f603b3fde..706d62348 100644 --- a/src/backend/base/langflow/services/monitor/utils.py +++ b/src/backend/base/langflow/services/monitor/utils.py @@ -119,21 +119,16 @@ async def log_message( sender_name: str, message: str, session_id: str, - artifacts: Optional[dict] = None, + files: Optional[list] = None, flow_id: Optional[str] = None, ): try: - from langflow.graph.vertex.base import Vertex - - if isinstance(session_id, Vertex): - session_id = await session_id.build() # type: ignore - monitor_service = get_monitor_service() row = { "sender": sender, "sender_name": sender_name, "message": message, - "artifacts": artifacts or {}, + "files": files or [], "session_id": session_id, "timestamp": monitor_service.get_timestamp(), "flow_id": flow_id, @@ -183,17 +178,19 @@ def build_clean_params(target: "Vertex") -> dict: return params -def log_transaction(vertex: "Vertex", status, error=None): +def log_transaction(flow_id, vertex: "Vertex", status, target: Optional["Vertex"] = None, error=None): try: monitor_service = get_monitor_service() clean_params = build_clean_params(vertex) data = { - "vertex_id": vertex.id, + "vertex_id": str(vertex.id), + "target_id": str(target.id) if target else None, "inputs": clean_params, - "output": str(vertex.result), + "outputs": vertex.result.model_dump_json() if vertex.result else None, "timestamp": monitor_service.get_timestamp(), "status": status, "error": error, + "flow_id": flow_id, } monitor_service.add_row(table_name="transactions", data=data) except Exception as e: diff --git a/src/backend/base/langflow/services/settings/base.py b/src/backend/base/langflow/services/settings/base.py index 259e10170..679d16627 100644 --- a/src/backend/base/langflow/services/settings/base.py +++ b/src/backend/base/langflow/services/settings/base.py @@ -70,7 +70,7 @@ class Settings(BaseSettings): """Database URL for Langflow. If not provided, Langflow will use a SQLite database.""" pool_size: int = 10 """The number of connections to keep open in the connection pool. If not provided, the default is 10.""" - max_overflow: int = 10 + max_overflow: int = 20 """The number of connections to allow that can be opened beyond the pool size. 
If not provided, the default is 10.""" cache_type: str = "async" remove_api_keys: bool = False @@ -78,7 +78,6 @@ class Settings(BaseSettings): langchain_cache: str = "InMemoryCache" load_flows_path: Optional[str] = None - # Redis redis_host: str = "localhost" redis_port: int = 6379 diff --git a/src/backend/base/langflow/services/settings/service.py b/src/backend/base/langflow/services/settings/service.py index 95088e829..3ecdb683d 100644 --- a/src/backend/base/langflow/services/settings/service.py +++ b/src/backend/base/langflow/services/settings/service.py @@ -1,5 +1,4 @@ import os -from typing import Optional import yaml from loguru import logger @@ -8,6 +7,7 @@ from langflow.services.base import Service from langflow.services.settings.auth import AuthSettings from langflow.services.settings.base import Settings + class SettingsService(Service): name = "settings_service" diff --git a/src/backend/base/langflow/services/variable/service.py b/src/backend/base/langflow/services/variable/service.py index 84671e0f9..b2389e890 100644 --- a/src/backend/base/langflow/services/variable/service.py +++ b/src/backend/base/langflow/services/variable/service.py @@ -54,11 +54,19 @@ class VariableService(Service): self, user_id: Union[UUID, str], name: str, + field: str, session: Session = Depends(get_session), ) -> str: # we get the credential from the database # credential = session.query(Variable).filter(Variable.user_id == user_id, Variable.name == name).first() variable = session.exec(select(Variable).where(Variable.user_id == user_id, Variable.name == name)).first() + + if variable.type == "Credential" and field == "session_id": + raise TypeError( + f"variable {name} of type 'Credential' cannot be used in a Session ID field " + "because its purpose is to prevent the exposure of values." 
+ ) + # we decrypt the value if not variable or not variable.value: raise ValueError(f"{name} variable not found.") diff --git a/src/backend/base/langflow/template/field/prompt.py b/src/backend/base/langflow/template/field/prompt.py index ccc5d01a0..138b90131 100644 --- a/src/backend/base/langflow/template/field/prompt.py +++ b/src/backend/base/langflow/template/field/prompt.py @@ -10,5 +10,5 @@ class DefaultPromptField(TemplateField): advanced: bool = False multiline: bool = True - input_types: list[str] = ["Document", "Record", "Text"] + input_types: list[str] = ["Document", "Message", "Record", "Text"] value: str = "" # Set the value to empty string diff --git a/src/backend/base/langflow/utils/schemas.py b/src/backend/base/langflow/utils/schemas.py index fbbec2429..647941f59 100644 --- a/src/backend/base/langflow/utils/schemas.py +++ b/src/backend/base/langflow/utils/schemas.py @@ -2,7 +2,18 @@ import enum from typing import Dict, List, Optional, Union from langchain_core.messages import BaseMessage -from pydantic import BaseModel, model_validator +from pydantic import BaseModel, field_validator, model_validator +from typing_extensions import TypedDict + +from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES + + +class File(TypedDict): + """File schema.""" + + path: str + name: str + type: str class ChatOutputResponse(BaseModel): @@ -14,6 +25,47 @@ class ChatOutputResponse(BaseModel): session_id: Optional[str] = None stream_url: Optional[str] = None component_id: Optional[str] = None + files: List[File] = [] + type: str + + @field_validator("files", mode="before") + def validate_files(cls, files): + """Validate files.""" + if not files: + return files + + for file in files: + if not isinstance(file, dict): + raise ValueError("Files must be a list of dictionaries.") + + if not all(key in file for key in ["path", "name", "type"]): + # If any of the keys are missing, we should extract the + # values from the file path + path = file.get("path") + if not path: + raise ValueError("File path is required.") + + name = file.get("name") + if not name: + name = path.split("/")[-1] + file["name"] = name + _type = file.get("type") + if not _type: + # get the file type from the path + extension = path.split(".")[-1] + file_types = set(TEXT_FILE_TYPES + IMG_FILE_TYPES) + if extension and extension in file_types: + _type = extension + else: + for file_type in file_types: + if file_type in path: + _type = file_type + break + if not _type: + raise ValueError("File type is required.") + file["type"] = _type + + return files @classmethod def from_message( diff --git a/src/backend/base/langflow/utils/util.py b/src/backend/base/langflow/utils/util.py index bc7efc161..89b44bd0e 100644 --- a/src/backend/base/langflow/utils/util.py +++ b/src/backend/base/langflow/utils/util.py @@ -7,8 +7,7 @@ from typing import Any, Dict, List, Optional, Union from docstring_parser import parse - -from langflow.schema.schema import Record +from langflow.schema import Record from langflow.services.deps import get_settings_service from langflow.template.frontend_node.constants import FORCE_SHOW_FIELDS from langflow.utils import constants diff --git a/src/backend/base/langflow/utils/validate.py b/src/backend/base/langflow/utils/validate.py index 0871dbd82..c3bbc9df8 100644 --- a/src/backend/base/langflow/utils/validate.py +++ b/src/backend/base/langflow/utils/validate.py @@ -159,7 +159,10 @@ def create_class(code, class_name): # Replace from langflow import CustomComponent with from langflow.custom import 
CustomComponent code = code.replace("from langflow import CustomComponent", "from langflow.custom import CustomComponent") - + code = code.replace( + "from langflow.interface.custom.custom_component import CustomComponent", + "from langflow.custom import CustomComponent", + ) module = ast.parse(code) exec_globals = prepare_global_scope(code, module) diff --git a/src/backend/base/poetry.lock b/src/backend/base/poetry.lock index d54969e68..75fbdcd6a 100644 --- a/src/backend/base/poetry.lock +++ b/src/backend/base/poetry.lock @@ -1159,13 +1159,13 @@ files = [ [[package]] name = "langchain" -version = "0.2.2" +version = "0.2.3" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain-0.2.2-py3-none-any.whl", hash = "sha256:58ca0c47bcdd156da66f50a0a4fcedc49bf6950827f4a6b06c8c4842d55805f3"}, - {file = "langchain-0.2.2.tar.gz", hash = "sha256:9d61e50e9cdc2bea659bc5e6c03650ba048fda63a307490ae368e539f61a0d3a"}, + {file = "langchain-0.2.3-py3-none-any.whl", hash = "sha256:5dc33cd9c8008693d328b7cb698df69073acecc89ad9c2a95f243b3314f8d834"}, + {file = "langchain-0.2.3.tar.gz", hash = "sha256:81962cc72cce6515f7bd71e01542727870789bf8b666c6913d85559080c1a201"}, ] [package.dependencies] @@ -1181,29 +1181,15 @@ requests = ">=2,<3" SQLAlchemy = ">=1.4,<3" tenacity = ">=8.1.0,<9.0.0" -[package.extras] -azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"] -clarifai = ["clarifai (>=9.1.0)"] -cli = ["typer (>=0.9.0,<0.10.0)"] -cohere = ["cohere (>=4,<6)"] -docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"] -embeddings = ["sentence-transformers (>=2,<3)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.1,<0.2)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", 
"rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] -javascript = ["esprima (>=4.0.1,<5.0.0)"] -llms = ["clarifai (>=9.1.0)", "cohere (>=4,<6)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"] -openai = ["openai (<2)", "tiktoken (>=0.7,<1.0)"] -qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"] -text-helpers = ["chardet (>=5.1.0,<6.0.0)"] - [[package]] name = "langchain-community" -version = "0.2.2" +version = "0.2.4" description = "Community contributed LangChain integrations." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_community-0.2.2-py3-none-any.whl", hash = "sha256:470ee16e05f1acacb91a656b6d3c2cbf6fb6a8dcb00a13901cd1353cd29c2bb3"}, - {file = "langchain_community-0.2.2.tar.gz", hash = "sha256:fb09faf4640726a929932056dc55ff120e490aaf2e424fae8ddbb15605195447"}, + {file = "langchain_community-0.2.4-py3-none-any.whl", hash = "sha256:8582e9800f4837660dc297cccd2ee1ddc1d8c440d0fe8b64edb07620f0373b0e"}, + {file = "langchain_community-0.2.4.tar.gz", hash = "sha256:2bb6a1a36b8500a564d25d76469c02457b1a7c3afea6d4a609a47c06b993e3e4"}, ] [package.dependencies] @@ -1218,19 +1204,15 @@ requests = ">=2,<3" SQLAlchemy = ">=1.4,<3" tenacity = ">=8.1.0,<9.0.0" -[package.extras] -cli = ["typer (>=0.9.0,<0.10.0)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-search-documents (==11.4.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.6,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cloudpathlib (>=0.18,<0.19)", "cloudpickle (>=2.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "friendli-client (>=1.2.4,<2.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "httpx-sse (>=0.4.0,<0.5.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "oracledb (>=2.2.0,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "premai (>=0.3.25,<0.4.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello 
(>=0.19.0,<0.20.0)", "pyjwt (>=2.8.0,<3.0.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "simsimd (>=4.3.1,<5.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tidb-vector (>=0.0.3,<1.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "vdms (>=0.0.20,<0.0.21)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] - [[package]] name = "langchain-core" -version = "0.2.4" +version = "0.2.5" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_core-0.2.4-py3-none-any.whl", hash = "sha256:5212f7ec78a525e88a178ed3aefe2fd7134b03fb92573dfbab9914f1d92d6ec5"}, - {file = "langchain_core-0.2.4.tar.gz", hash = "sha256:82bdcc546eb0341cefcf1f4ecb3e49836fff003903afddda2d1312bb8491ef81"}, + {file = "langchain_core-0.2.5-py3-none-any.whl", hash = "sha256:abe5138f22acff23a079ec538be5268bbf97cf023d51987a0dd474d2a16cae3e"}, + {file = "langchain_core-0.2.5.tar.gz", hash = "sha256:4a5c2f56b22396a63ef4790043660e393adbfa6832b978f023ca996a04b8e752"}, ] [package.dependencies] @@ -1241,9 +1223,6 @@ pydantic = ">=1,<3" PyYAML = ">=5.3" tenacity = ">=8.1.0,<9.0.0" -[package.extras] -extended-testing = ["jinja2 (>=3,<4)"] - [[package]] name = "langchain-experimental" version = "0.0.60" @@ -1296,13 +1275,13 @@ types-requests = ">=2.31.0.2,<3.0.0.0" [[package]] name = "langsmith" -version = "0.1.71" +version = "0.1.75" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.71-py3-none-any.whl", hash = "sha256:a9979de2780442eb24eced31314e49f5ece6f807a0d70740b2c6c39217226794"}, - {file = "langsmith-0.1.71.tar.gz", hash = "sha256:bdb1037a08acf7c19b3969c085df09c1eecb65baca8400b3b76ae871e2c8a97e"}, + {file = "langsmith-0.1.75-py3-none-any.whl", hash = "sha256:d08b08dd6b3fa4da170377f95123d77122ef4c52999d10fff4ae08ff70d07aed"}, + {file = "langsmith-0.1.75.tar.gz", hash = "sha256:61274e144ea94c297dd78ce03e6dfae18459fe9bd8ab5094d61a0c4816561279"}, ] [package.dependencies] @@ -1600,13 +1579,13 @@ files = [ [[package]] name = "marshmallow" -version = "3.21.2" +version = "3.21.3" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false python-versions = ">=3.8" files = [ - {file = "marshmallow-3.21.2-py3-none-any.whl", hash = "sha256:70b54a6282f4704d12c0a41599682c5c5450e843b9ec406308653b47c59648a1"}, - {file = "marshmallow-3.21.2.tar.gz", hash = "sha256:82408deadd8b33d56338d2182d455db632c6313aa2af61916672146bb32edc56"}, + {file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"}, + {file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"}, ] [package.dependencies] @@ -2214,13 +2193,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pydantic-settings" -version = "2.3.0" +version = "2.3.1" description = "Settings management using Pydantic" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_settings-2.3.0-py3-none-any.whl", hash = "sha256:26eeed27370a9c5e3f64e4a7d6602573cbedf05ed940f1d5b11c3f178427af7a"}, - {file = "pydantic_settings-2.3.0.tar.gz", hash = "sha256:78db28855a71503cfe47f39500a1dece523c640afd5280edb5c5c9c9cfa534c9"}, + {file = "pydantic_settings-2.3.1-py3-none-any.whl", hash = "sha256:acb2c213140dfff9669f4fe9f8180d43914f51626db28ab2db7308a576cce51a"}, + {file = "pydantic_settings-2.3.1.tar.gz", hash = "sha256:e34bbd649803a6bb3e2f0f58fb0edff1f0c7f556849fda106cc21bcce12c30ab"}, ] [package.dependencies] diff --git a/src/backend/base/pyproject.toml b/src/backend/base/pyproject.toml index 6cf0a4b19..b175fbcf5 100644 --- a/src/backend/base/pyproject.toml +++ b/src/backend/base/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langflow-base" -version = "0.0.56" +version = "0.0.60" description = "A Python package with a built-in web application" authors = ["Langflow "] maintainers = [ diff --git a/src/frontend/package-lock.json b/src/frontend/package-lock.json index 69e621774..c4a1ee671 100644 --- a/src/frontend/package-lock.json +++ b/src/frontend/package-lock.json @@ -26,6 +26,7 @@ "@radix-ui/react-slot": "^1.0.2", "@radix-ui/react-switch": "^1.0.3", "@radix-ui/react-tabs": "^1.0.4", + "@radix-ui/react-toggle": "^1.0.3", "@radix-ui/react-tooltip": "^1.0.6", "@tabler/icons-react": "^2.32.0", "@tailwindcss/forms": "^0.5.6", @@ -40,9 +41,9 @@ "class-variance-authority": "^0.6.1", "clsx": "^1.2.1", "cmdk": "^1.0.0", - "debounce-promise": "^3.1.2", "dompurify": "^3.0.5", "dotenv": "^16.4.5", + "emoji-regex": "^10.3.0", "esbuild": "^0.17.19", "file-saver": "^2.0.5", "framer-motion": "^11.0.6", @@ -51,6 +52,7 @@ "million": "^3.0.6", "moment": "^2.29.4", "openseadragon": "^4.1.1", + "p-debounce": "^4.0.0", "playwright": "^1.42.0", "react": "^18.2.21", "react-ace": "^10.1.0", @@ -58,6 +60,7 @@ "react-dom": "^18.2.21", "react-error-boundary": "^4.0.11", "react-hook-form": "^7.51.4", + "react-hotkeys-hook": "^4.5.0", "react-icons": "^5.0.1", "react-laag": "^2.0.5", "react-markdown": "^8.0.7", @@ -1921,12 +1924,12 @@ } }, "node_modules/@playwright/test": { - "version": "1.44.0", - "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.44.0.tgz", - "integrity": "sha512-rNX5lbNidamSUorBhB4XZ9SQTjAqfe5M+p37Z8ic0jPFBMo5iCtQz1kRWkEMg+rYOKSlVycpQmpqjSFq7LXOfg==", + "version": "1.44.1", + "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.44.1.tgz", + "integrity": "sha512-1hZ4TNvD5z9VuhNJ/walIjvMVvYkZKf71axoF/uiAqpntQJXpG64dlXhoDXE3OczPuTuvjf/M5KWFg5VAVUS3Q==", "dev": true, "dependencies": { - "playwright": "1.44.0" + "playwright": "1.44.1" }, "bin": { "playwright": "cli.js" @@ -2762,6 +2765,31 @@ } } }, + 
"node_modules/@radix-ui/react-toggle": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle/-/react-toggle-1.0.3.tgz", + "integrity": "sha512-Pkqg3+Bc98ftZGsl60CLANXQBBQ4W3mTFS9EJvNxKMZ7magklKV69/id1mlAlOFDDfHvlCms0fx8fA4CMKDJHg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-controllable-state": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-tooltip": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.0.7.tgz", @@ -5671,11 +5699,6 @@ "node": ">=12" } }, - "node_modules/debounce-promise": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/debounce-promise/-/debounce-promise-3.1.2.tgz", - "integrity": "sha512-rZHcgBkbYavBeD9ej6sP56XfG53d51CD4dnaw989YX/nZ/ZJfgRx/9ePKmTNiUiyQvh4mtrMoS3OAWW+yoYtpg==" - }, "node_modules/debug": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", @@ -5966,14 +5989,14 @@ "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" }, "node_modules/electron-to-chromium": { - "version": "1.4.778", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.778.tgz", - "integrity": "sha512-C6q/xcUJf/2yODRxAVCfIk4j3y3LMsD0ehiE2RQNV2cxc8XU62gR6vvYh3+etSUzlgTfil+qDHI1vubpdf0TOA==" + "version": "1.4.780", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.780.tgz", + "integrity": "sha512-NPtACGFe7vunRYzvYqVRhQvsDrTevxpgDKxG/Vcbe0BTNOY+5+/2mOXSw2ls7ToNbE5Bf/+uQbjTxcmwMozpCw==" }, "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.3.0.tgz", + "integrity": "sha512-QpLs9D9v9kArv4lfDEgg1X/gN5XLnf/A6l9cs8SPZLRZR3ZkY9+kwIQTxm+fsSej5UMYGE8fdoaZVIBlqG0XTw==" }, "node_modules/end-of-stream": { "version": "1.4.4", @@ -7596,6 +7619,7 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", "devOptional": true, "dependencies": { "once": "^1.3.0", @@ -10023,6 +10047,15 @@ "node": ">=8" } }, + "node_modules/p-debounce": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-debounce/-/p-debounce-4.0.0.tgz", + "integrity": "sha512-4Ispi9I9qYGO4lueiLDhe4q4iK5ERK8reLsuzH6BPaXn53EGaua8H66PXIFGrW897hwjXp+pVLrm/DLxN0RF0A==", + "license": "MIT", + "engines": { + "node": ">=12" + } + }, "node_modules/p-finally": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", @@ -10272,11 +10305,11 @@ } }, "node_modules/playwright": { - "version": "1.44.0", - "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.44.0.tgz", - "integrity": "sha512-F9b3GUCLQ3Nffrfb6dunPOkE5Mh68tR7zN32L4jCk4FjQamgesGay7/dAAe1WaMEGV04DkdJfcJzjoCKygUaRQ==", + "version": "1.44.1", + "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.44.1.tgz", + "integrity": "sha512-qr/0UJ5CFAtloI3avF95Y0L1xQo6r3LQArLIg/z/PoGJ6xa+EwzrwO5lpNr/09STxdHuUoP2mvuELJS+hLdtgg==", "dependencies": { - "playwright-core": "1.44.0" + "playwright-core": "1.44.1" }, "bin": { "playwright": "cli.js" @@ -10289,9 +10322,9 @@ } }, "node_modules/playwright-core": { - "version": "1.44.0", - "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.44.0.tgz", - "integrity": "sha512-ZTbkNpFfYcGWohvTTl+xewITm7EOuqIqex0c7dNZ+aXsbrLj0qI8XlGKfPpipjm0Wny/4Lt4CJsWJk1stVS5qQ==", + "version": "1.44.1", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.44.1.tgz", + "integrity": "sha512-wh0JWtYTrhv1+OSsLPgFzGzt67Y7BE/ZS3jEqgGBlp2ppp1ZDj8c+9IARNW4dwf1poq5MgHreEM2KV/GuR4cFA==", "bin": { "playwright-core": "cli.js" }, @@ -10990,6 +11023,15 @@ "react": "^16.8.0 || ^17 || ^18" } }, + "node_modules/react-hotkeys-hook": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/react-hotkeys-hook/-/react-hotkeys-hook-4.5.0.tgz", + "integrity": "sha512-Samb85GSgAWFQNvVt3PS90LPPGSf9mkH/r4au81ZP1yOIFayLC3QAvqTgGtJ8YEDMXtPmaVBs6NgipHO6h4Mug==", + "peerDependencies": { + "react": ">=16.8.1", + "react-dom": ">=16.8.1" + } + }, "node_modules/react-icons": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/react-icons/-/react-icons-5.2.1.tgz", @@ -12178,6 +12220,16 @@ "node": ">=8" } }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/string-width/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, "node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", diff --git a/src/frontend/package.json b/src/frontend/package.json index 84feb826f..c069b9da1 100644 --- a/src/frontend/package.json +++ b/src/frontend/package.json @@ -6,6 +6,7 @@ "@headlessui/react": "^1.7.17", "@hookform/resolvers": "^3.3.4", "@million/lint": "^0.0.73", + "react-hotkeys-hook": "^4.5.0", "@radix-ui/react-accordion": "^1.1.2", "@radix-ui/react-checkbox": "^1.0.4", "@radix-ui/react-dialog": "^1.0.4", @@ -21,6 +22,7 @@ 
"@radix-ui/react-slot": "^1.0.2", "@radix-ui/react-switch": "^1.0.3", "@radix-ui/react-tabs": "^1.0.4", + "@radix-ui/react-toggle": "^1.0.3", "@radix-ui/react-tooltip": "^1.0.6", "@tabler/icons-react": "^2.32.0", "@tailwindcss/forms": "^0.5.6", @@ -35,9 +37,9 @@ "class-variance-authority": "^0.6.1", "clsx": "^1.2.1", "cmdk": "^1.0.0", - "debounce-promise": "^3.1.2", "dompurify": "^3.0.5", "dotenv": "^16.4.5", + "emoji-regex": "^10.3.0", "esbuild": "^0.17.19", "file-saver": "^2.0.5", "framer-motion": "^11.0.6", @@ -46,6 +48,7 @@ "million": "^3.0.6", "moment": "^2.29.4", "openseadragon": "^4.1.1", + "p-debounce": "^4.0.0", "playwright": "^1.42.0", "react": "^18.2.21", "react-ace": "^10.1.0", diff --git a/src/frontend/playwright.config.ts b/src/frontend/playwright.config.ts index eeb9497ae..5af71db80 100644 --- a/src/frontend/playwright.config.ts +++ b/src/frontend/playwright.config.ts @@ -45,6 +45,9 @@ export default defineConfig({ name: "chromium", use: { ...devices["Desktop Chrome"], + launchOptions: { + // headless: false, + }, contextOptions: { // chromium-specific permissions permissions: ["clipboard-read", "clipboard-write"], @@ -57,6 +60,7 @@ export default defineConfig({ // use: { // ...devices["Desktop Firefox"], // launchOptions: { + // headless: false, // firefoxUserPrefs: { // "dom.events.asyncClipboard.readText": true, // "dom.events.testing.asyncClipboard": true, diff --git a/src/frontend/src/App.css b/src/frontend/src/App.css index a4ff01961..5a97d371e 100644 --- a/src/frontend/src/App.css +++ b/src/frontend/src/App.css @@ -164,3 +164,22 @@ body { .ag-body-vertical-scroll-viewport::-webkit-scrollbar-thumb:hover { background-color: #bbb; } + +/* This CSS is to not apply the border for the column having 'no-border' class */ +.no-border.ag-cell:focus { + border: none !important; + outline: none; +} +.no-border.ag-cell { + border: none !important; + outline: none; +} + +/* selected */ +.react-flow__edge.selected .react-flow__edge-path { + stroke: var(--selected) !important; +} + +.react-flow__edge .react-flow__edge-path { + stroke: var(--connection) !important; +} diff --git a/src/frontend/src/App.tsx b/src/frontend/src/App.tsx index b9e02a27d..4a60f453f 100644 --- a/src/frontend/src/App.tsx +++ b/src/frontend/src/App.tsx @@ -1,4 +1,3 @@ -import axios from "axios"; import { useContext, useEffect, useState } from "react"; import { ErrorBoundary } from "react-error-boundary"; import { useNavigate } from "react-router-dom"; diff --git a/src/frontend/src/CustomNodes/GenericNode/components/outputModal/components/switchOutputView/components/index.tsx b/src/frontend/src/CustomNodes/GenericNode/components/outputModal/components/switchOutputView/components/index.tsx new file mode 100644 index 000000000..bd28aad11 --- /dev/null +++ b/src/frontend/src/CustomNodes/GenericNode/components/outputModal/components/switchOutputView/components/index.tsx @@ -0,0 +1,12 @@ +import { Textarea } from "../../../../../../../components/ui/textarea"; + +export default function ErrorOutput({ value }: { value: string }) { + return ( + + ); +} diff --git a/src/frontend/src/CustomNodes/GenericNode/components/outputModal/components/switchOutputView/helpers/convert-to-table-rows.ts b/src/frontend/src/CustomNodes/GenericNode/components/outputModal/components/switchOutputView/helpers/convert-to-table-rows.ts new file mode 100644 index 000000000..db61acaa3 --- /dev/null +++ b/src/frontend/src/CustomNodes/GenericNode/components/outputModal/components/switchOutputView/helpers/convert-to-table-rows.ts @@ -0,0 +1,4 @@ 
+export const convertToTableRows = (obj: Object) => {
+  const tokensArray = [Object.values(obj)[0]];
+  return tokensArray;
+};
diff --git a/src/frontend/src/CustomNodes/GenericNode/components/outputModal/components/switchOutputView/index.tsx
new file mode 100644
index 000000000..307c77c1a
--- /dev/null
+++ b/src/frontend/src/CustomNodes/GenericNode/components/outputModal/components/switchOutputView/index.tsx
@@ -0,0 +1,77 @@
+import ForwardedIconComponent from "../../../../../../components/genericIconComponent";
+import RecordsOutputComponent from "../../../../../../components/recordsOutputComponent";
+import {
+  Alert,
+  AlertDescription,
+  AlertTitle,
+} from "../../../../../../components/ui/alert";
+import { Case } from "../../../../../../shared/components/caseComponent";
+import TextOutputView from "../../../../../../shared/components/textOutputView";
+import useFlowStore from "../../../../../../stores/flowStore";
+import ErrorOutput from "./components";
+
+export default function SwitchOutputView(nodeId): JSX.Element {
+  const nodeIdentity = nodeId.nodeId;
+
+  const nodes = useFlowStore((state) => state.nodes);
+  const flowPool = useFlowStore((state) => state.flowPool);
+  const node = nodes.find((node) => node?.id === nodeIdentity);
+
+  const flowPoolNode = (flowPool[nodeIdentity] ?? [])[
+    (flowPool[nodeIdentity]?.length ?? 1) - 1
+  ];
+
+  const results = flowPoolNode?.data?.logs[0] ?? "";
+  const resultType = results?.type;
+  let resultMessage = results?.message;
+  const RECORD_TYPES = ["record", "object", "array", "message"];
+  if (resultMessage.raw) {
+    resultMessage = resultMessage.raw;
+  }
+  console.log("resultType", results);
+  return (
+    <>
+
NO OUTPUT
+ + + + + + + + + + + ).every((item) => item.data) + ? (resultMessage as Array).map((item) => item.data) + : resultMessage + : [resultMessage] + } + pagination={true} + columnMode="union" + /> + + + +
+ + + {"Streaming is not supported"} + + { + "Use the playground to interact with components that stream data" + } + + +
+
+
+  );
+}
diff --git a/src/frontend/src/CustomNodes/GenericNode/components/outputModal/index.tsx
new file mode 100644
index 000000000..b3185acb1
--- /dev/null
+++ b/src/frontend/src/CustomNodes/GenericNode/components/outputModal/index.tsx
@@ -0,0 +1,25 @@
+import { Button } from "../../../../components/ui/button";
+import BaseModal from "../../../../modals/baseModal";
+import SwitchOutputView from "./components/switchOutputView";
+
+export default function OutputModal({ open, setOpen, nodeId }): JSX.Element {
+  return (
+
+
+
+ Component Output +
+
+ + + + +
+ +
+
+
+ ); +} diff --git a/src/frontend/src/customNodes/genericNode/components/parameterComponent/constants.ts b/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/constants.ts similarity index 100% rename from src/frontend/src/customNodes/genericNode/components/parameterComponent/constants.ts rename to src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/constants.ts diff --git a/src/frontend/src/customNodes/genericNode/components/parameterComponent/index.tsx b/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx similarity index 90% rename from src/frontend/src/customNodes/genericNode/components/parameterComponent/index.tsx rename to src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx index 41ab876fe..941c86271 100644 --- a/src/frontend/src/customNodes/genericNode/components/parameterComponent/index.tsx +++ b/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx @@ -22,7 +22,6 @@ import { TOOLTIP_EMPTY, } from "../../../../constants/constants"; import { Case } from "../../../../shared/components/caseComponent"; -import useAlertStore from "../../../../stores/alertStore"; import useFlowStore from "../../../../stores/flowStore"; import useFlowsManagerStore from "../../../../stores/flowsManagerStore"; import { useTypesStore } from "../../../../stores/typesStore"; @@ -45,6 +44,7 @@ import useFetchDataOnMount from "../../../hooks/use-fetch-data-on-mount"; import useHandleOnNewValue from "../../../hooks/use-handle-new-value"; import useHandleNodeClass from "../../../hooks/use-handle-node-class"; import useHandleRefreshButtonPress from "../../../hooks/use-handle-refresh-buttons"; +import OutputModal from "../outputModal"; import TooltipRenderComponent from "../tooltipRenderComponent"; import { TEXT_FIELD_TYPES } from "./constants"; @@ -67,7 +67,6 @@ export default function ParameterComponent({ const ref = useRef(null); const refHtml = useRef(null); const infoHtml = useRef(null); - const setErrorData = useAlertStore((state) => state.setErrorData); const currentFlow = useFlowsManagerStore((state) => state.currentFlow); const nodes = useFlowStore((state) => state.nodes); const edges = useFlowStore((state) => state.edges); @@ -80,6 +79,16 @@ export default function ParameterComponent({ const flow = currentFlow?.data?.nodes ?? null; const groupedEdge = useRef(null); const setFilterEdge = useFlowStore((state) => state.setFilterEdge); + const [openOutputModal, setOpenOutputModal] = useState(false); + const flowPool = useFlowStore((state) => state.flowPool); + + const displayOutputPreview = !!flowPool[data.id]; + + const unknownOutput = !!( + flowPool[data.id] && + flowPool[data.id][flowPool[data.id].length - 1]?.data?.logs[0]?.type === + "unknown" + ); const { handleOnNewValue: handleOnNewValueHook } = useHandleOnNewValue( data, @@ -251,9 +260,40 @@ export default function ParameterComponent({ ) : ( - - {title} - +
+ + {title} + + {!left && ( + + + + )} +
)} {required ? "*" : ""} @@ -392,7 +432,7 @@ export default function ParameterComponent({ }); }} name={name} - data={data.node?.template[name]} + data={data.node?.template[name]!} /> {data.node?.template[name]?.refresh_button && ( @@ -448,8 +488,8 @@ export default function ParameterComponent({ data.node?.template[name]?.real_time_refresh) } > -
-
+
+
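The hunks above feed each node's flow-pool build results into the parameter rows so that an output preview can be offered once the node has been built. A minimal sketch of that gating logic as a standalone helper (the `VertexBuild` shape and the function name are illustrative assumptions, not part of this patch):

```ts
// Sketch of the preview gating added above: a node becomes previewable once the
// flow pool holds at least one build result for it, and the preview is flagged as
// "unknown" when the last build's first log entry has type "unknown".
type VertexBuild = { data?: { logs?: { type?: string }[] } };

export function previewState(
  flowPool: Record<string, VertexBuild[]>,
  nodeId: string,
): { displayOutputPreview: boolean; unknownOutput: boolean } {
  const builds = flowPool[nodeId] ?? [];
  const last = builds[builds.length - 1];
  return {
    displayOutputPreview: builds.length > 0,
    unknownOutput: last?.data?.logs?.[0]?.type === "unknown",
  };
}
```

In the patch itself the same checks drive `displayOutputPreview` and `unknownOutput` directly inside `ParameterComponent`.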
@@ -580,6 +619,13 @@ export default function ParameterComponent({ />
+ {openOutputModal && ( + + )}
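For context, the conditional render above pairs with the `OutputModal({ open, setOpen, nodeId })` signature introduced earlier: local state toggles the modal and it is only mounted while open. The wrapper below is a usage sketch under that assumption; the component name, import path, and markup are illustrative rather than code from this patch:

```tsx
import { useState } from "react";
import OutputModal from "./outputModal"; // assumed relative path to the new modal

// Illustrative wrapper: a button that opens the output preview for a single node.
export function OutputPreviewButton({ nodeId, disabled }: { nodeId: string; disabled: boolean }) {
  const [open, setOpen] = useState(false);
  return (
    <>
      <button disabled={disabled} onClick={() => setOpen(true)}>
        Inspect output
      </button>
      {open && <OutputModal open={open} setOpen={setOpen} nodeId={nodeId} />}
    </>
  );
}
```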
); diff --git a/src/frontend/src/customNodes/genericNode/components/tooltipRenderComponent/index.tsx b/src/frontend/src/CustomNodes/GenericNode/components/tooltipRenderComponent/index.tsx similarity index 100% rename from src/frontend/src/customNodes/genericNode/components/tooltipRenderComponent/index.tsx rename to src/frontend/src/CustomNodes/GenericNode/components/tooltipRenderComponent/index.tsx diff --git a/src/frontend/src/customNodes/genericNode/index.tsx b/src/frontend/src/CustomNodes/GenericNode/index.tsx similarity index 71% rename from src/frontend/src/customNodes/genericNode/index.tsx rename to src/frontend/src/CustomNodes/GenericNode/index.tsx index 89fb6166d..dea103b37 100644 --- a/src/frontend/src/customNodes/genericNode/index.tsx +++ b/src/frontend/src/CustomNodes/GenericNode/index.tsx @@ -1,46 +1,52 @@ -import { cloneDeep } from "lodash"; -import { useCallback, useEffect, useMemo, useState } from "react"; +import emojiRegex from "emoji-regex"; +import { useEffect, useMemo, useState } from "react"; import { NodeToolbar, useUpdateNodeInternals } from "reactflow"; import IconComponent from "../../components/genericIconComponent"; import InputComponent from "../../components/inputComponent"; import ShadTooltip from "../../components/shadTooltipComponent"; import { Button } from "../../components/ui/button"; -import Checkmark from "../../components/ui/checkmark"; -import Loading from "../../components/ui/loading"; import { Textarea } from "../../components/ui/textarea"; -import Xmark from "../../components/ui/xmark"; import { RUN_TIMESTAMP_PREFIX, STATUS_BUILD, STATUS_BUILDING, + TOOLTIP_OUTDATED_NODE, } from "../../constants/constants"; import { BuildStatus } from "../../constants/enums"; +import { countHandlesFn } from "../helpers/count-handles"; +import { getSpecificClassFromBuildStatus } from "../helpers/get-class-from-build-status"; import NodeToolbarComponent from "../../pages/FlowPage/components/nodeToolbarComponent"; import useAlertStore from "../../stores/alertStore"; import { useDarkStore } from "../../stores/darkStore"; import useFlowStore from "../../stores/flowStore"; import useFlowsManagerStore from "../../stores/flowsManagerStore"; import { useTypesStore } from "../../stores/typesStore"; -import { APIClassType } from "../../types/api"; -import { validationStatusType } from "../../types/components"; +import { VertexBuildTypeAPI } from "../../types/api"; import { NodeDataType } from "../../types/flow"; import { handleKeyDown, scapedJSONStringfy } from "../../utils/reactflowUtils"; import { nodeColors, nodeIconsLucide } from "../../utils/styleUtils"; import { classNames, cn } from "../../utils/utils"; +import useCheckCodeValidity from "../hooks/use-check-code-validity"; +import useIconNodeRender from "../hooks/use-icon-render"; +import useIconStatus from "../hooks/use-icons-status"; +import useUpdateNodeCode from "../hooks/use-update-node-code"; +import useUpdateValidationStatus from "../hooks/use-update-validation-status"; +import useValidationStatusString from "../hooks/use-validation-status-string"; import getFieldTitle from "../utils/get-field-title"; import sortFields from "../utils/sort-fields"; import ParameterComponent from "./components/parameterComponent"; +import { postCustomComponent } from "../../controllers/API"; +import { useShortcutsStore } from "../../stores/shortcuts"; export default function GenericNode({ data, - xPos, - yPos, + selected, }: { data: NodeDataType; selected: boolean; - xPos: number; - yPos: number; + xPos?: number; + yPos?: number; 
}): JSX.Element { const types = useTypesStore((state) => state.types); const templates = useTypesStore((state) => state.templates); @@ -50,7 +56,15 @@ export default function GenericNode({ const setNode = useFlowStore((state) => state.setNode); const updateNodeInternals = useUpdateNodeInternals(); const setErrorData = useAlertStore((state) => state.setErrorData); - const name = nodeIconsLucide[data.type] ? data.type : types[data.type]; + const isDark = useDarkStore((state) => state.dark); + const buildStatus = useFlowStore( + (state) => state.flowBuildStatus[data.id]?.status, + ); + const lastRunTime = useFlowStore( + (state) => state.flowBuildStatus[data.id]?.timestamp, + ); + const takeSnapshot = useFlowsManagerStore((state) => state.takeSnapshot); + const [inputName, setInputName] = useState(false); const [nodeName, setNodeName] = useState(data.node!.display_name); const [inputDescription, setInputDescription] = useState(false); @@ -58,185 +72,25 @@ export default function GenericNode({ data.node?.description!, ); const [isOutdated, setIsOutdated] = useState(false); - const buildStatus = useFlowStore( - (state) => state.flowBuildStatus[data.id]?.status, - ); - const lastRunTime = useFlowStore( - (state) => state.flowBuildStatus[data.id]?.timestamp, - ); const [validationStatus, setValidationStatus] = - useState(null); + useState(null); const [handles, setHandles] = useState(0); - const [validationString, setValidationString] = useState(""); - const takeSnapshot = useFlowsManagerStore((state) => state.takeSnapshot); - - useEffect(() => { - // This one should run only once - // first check if data.type in NATIVE_CATEGORIES - // if not return - if (!data.node?.template?.code?.value) return; - const thisNodeTemplate = templates[data.type]?.template; - // if the template does not have a code key - // return - if (!thisNodeTemplate?.code) return; - const currentCode = thisNodeTemplate.code?.value; - const thisNodesCode = data.node!.template?.code?.value; - const componentsToIgnore = ["Custom Component"]; - if ( - currentCode !== thisNodesCode && - !componentsToIgnore.includes(data.node!.display_name) - ) { - setIsOutdated(true); - } else { - setIsOutdated(false); - } - // template.code can be undefined - }, [data.node?.template?.code?.value]); - - const updateNodeCode = useCallback( - (newNodeClass: APIClassType, code: string, name: string) => { - setNode(data.id, (oldNode) => { - let newNode = cloneDeep(oldNode); - - newNode.data = { - ...newNode.data, - node: newNodeClass, - description: newNodeClass.description ?? data.node!.description, - display_name: newNodeClass.display_name ?? 
data.node!.display_name, - }; - - newNode.data.node.template[name].value = code; - setIsOutdated(false); - - return newNode; - }); - - updateNodeInternals(data.id); - }, - [data.id, data.node, setNode, setIsOutdated], - ); - - if (!data.node!.template) { - setErrorData({ - title: `Error in component ${data.node!.display_name}`, - list: [ - `The component ${data.node!.display_name} has no template.`, - `Please contact the developer of the component to fix this issue.`, - ], - }); - takeSnapshot(); - deleteNode(data.id); - } - - function countHandles(): void { - let count = Object.keys(data.node!.template) - .filter((templateField) => templateField.charAt(0) !== "_") - .map((templateCamp) => { - const { template } = data.node!; - if (template[templateCamp].input_types) return true; - if (!template[templateCamp].show) return false; - switch (template[templateCamp].type) { - case "str": - case "bool": - case "float": - case "code": - case "prompt": - case "file": - case "int": - return false; - default: - return true; - } - }) - .reduce((total, value) => total + (value ? 1 : 0), 0); - - setHandles(count); - } - useEffect(() => { - countHandles(); - }, [data, data.node]); - - useEffect(() => { - if (!selected) { - setInputName(false); - setInputDescription(false); - } - }, [selected]); - + const iconStatus = useIconStatus(buildStatus, validationStatus); + const [showNode, setShowNode] = useState(data.showNode ?? true); // State for outline color const isBuilding = useFlowStore((state) => state.isBuilding); - // should be empty string if no duration - // else should be `Duration: ${duration}` - const getDurationString = (duration: number | undefined): string => { - if (duration === undefined) { - return ""; - } else { - return `${duration}`; - } - }; - const durationString = getDurationString(validationStatus?.data.duration); + const updateNodeCode = useUpdateNodeCode( + data?.id, + data.node!, + setNode, + setIsOutdated, + updateNodeInternals, + ); - useEffect(() => { - setNodeDescription(data.node!.description); - }, [data.node!.description]); - - useEffect(() => { - setNodeName(data.node!.display_name); - }, [data.node!.display_name]); - - useEffect(() => { - const relevantData = - flowPool[data.id] && flowPool[data.id]?.length > 0 - ? flowPool[data.id][flowPool[data.id].length - 1] - : null; - if (relevantData) { - // Extract validation information from relevantData and update the validationStatus state - setValidationStatus(relevantData); - } else { - setValidationStatus(null); - } - }, [flowPool[data.id], data.id]); - - useEffect(() => { - if (validationStatus?.params) { - // if it is not a string turn it into a string - let newValidationString = validationStatus.params; - if (typeof newValidationString !== "string") { - newValidationString = JSON.stringify(validationStatus.params); - } - - setValidationString(newValidationString); - } - }, [validationStatus, validationStatus?.params]); - - const [showNode, setShowNode] = useState(data.showNode ?? true); - - useEffect(() => { - setShowNode(data.showNode ?? true); - }, [data.showNode]); - - const nameEditable = true; - - const emojiRegex = /\p{Emoji}/u; - const isEmoji = emojiRegex.test(data?.node?.icon!); - - const iconNodeRender = useCallback(() => { - const iconElement = data?.node?.icon; - const iconColor = nodeColors[types[data.type]]; - const iconName = - iconElement || (data.node?.flow ? "group_components" : name); - const iconClassName = `generic-node-icon ${ - !showNode ? 
" absolute inset-x-6 h-12 w-12 " : "" - }`; - if (iconElement && isEmoji) { - return nodeIconFragment(iconElement); - } else { - return checkNodeIconFragment(iconColor, iconName, iconClassName); - } - }, [data, isEmoji, name, showNode]); + const name = nodeIconsLucide[data.type] ? data.type : types[data.type]; const nodeIconFragment = (icon) => { return {icon}; @@ -252,79 +106,24 @@ export default function GenericNode({ ); }; - const isDark = useDarkStore((state) => state.dark); - const renderIconStatus = ( - buildStatus: BuildStatus | undefined, - validationStatus: validationStatusType | null, - ) => { - if (buildStatus === BuildStatus.BUILDING) { - return ; - } else { - return ( - <> - - {validationStatus && validationStatus.valid ? ( - - ) : validationStatus && - !validationStatus.valid && - buildStatus === BuildStatus.INACTIVE ? ( - - ) : buildStatus === BuildStatus.ERROR || - (validationStatus && !validationStatus.valid) ? ( - - ) : ( - - )} - - ); - } - }; - const getSpecificClassFromBuildStatus = ( - buildStatus: BuildStatus | undefined, - validationStatus: validationStatusType | null, - ) => { - let isInvalid = validationStatus && !validationStatus.valid; - - if (buildStatus === BuildStatus.INACTIVE) { - // INACTIVE should have its own class - return "inactive-status"; - } - if ( - (buildStatus === BuildStatus.BUILT && isInvalid) || - buildStatus === BuildStatus.ERROR - ) { - return isDark ? "built-invalid-status-dark" : "built-invalid-status"; - } else if (buildStatus === BuildStatus.BUILDING) { - return "building-status"; - } else { - return ""; - } + const renderIconStatus = () => { + return ( +
+ {iconStatus} +
+ ); }; const getNodeBorderClassName = ( selected: boolean, showNode: boolean, buildStatus: BuildStatus | undefined, - validationStatus: validationStatusType | null, + validationStatus: VertexBuildTypeAPI | null, ) => { const specificClassFromBuildStatus = getSpecificClassFromBuildStatus( buildStatus, validationStatus, + isDark, ); const baseBorderClass = getBaseBorderClass(selected); @@ -332,12 +131,14 @@ export default function GenericNode({ const names = classNames( baseBorderClass, nodeSizeClass, - "generic-node-div", + "generic-node-div group/node", specificClassFromBuildStatus, ); return names; }; + // const [openWDoubleCLick, setOpenWDoubleCLick] = useState(false); + const getBaseBorderClass = (selected) => { let className = selected ? "border border-ring" : "border"; let frozenClass = selected ? "border-ring-frozen" : "border-frozen"; @@ -347,10 +148,99 @@ export default function GenericNode({ const getNodeSizeClass = (showNode) => showNode ? "w-96 rounded-lg" : "w-26 h-26 rounded-full"; + const nameEditable = true; + const isEmoji = emojiRegex().test(data?.node?.icon!); + + if (!data.node!.template) { + setErrorData({ + title: `Error in component ${data.node!.display_name}`, + list: [ + `The component ${data.node!.display_name} has no template.`, + `Please contact the developer of the component to fix this issue.`, + ], + }); + takeSnapshot(); + deleteNode(data.id); + } + + useCheckCodeValidity(data, templates, setIsOutdated, types); + useValidationStatusString(validationStatus, setValidationString); + useUpdateValidationStatus(data?.id, flowPool, setValidationStatus); + + const iconNodeRender = useIconNodeRender( + data, + types, + nodeColors, + name, + showNode, + isEmoji, + nodeIconFragment, + checkNodeIconFragment, + ); + + function countHandles(): void { + const count = countHandlesFn(data); + setHandles(count); + } + + useEffect(() => { + countHandles(); + }, [data, data.node]); + + useEffect(() => { + if (!selected) { + setInputName(false); + setInputDescription(false); + } + }, [selected]); + + useEffect(() => { + setNodeDescription(data.node!.description); + }, [data.node!.description]); + + useEffect(() => { + setNodeName(data.node!.display_name); + }, [data.node!.display_name]); + + useEffect(() => { + setShowNode(data.showNode ?? 
true); + }, [data.showNode]); + + const [loadingUpdate, setLoadingUpdate] = useState(false); + + const handleUpdateCode = () => { + setLoadingUpdate(true); + takeSnapshot(); + // to update we must get the code from the templates in useTypesStore + const thisNodeTemplate = templates[data.type]?.template; + // if the template does not have a code key + // return + if (!thisNodeTemplate?.code) return; + + const currentCode = thisNodeTemplate.code.value; + if (data.node) { + postCustomComponent(currentCode, data.node) + .then((apiReturn) => { + const { data } = apiReturn; + if (data && updateNodeCode) { + updateNodeCode(data, currentCode, "code"); + setLoadingUpdate(false); + } + }) + .catch((err) => { + console.log(err); + }); + } + }; + + const shortcuts = useShortcutsStore((state) => state.shortcuts); + const memoizedNodeToolbarComponent = useMemo(() => { return ( { takeSnapshot(); @@ -367,8 +257,6 @@ export default function GenericNode({ showNode={showNode} openAdvancedModal={false} onCloseAdvancedModal={() => {}} - updateNodeCode={updateNodeCode} - isOutdated={isOutdated} selected={selected} /> @@ -384,11 +272,18 @@ export default function GenericNode({ updateNodeCode, isOutdated, selected, + shortcuts, + // openWDoubleCLick, + // setOpenWDoubleCLick, ]); return ( <> {memoizedNodeToolbarComponent}
{ + // if (!isWrappedWithClass(event, "nodoubleclick")) + // setOpenWDoubleCLick(true); + // }} className={getNodeBorderClassName( selected, showNode, @@ -462,7 +357,7 @@ export default function GenericNode({ event.preventDefault(); }} data-testid={"title-" + data.node?.display_name} - className="generic-node-tooltip-div cursor-text text-primary" + className="nodoubleclick generic-node-tooltip-div cursor-text text-primary" > {data.node?.display_name}
@@ -582,67 +477,73 @@ export default function GenericNode({ )}
{showNode && ( - {STATUS_BUILDING} - ) : !validationStatus ? ( - {STATUS_BUILD} - ) : ( -
-
- {lastRunTime && ( + <> +
+ {isOutdated && ( + + + + )} + {STATUS_BUILDING} + ) : !validationStatus ? ( + {STATUS_BUILD} + ) : ( +
+
+ {lastRunTime && ( +
+
{RUN_TIMESTAMP_PREFIX}
+
+ {lastRunTime} +
+
+ )} +
-
{RUN_TIMESTAMP_PREFIX}
+
Duration:
- {lastRunTime} + {validationStatus?.data.duration}
- )} -
-
-
Duration:
-
- {validationStatus?.data.duration}
-
-
- - Output - -
- {validationString.split("\n").map((line, index) => ( -
- {line} -
- ))} -
-
- ) - } - side="bottom" - > - - + + +
+ )}
@@ -704,7 +605,7 @@ export default function GenericNode({ ) : (
templateField.charAt(0) !== "_") + .map((templateCamp) => { + const { template } = data.node!; + if (template[templateCamp].input_types) return true; + if (!template[templateCamp].show) return false; + switch (template[templateCamp].type) { + case "str": + case "bool": + case "float": + case "code": + case "prompt": + case "file": + case "int": + return false; + default: + return true; + } + }) + .reduce((total, value) => total + (value ? 1 : 0), 0); + + return count; +} diff --git a/src/frontend/src/CustomNodes/helpers/get-class-from-build-status.ts b/src/frontend/src/CustomNodes/helpers/get-class-from-build-status.ts new file mode 100644 index 000000000..710e91d15 --- /dev/null +++ b/src/frontend/src/CustomNodes/helpers/get-class-from-build-status.ts @@ -0,0 +1,25 @@ +import { BuildStatus } from "../../constants/enums"; +import { VertexBuildTypeAPI } from "../../types/api"; + +export const getSpecificClassFromBuildStatus = ( + buildStatus: BuildStatus | undefined, + validationStatus: VertexBuildTypeAPI | null, + isDark: boolean, +) => { + let isInvalid = validationStatus && !validationStatus.valid; + + if (buildStatus === BuildStatus.INACTIVE) { + // INACTIVE should have its own class + return "inactive-status"; + } + if ( + (buildStatus === BuildStatus.BUILT && isInvalid) || + buildStatus === BuildStatus.ERROR + ) { + return isDark ? "built-invalid-status-dark" : "built-invalid-status"; + } else if (buildStatus === BuildStatus.BUILDING) { + return "building-status"; + } else { + return ""; + } +}; diff --git a/src/frontend/src/CustomNodes/hooks/use-check-code-validity.tsx b/src/frontend/src/CustomNodes/hooks/use-check-code-validity.tsx new file mode 100644 index 000000000..ec4d586f6 --- /dev/null +++ b/src/frontend/src/CustomNodes/hooks/use-check-code-validity.tsx @@ -0,0 +1,40 @@ +import { useEffect } from "react"; +import { NATIVE_CATEGORIES } from "../../constants/constants"; +import { NodeDataType } from "../../types/flow"; + +const useCheckCodeValidity = ( + data: NodeDataType, + templates: { [key: string]: any }, + setIsOutdated: (value: boolean) => void, + types, +) => { + useEffect(() => { + // This one should run only once + // first check if data.type in NATIVE_CATEGORIES + // if not return + if ( + !NATIVE_CATEGORIES.includes(types[data.type]) || + !data.node?.template?.code?.value + ) + return; + const thisNodeTemplate = templates[data.type].template; + // if the template does not have a code key + // return + if (!thisNodeTemplate.code) return; + const currentCode = thisNodeTemplate.code?.value; + const thisNodesCode = data.node!.template?.code?.value; + const componentsToIgnore = ["CustomComponent", "Prompt"]; + if ( + currentCode !== thisNodesCode && + !componentsToIgnore.includes(data.type) && + !(data.node?.edited ?? 
false) + ) { + setIsOutdated(true); + } else { + setIsOutdated(false); + } + // template.code can be undefined + }, [data.node?.template?.code?.value, templates, setIsOutdated]); +}; + +export default useCheckCodeValidity; diff --git a/src/frontend/src/customNodes/hooks/use-fetch-data-on-mount.tsx b/src/frontend/src/CustomNodes/hooks/use-fetch-data-on-mount.tsx similarity index 100% rename from src/frontend/src/customNodes/hooks/use-fetch-data-on-mount.tsx rename to src/frontend/src/CustomNodes/hooks/use-fetch-data-on-mount.tsx diff --git a/src/frontend/src/customNodes/hooks/use-handle-new-value.tsx b/src/frontend/src/CustomNodes/hooks/use-handle-new-value.tsx similarity index 100% rename from src/frontend/src/customNodes/hooks/use-handle-new-value.tsx rename to src/frontend/src/CustomNodes/hooks/use-handle-new-value.tsx diff --git a/src/frontend/src/customNodes/hooks/use-handle-node-class.tsx b/src/frontend/src/CustomNodes/hooks/use-handle-node-class.tsx similarity index 100% rename from src/frontend/src/customNodes/hooks/use-handle-node-class.tsx rename to src/frontend/src/CustomNodes/hooks/use-handle-node-class.tsx diff --git a/src/frontend/src/customNodes/hooks/use-handle-refresh-buttons.tsx b/src/frontend/src/CustomNodes/hooks/use-handle-refresh-buttons.tsx similarity index 100% rename from src/frontend/src/customNodes/hooks/use-handle-refresh-buttons.tsx rename to src/frontend/src/CustomNodes/hooks/use-handle-refresh-buttons.tsx diff --git a/src/frontend/src/CustomNodes/hooks/use-icon-render.tsx b/src/frontend/src/CustomNodes/hooks/use-icon-render.tsx new file mode 100644 index 000000000..181b4f515 --- /dev/null +++ b/src/frontend/src/CustomNodes/hooks/use-icon-render.tsx @@ -0,0 +1,45 @@ +import { useCallback } from "react"; +import { NodeDataType } from "../../types/flow"; + +const useIconNodeRender = ( + data: NodeDataType, + types: { [key: string]: string }, + nodeColors: { [key: string]: string }, + name: string, + showNode: boolean, + isEmoji: boolean, + nodeIconFragment: (iconElement: string) => JSX.Element, + checkNodeIconFragment: ( + iconColor: string, + iconName: string, + iconClassName: string, + ) => JSX.Element, +) => { + const iconNodeRender = useCallback(() => { + const iconElement = data?.node?.icon; + const iconColor = nodeColors[types[data.type]]; + const iconName = + iconElement || (data.node?.flow ? "group_components" : name); + const iconClassName = `generic-node-icon ${ + !showNode ? 
" absolute inset-x-6 h-12 w-12 " : "" + }`; + if (iconElement && isEmoji) { + return nodeIconFragment(iconElement); + } else { + return checkNodeIconFragment(iconColor, iconName, iconClassName); + } + }, [ + data, + types, + nodeColors, + name, + showNode, + isEmoji, + nodeIconFragment, + checkNodeIconFragment, + ]); + + return iconNodeRender; +}; + +export default useIconNodeRender; diff --git a/src/frontend/src/CustomNodes/hooks/use-icons-status.tsx b/src/frontend/src/CustomNodes/hooks/use-icons-status.tsx new file mode 100644 index 000000000..ec378fac2 --- /dev/null +++ b/src/frontend/src/CustomNodes/hooks/use-icons-status.tsx @@ -0,0 +1,54 @@ +import IconComponent from "../../components/genericIconComponent"; +import Checkmark from "../../components/ui/checkmark"; +import Loading from "../../components/ui/loading"; +import Xmark from "../../components/ui/xmark"; +import { BuildStatus } from "../../constants/enums"; +import { VertexBuildTypeAPI } from "../../types/api"; + +const useIconStatus = ( + buildStatus: BuildStatus | undefined, + validationStatus: VertexBuildTypeAPI | null, +) => { + const renderIconStatus = () => { + if (buildStatus === BuildStatus.BUILDING) { + return ; + } else { + return ( + <> + + {validationStatus && validationStatus.valid ? ( + + ) : validationStatus && + !validationStatus.valid && + buildStatus === BuildStatus.INACTIVE ? ( + + ) : buildStatus === BuildStatus.ERROR || + (validationStatus && !validationStatus.valid) ? ( + + ) : ( + + )} + + ); + } + }; + + return renderIconStatus(); +}; + +export default useIconStatus; diff --git a/src/frontend/src/CustomNodes/hooks/use-update-node-code.tsx b/src/frontend/src/CustomNodes/hooks/use-update-node-code.tsx new file mode 100644 index 000000000..f1593597f --- /dev/null +++ b/src/frontend/src/CustomNodes/hooks/use-update-node-code.tsx @@ -0,0 +1,39 @@ +import { cloneDeep } from "lodash"; // or any other deep cloning library you prefer +import { useCallback } from "react"; +import { APIClassType } from "../../types/api"; + +const useUpdateNodeCode = ( + dataId: string, + dataNode: APIClassType, // Define YourNodeType according to your data structure + setNode: (id: string, callback: (oldNode) => any) => void, + setIsOutdated: (value: boolean) => void, + updateNodeInternals: (id: string) => void, +) => { + const updateNodeCode = useCallback( + (newNodeClass: APIClassType, code: string, name: string) => { + setNode(dataId, (oldNode) => { + let newNode = cloneDeep(oldNode); + + newNode.data = { + ...newNode.data, + node: newNodeClass, + description: newNodeClass.description ?? dataNode.description, + display_name: newNodeClass.display_name ?? dataNode.display_name, + edited: false, + }; + + newNode.data.node.template[name].value = code; + setIsOutdated(false); + + return newNode; + }); + + updateNodeInternals(dataId); + }, + [dataId, dataNode, setNode, setIsOutdated, updateNodeInternals], + ); + + return updateNodeCode; +}; + +export default useUpdateNodeCode; diff --git a/src/frontend/src/CustomNodes/hooks/use-update-validation-status.tsx b/src/frontend/src/CustomNodes/hooks/use-update-validation-status.tsx new file mode 100644 index 000000000..2a7153dfb --- /dev/null +++ b/src/frontend/src/CustomNodes/hooks/use-update-validation-status.tsx @@ -0,0 +1,18 @@ +import { useEffect } from "react"; + +const useUpdateValidationStatus = (dataId, flowPool, setValidationStatus) => { + useEffect(() => { + const relevantData = + flowPool[dataId] && flowPool[dataId]?.length > 0 + ? 
flowPool[dataId][flowPool[dataId].length - 1] + : null; + if (relevantData) { + // Extract validation information from relevantData and update the validationStatus state + setValidationStatus(relevantData); + } else { + setValidationStatus(null); + } + }, [flowPool[dataId], dataId, setValidationStatus]); +}; + +export default useUpdateValidationStatus; diff --git a/src/frontend/src/CustomNodes/hooks/use-validation-status-string.tsx b/src/frontend/src/CustomNodes/hooks/use-validation-status-string.tsx new file mode 100644 index 000000000..acc4a1190 --- /dev/null +++ b/src/frontend/src/CustomNodes/hooks/use-validation-status-string.tsx @@ -0,0 +1,22 @@ +import { useEffect } from "react"; + +const useValidationStatusString = (validationStatus, setValidationString) => { + useEffect(() => { + if (validationStatus?.data.logs) { + // if it is not a string turn it into a string + let newValidationString = ""; + if (Array.isArray(validationStatus.data.logs)) { + newValidationString = validationStatus.data.logs + .map((log) => (log?.message ? log.message : JSON.stringify(log))) + .join("\n"); + } + if (typeof newValidationString !== "string") { + newValidationString = JSON.stringify(validationStatus.data.logs); + } + + setValidationString(newValidationString); + } + }, [validationStatus, validationStatus?.data.logs, setValidationString]); +}; + +export default useValidationStatusString; diff --git a/src/frontend/src/customNodes/utils/get-field-title.tsx b/src/frontend/src/CustomNodes/utils/get-field-title.tsx similarity index 100% rename from src/frontend/src/customNodes/utils/get-field-title.tsx rename to src/frontend/src/CustomNodes/utils/get-field-title.tsx diff --git a/src/frontend/src/customNodes/utils/sort-fields.tsx b/src/frontend/src/CustomNodes/utils/sort-fields.tsx similarity index 100% rename from src/frontend/src/customNodes/utils/sort-fields.tsx rename to src/frontend/src/CustomNodes/utils/sort-fields.tsx diff --git a/src/frontend/src/alerts/error/index.tsx b/src/frontend/src/alerts/error/index.tsx index 3690590b9..b70a5ae45 100644 --- a/src/frontend/src/alerts/error/index.tsx +++ b/src/frontend/src/alerts/error/index.tsx @@ -51,13 +51,15 @@ export default function ErrorAlert({ />
-

{title}

+

{title}

{list?.length !== 0 && list?.some((item) => item !== null && item !== undefined) ? (
    {list.map((item, index) => ( -
  • {item}
  • +
  • + {item} +
  • ))}
diff --git a/src/frontend/src/alerts/notice/index.tsx b/src/frontend/src/alerts/notice/index.tsx index fb29954ea..dcb034691 100644 --- a/src/frontend/src/alerts/notice/index.tsx +++ b/src/frontend/src/alerts/notice/index.tsx @@ -47,7 +47,7 @@ export default function NoticeAlert({ />
-

+

{title}

diff --git a/src/frontend/src/alerts/success/index.tsx b/src/frontend/src/alerts/success/index.tsx index db62c8432..270ae5515 100644 --- a/src/frontend/src/alerts/success/index.tsx +++ b/src/frontend/src/alerts/success/index.tsx @@ -45,7 +45,7 @@ export default function SuccessAlert({ />

-

{title}

+

{title}

diff --git a/src/frontend/src/assets/profile-circle.png b/src/frontend/src/assets/profile-circle.png new file mode 100644 index 000000000..e63f97c59 Binary files /dev/null and b/src/frontend/src/assets/profile-circle.png differ diff --git a/src/frontend/src/components/accordionComponent/index.tsx b/src/frontend/src/components/accordionComponent/index.tsx index 7c5562e7f..c9c21b8b2 100644 --- a/src/frontend/src/components/accordionComponent/index.tsx +++ b/src/frontend/src/components/accordionComponent/index.tsx @@ -7,7 +7,6 @@ import { } from "../../components/ui/accordion"; import { AccordionComponentType } from "../../types/components"; import { cn } from "../../utils/utils"; -import ShadTooltip from "../shadTooltipComponent"; export default function AccordionComponent({ trigger, diff --git a/src/frontend/src/components/arrayReaderComponent/index.tsx b/src/frontend/src/components/arrayReaderComponent/index.tsx index bcbfde010..6e151d426 100644 --- a/src/frontend/src/components/arrayReaderComponent/index.tsx +++ b/src/frontend/src/components/arrayReaderComponent/index.tsx @@ -1,10 +1,12 @@ +import TableAutoCellRender from "../tableComponent/components/tableAutoCellRender"; + export default function ArrayReader({ array }: { array: any[] }): JSX.Element { //TODO check array type return (
    {array.map((item, index) => ( -
  • {item}
  • +
  • {}
  • ))}
diff --git a/src/frontend/src/components/cardComponent/index.tsx b/src/frontend/src/components/cardComponent/index.tsx index 09b8ff833..d169a598a 100644 --- a/src/frontend/src/components/cardComponent/index.tsx +++ b/src/frontend/src/components/cardComponent/index.tsx @@ -27,8 +27,8 @@ import { import { Checkbox } from "../ui/checkbox"; import { FormControl, FormField } from "../ui/form"; import Loading from "../ui/loading"; -import { convertTestName } from "./utils/convert-test-name"; import DragCardComponent from "./components/dragCardComponent"; +import { convertTestName } from "./utils/convert-test-name"; export default function CollectionCardComponent({ data, @@ -217,7 +217,7 @@ export default function CollectionCardComponent({ data-testid={`card-${convertTestName(data.name)}`} //TODO check color schema className={cn( - "group relative flex min-h-[11rem] flex-col justify-between overflow-hidden transition-all hover:bg-muted/50 hover:shadow-md hover:dark:bg-[#ffffff10]", + "group relative flex h-[11rem] flex-col justify-between overflow-hidden hover:bg-muted/50 hover:shadow-md hover:dark:bg-[#5f5f5f0e]", disabled ? "pointer-events-none opacity-50" : "", onClick ? "cursor-pointer" : "", isSelectedCard ? "border border-selected" : "", diff --git a/src/frontend/src/components/chatComponent/index.tsx b/src/frontend/src/components/chatComponent/index.tsx index 81dade485..7fad5a882 100644 --- a/src/frontend/src/components/chatComponent/index.tsx +++ b/src/frontend/src/components/chatComponent/index.tsx @@ -1,40 +1,53 @@ import { Transition } from "@headlessui/react"; +import { useHotkeys } from "react-hotkeys-hook"; import { useEffect, useMemo, useRef, useState } from "react"; import IOModal from "../../modals/IOModal"; import ApiModal from "../../modals/apiModal/views"; import ShareModal from "../../modals/shareModal"; import useFlowStore from "../../stores/flowStore"; import useFlowsManagerStore from "../../stores/flowsManagerStore"; +import { useShortcutsStore } from "../../stores/shortcuts"; import { useStoreStore } from "../../stores/storeStore"; -import { classNames } from "../../utils/utils"; +import { classNames, isThereModal } from "../../utils/utils"; import ForwardedIconComponent from "../genericIconComponent"; import { Separator } from "../ui/separator"; export default function FlowToolbar(): JSX.Element { - const [open, setOpen] = useState(false); + const preventDefault = true; + const [open, setOpen] = useState(false); + const [openCodeModal, setOpenCodeModal] = useState(false); + const [openShareModal, setOpenShareModal] = useState(false); + function handleAPIWShortcut(e: KeyboardEvent) { + if (isThereModal() && !openCodeModal) return; + setOpenCodeModal((oldOpen) => !oldOpen); + } + + function handleChatWShortcut(e: KeyboardEvent) { + if (isThereModal() && !open) return; + if (useFlowStore.getState().hasIO) { + setOpen((oldState) => !oldState); + } + } + + function handleShareWShortcut(e: KeyboardEvent) { + if (isThereModal() && !openShareModal) return; + setOpenShareModal((oldState) => !oldState); + } + + const openPlayground = useShortcutsStore((state) => state.open); + const api = useShortcutsStore((state) => state.api); + const flow = useShortcutsStore((state) => state.flow); + + useHotkeys(openPlayground, handleChatWShortcut, { preventDefault }); + useHotkeys(api, handleAPIWShortcut, { preventDefault }); + useHotkeys(flow, handleShareWShortcut, { preventDefault }); + const hasIO = useFlowStore((state) => state.hasIO); const hasStore = useStoreStore((state) => 
state.hasStore); const validApiKey = useStoreStore((state) => state.validApiKey); const hasApiKey = useStoreStore((state) => state.hasApiKey); const currentFlow = useFlowsManagerStore((state) => state.currentFlow); - useEffect(() => { - const handleKeyDown = (event: KeyboardEvent) => { - if ( - (event.key === "K" || event.key === "k") && - (event.metaKey || event.ctrlKey) && - useFlowStore.getState().hasIO - ) { - event.preventDefault(); - setOpen((oldState) => !oldState); - } - }; - document.addEventListener("keydown", handleKeyDown); - return () => { - document.removeEventListener("keydown", handleKeyDown); - }; - }, []); - const prevNodesRef = useRef(); const ModalMemo = useMemo( @@ -43,6 +56,8 @@ export default function FlowToolbar(): JSX.Element { is_component={false} component={currentFlow!} disabled={!hasApiKey || !validApiKey || !hasStore} + open={openShareModal} + setOpen={setOpenShareModal} > + )} @@ -186,20 +191,45 @@ export default function Header(): JSX.Element { - + {!autoLogin && ( + <> + +
+ + + {userData?.username ?? "User"} +
+
+ + + )} General ({ ...col, resizable: idx !== columns.length - 1, @@ -22,6 +25,7 @@ function RecordsOutputComponent({ return ( + {isMac ? ( + + ) : ( + filteredShortcut[0] + )} + + {filteredShortcut.map((key, idx) => { + if (idx > 0) { + return {key.toUpperCase()} ; + } + })} + + ) : ( + <> + {shortcutWPlus[0].toLowerCase() === "space" ? ( + "Space" + ) : shortcutWPlus[0].length <= 1 ? ( + shortcutWPlus[0] + ) : isMac ? ( + + ) : ( + shortcutWPlus[0] + )} + {shortcutWPlus.map((key, idx) => { + if (idx > 0) { + return {key.toUpperCase()} ; + } + })} + + ); +} diff --git a/src/frontend/src/components/sidebarComponent/components/sideBarButtons/index.tsx b/src/frontend/src/components/sidebarComponent/components/sideBarButtons/index.tsx index 10f3cd54d..5b382bd15 100644 --- a/src/frontend/src/components/sidebarComponent/components/sideBarButtons/index.tsx +++ b/src/frontend/src/components/sidebarComponent/components/sideBarButtons/index.tsx @@ -1,7 +1,6 @@ import { Link } from "react-router-dom"; import { cn } from "../../../../utils/utils"; import { buttonVariants } from "../../../ui/button"; -import ForwardedIconComponent from "../../../genericIconComponent"; type SideBarButtonsComponentProps = { items: { diff --git a/src/frontend/src/components/sidebarComponent/components/sideBarFolderButtons/index.tsx b/src/frontend/src/components/sidebarComponent/components/sideBarFolderButtons/index.tsx index e1c39b285..d43f753c5 100644 --- a/src/frontend/src/components/sidebarComponent/components/sideBarFolderButtons/index.tsx +++ b/src/frontend/src/components/sidebarComponent/components/sideBarFolderButtons/index.tsx @@ -3,6 +3,7 @@ import { useLocation } from "react-router-dom"; import { FolderType } from "../../../../pages/MainPage/entities"; import { addFolder, updateFolder } from "../../../../pages/MainPage/services"; import { handleDownloadFolderFn } from "../../../../pages/MainPage/utils/handle-download-folder"; +import useAlertStore from "../../../../stores/alertStore"; import useFlowsManagerStore from "../../../../stores/flowsManagerStore"; import { useFolderStore } from "../../../../stores/foldersStore"; import { handleKeyDown } from "../../../../utils/reactflowUtils"; @@ -15,16 +16,13 @@ import { Input } from "../../../ui/input"; import useFileDrop from "../../hooks/use-on-file-drop"; type SideBarFoldersButtonsComponentProps = { - folders: FolderType[]; pathname: string; handleChangeFolder?: (id: string) => void; - handleEditFolder?: (item: FolderType) => void; handleDeleteFolder?: (item: FolderType) => void; }; const SideBarFoldersButtonsComponent = ({ pathname, handleChangeFolder, - handleEditFolder, handleDeleteFolder, }: SideBarFoldersButtonsComponentProps) => { const refInput = useRef(null); @@ -51,6 +49,8 @@ const SideBarFoldersButtonsComponent = ({ const location = useLocation(); const folderId = location?.state?.folderId ?? 
myCollectionId; const getFolderById = useFolderStore((state) => state.getFolderById); + const setErrorData = useAlertStore((state) => state.setErrorData); + const setSuccessData = useAlertStore((state) => state.setSuccessData); const handleFolderChange = (folderId: string) => { getFolderById(folderId); @@ -62,7 +62,20 @@ const SideBarFoldersButtonsComponent = ({ ); const handleUploadFlowsToFolder = () => { - uploadFolder(folderId); + uploadFolder(folderId) + .then(() => { + getFolderById(folderId); + setSuccessData({ + title: "Uploaded successfully", + }); + }) + .catch((err) => { + console.log(err); + setErrorData({ + title: `Error on upload`, + list: [err["response"]["data"]], + }); + }); }; const handleDownloadFolder = (id: string) => { diff --git a/src/frontend/src/components/sidebarComponent/index.tsx b/src/frontend/src/components/sidebarComponent/index.tsx index b4fbf11b3..efec5da1e 100644 --- a/src/frontend/src/components/sidebarComponent/index.tsx +++ b/src/frontend/src/components/sidebarComponent/index.tsx @@ -5,9 +5,6 @@ import { cn } from "../../utils/utils"; import HorizontalScrollFadeComponent from "../horizontalScrollFadeComponent"; import SideBarButtonsComponent from "./components/sideBarButtons"; import SideBarFoldersButtonsComponent from "./components/sideBarFolderButtons"; -import { addFolder } from "../../pages/MainPage/services"; -import { useNavigate } from "react-router-dom"; -import useFlowStore from "../../stores/flowStore"; type SidebarNavProps = { items: { @@ -15,7 +12,6 @@ type SidebarNavProps = { title: string; icon: React.ReactNode; }[]; - handleOpenNewFolderModal?: () => void; handleChangeFolder?: (id: string) => void; handleEditFolder?: (item: FolderType) => void; handleDeleteFolder?: (item: FolderType) => void; @@ -48,10 +44,8 @@ export default function SidebarNav({ folders?.length > 0 && isFolderPath && ( ) diff --git a/src/frontend/src/components/stringReaderComponent/index.tsx b/src/frontend/src/components/stringReaderComponent/index.tsx index bc24c4510..8b1b0e41f 100644 --- a/src/frontend/src/components/stringReaderComponent/index.tsx +++ b/src/frontend/src/components/stringReaderComponent/index.tsx @@ -3,9 +3,5 @@ export default function StringReader({ }: { string: string; }): JSX.Element { - return ( - - {string} - - ); + return {string}; } diff --git a/src/frontend/src/components/tableComponent/components/ResetColumns/index.tsx b/src/frontend/src/components/tableComponent/components/ResetColumns/index.tsx new file mode 100644 index 000000000..5bd4c15aa --- /dev/null +++ b/src/frontend/src/components/tableComponent/components/ResetColumns/index.tsx @@ -0,0 +1,20 @@ +import { cn } from "../../../../utils/utils"; + +export default function ResetColumns({ + resetGrid, +}: { + resetGrid: () => void; +}): JSX.Element { + return ( +
+ { + resetGrid(); + }} + > + Reset Columns + +
+ ); +} diff --git a/src/frontend/src/components/tableComponent/components/TableOptions/index.tsx b/src/frontend/src/components/tableComponent/components/TableOptions/index.tsx new file mode 100644 index 000000000..7ad2d11f3 --- /dev/null +++ b/src/frontend/src/components/tableComponent/components/TableOptions/index.tsx @@ -0,0 +1,95 @@ +import { cn } from "../../../../utils/utils"; +import ShadTooltip from "../../../shadTooltipComponent"; +import { Button } from "../../../ui/button"; +import IconComponent from "../../../genericIconComponent"; + +export default function TableOptions({ + resetGrid, + duplicateRow, + deleteRow, + hasSelection, + stateChange, +}: { + resetGrid: () => void; + duplicateRow?: () => void; + deleteRow?: () => void; + hasSelection: boolean; + stateChange: boolean; +}): JSX.Element { + return ( +
+
+
+ + + +
+ {duplicateRow && ( +
+ Select items to duplicate + ) : ( + Duplicate selected items + ) + } + > + + +
+ )} + {deleteRow && ( +
+ Select items to delete + ) : ( + Delete selected items + ) + } + > + + +
+ )}{" "} +
+
+ ); +} diff --git a/src/frontend/src/components/tableAutoCellRender/index.tsx b/src/frontend/src/components/tableComponent/components/tableAutoCellRender/index.tsx similarity index 63% rename from src/frontend/src/components/tableAutoCellRender/index.tsx rename to src/frontend/src/components/tableComponent/components/tableAutoCellRender/index.tsx index 5c59f6430..4a6020f78 100644 --- a/src/frontend/src/components/tableAutoCellRender/index.tsx +++ b/src/frontend/src/components/tableComponent/components/tableAutoCellRender/index.tsx @@ -1,30 +1,26 @@ import { CustomCellRendererProps } from "ag-grid-react"; -import { cn, isTimeStampString } from "../../utils/utils"; -import ArrayReader from "../arrayReaderComponent"; -import DateReader from "../dateReaderComponent"; -import NumberReader from "../numberReader"; -import ObjectRender from "../objectRender"; -import StringReader from "../stringReaderComponent"; -import { Badge } from "../ui/badge"; +import { cn, isTimeStampString } from "../../../../utils/utils"; +import ArrayReader from "../../../arrayReaderComponent"; +import DateReader from "../../../dateReaderComponent"; +import NumberReader from "../../../numberReader"; +import ObjectRender from "../../../objectRender"; +import StringReader from "../../../stringReaderComponent"; +import { Badge } from "../../../ui/badge"; export default function TableAutoCellRender({ value, -}: CustomCellRendererProps) { +}: CustomCellRendererProps | { value: any }) { function getCellType() { switch (typeof value) { case "object": if (value === null) { return String(value); } else if (Array.isArray(value)) { - return ; - } else if (value.definitions) { - // use a custom render defined by the sender + return ; } else { return ; } - break; case "string": - console.log(isTimeStampString(value), value); if (isTimeStampString(value)) { return ; } @@ -52,7 +48,7 @@ export default function TableAutoCellRender({ } return ( -
+
{getCellType()}
); diff --git a/src/frontend/src/components/tableNodeCellRender/index.tsx b/src/frontend/src/components/tableComponent/components/tableNodeCellRender/index.tsx similarity index 86% rename from src/frontend/src/components/tableNodeCellRender/index.tsx rename to src/frontend/src/components/tableComponent/components/tableNodeCellRender/index.tsx index 5291b1cce..86db7de9c 100644 --- a/src/frontend/src/components/tableNodeCellRender/index.tsx +++ b/src/frontend/src/components/tableComponent/components/tableNodeCellRender/index.tsx @@ -1,33 +1,26 @@ -import { CustomCellEditorProps, CustomCellRendererProps } from "ag-grid-react"; -import { classNames, cn, isTimeStampString } from "../../utils/utils"; -import ArrayReader from "../arrayReaderComponent"; -import DateReader from "../dateReaderComponent"; -import NumberReader from "../numberReader"; -import ObjectRender from "../objectRender"; -import StringReader from "../stringReaderComponent"; -import { Badge } from "../ui/badge"; +import { CustomCellRendererProps } from "ag-grid-react"; import { cloneDeep } from "lodash"; -import { type } from "os"; +import { useState } from "react"; +import useFlowStore from "../../../../stores/flowStore"; import { convertObjToArray, convertValuesToNumbers, hasDuplicateKeys, scapedJSONStringfy, -} from "../../utils/reactflowUtils"; -import CodeAreaComponent from "../codeAreaComponent"; -import DictComponent from "../dictComponent"; -import Dropdown from "../dropdownComponent"; -import FloatComponent from "../floatComponent"; -import InputFileComponent from "../inputFileComponent"; -import InputGlobalComponent from "../inputGlobalComponent"; -import InputListComponent from "../inputListComponent"; -import IntComponent from "../intComponent"; -import KeypairListComponent from "../keypairListComponent"; -import PromptAreaComponent from "../promptComponent"; -import TextAreaComponent from "../textAreaComponent"; -import ToggleShadComponent from "../toggleShadComponent"; -import { useState } from "react"; -import useFlowStore from "../../stores/flowStore"; +} from "../../../../utils/reactflowUtils"; +import { classNames } from "../../../../utils/utils"; +import CodeAreaComponent from "../../../codeAreaComponent"; +import DictComponent from "../../../dictComponent"; +import Dropdown from "../../../dropdownComponent"; +import FloatComponent from "../../../floatComponent"; +import InputFileComponent from "../../../inputFileComponent"; +import InputGlobalComponent from "../../../inputGlobalComponent"; +import InputListComponent from "../../../inputListComponent"; +import IntComponent from "../../../intComponent"; +import KeypairListComponent from "../../../keypairListComponent"; +import PromptAreaComponent from "../../../promptComponent"; +import TextAreaComponent from "../../../textAreaComponent"; +import ToggleShadComponent from "../../../toggleShadComponent"; export default function TableNodeCellRender({ node: { data }, @@ -266,7 +259,7 @@ export default function TableNodeCellRender({ } return ( -
+
{getCellType()}
); diff --git a/src/frontend/src/components/tableComponent/components/tableToggleCellRender/index.tsx b/src/frontend/src/components/tableComponent/components/tableToggleCellRender/index.tsx new file mode 100644 index 000000000..5fcf1b875 --- /dev/null +++ b/src/frontend/src/components/tableComponent/components/tableToggleCellRender/index.tsx @@ -0,0 +1,24 @@ +import { CustomCellRendererProps } from "ag-grid-react"; +import { useState } from "react"; +import ToggleShadComponent from "../../../toggleShadComponent"; + +export default function TableToggleCellRender({ + value: { name, enabled, setEnabled }, +}: CustomCellRendererProps) { + const [value, setValue] = useState(enabled); + + return ( +
+ { + setValue(e); + setEnabled(e); + }} + size="small" + editNode={true} + /> +
+ ); +} diff --git a/src/frontend/src/components/tableTooltipRender/index.tsx b/src/frontend/src/components/tableComponent/components/tableTooltipRender/index.tsx similarity index 100% rename from src/frontend/src/components/tableTooltipRender/index.tsx rename to src/frontend/src/components/tableComponent/components/tableTooltipRender/index.tsx diff --git a/src/frontend/src/components/tableComponent/index.tsx b/src/frontend/src/components/tableComponent/index.tsx index 6113316be..ee7455e01 100644 --- a/src/frontend/src/components/tableComponent/index.tsx +++ b/src/frontend/src/components/tableComponent/index.tsx @@ -1,22 +1,28 @@ import "ag-grid-community/styles/ag-grid.css"; // Mandatory CSS required by the grid import "ag-grid-community/styles/ag-theme-quartz.css"; // Optional Theme applied to the grid import { AgGridReact, AgGridReactProps } from "ag-grid-react"; -import { ElementRef, forwardRef, useCallback } from "react"; +import { ElementRef, forwardRef, useRef, useState } from "react"; import { DEFAULT_TABLE_ALERT_MSG, DEFAULT_TABLE_ALERT_TITLE, } from "../../constants/constants"; import { useDarkStore } from "../../stores/darkStore"; import "../../style/ag-theme-shadcn.css"; // Custom CSS applied to the grid -import { cn } from "../../utils/utils"; +import { cn, toTitleCase } from "../../utils/utils"; import ForwardedIconComponent from "../genericIconComponent"; import { Alert, AlertDescription, AlertTitle } from "../ui/alert"; +import TableOptions from "./components/TableOptions"; +import { useParams } from "react-router-dom"; +import resetGrid from "./utils/reset-grid-columns"; interface TableComponentProps extends AgGridReactProps { columnDefs: NonNullable; rowData: NonNullable; alertTitle?: string; alertDescription?: string; + editable?: boolean | string[]; + onDelete?: () => void; + onDuplicate?: () => void; } const TableComponent = forwardRef< @@ -31,7 +37,73 @@ const TableComponent = forwardRef< }, ref, ) => { + let colDef = props.columnDefs.map((col, index) => { + let newCol = { + ...col, + headerName: toTitleCase(col.headerName), + }; + if (index === props.columnDefs.length - 1) { + newCol = { + ...newCol, + resizable: false, + }; + } + if (props.onSelectionChanged && index === 0) { + newCol = { + ...newCol, + checkboxSelection: true, + headerCheckboxSelection: true, + headerCheckboxSelectionFilteredOnly: true, + }; + } + if ( + (typeof props.editable === "boolean" && props.editable) || + (Array.isArray(props.editable) && + props.editable.includes(newCol.headerName ?? "")) + ) { + newCol = { + ...newCol, + editable: true, + }; + } + return newCol; + }); + const gridRef = useRef(null); + // @ts-ignore + const realRef: React.MutableRefObject = ref?.current + ? 
ref + : gridRef; const dark = useDarkStore((state) => state.dark); + const initialColumnDefs = useRef(colDef); + const [columnStateChange, setColumnStateChange] = useState(false); + + const makeLastColumnNonResizable = (columnDefs) => { + columnDefs.forEach((colDef, index) => { + colDef.resizable = index !== columnDefs.length - 1; + }); + return columnDefs; + }; + + const onGridReady = (params) => { + // @ts-ignore + realRef.current = params; + const updatedColumnDefs = makeLastColumnNonResizable([...colDef]); + params.api.setGridOption("columnDefs", updatedColumnDefs); + initialColumnDefs.current = params.api.getColumnDefs(); + if (props.onGridReady) props.onGridReady(params); + setTimeout(() => { + setColumnStateChange(false); + }, 50); + }; + + const onColumnMoved = (params) => { + const updatedColumnDefs = makeLastColumnNonResizable( + params.columnApi.getAllGridColumns().map((col) => col.getColDef()), + ); + params.api.setGridOption("columnDefs", updatedColumnDefs); + if (props.onColumnMoved) props.onColumnMoved(params); + }; + if (props.rowData.length === 0) { return (
@@ -46,21 +118,46 @@ const TableComponent = forwardRef<
); } - return (
{ + console.log(e); + if (e.sources.some((source) => source.includes("column"))) { + setColumnStateChange(true); + } + }} + /> + 0} + duplicateRow={props.onDuplicate ? props.onDuplicate : undefined} + deleteRow={props.onDelete ? props.onDelete : undefined} + resetGrid={() => { + console.log("teste"); + resetGrid(realRef, initialColumnDefs); + setTimeout(() => { + setColumnStateChange(false); + }, 100); }} - ref={ref} />
); diff --git a/src/frontend/src/components/tableComponent/utils/reset-grid-columns.tsx b/src/frontend/src/components/tableComponent/utils/reset-grid-columns.tsx new file mode 100644 index 000000000..6dc841104 --- /dev/null +++ b/src/frontend/src/components/tableComponent/utils/reset-grid-columns.tsx @@ -0,0 +1,12 @@ +export default function resetGrid(ref, initialColumnDefs) { + if (ref?.current && ref?.current.api) { + ref.current.api.resetColumnState(); + if (initialColumnDefs.current) { + const resetColumns = ref.current.api.applyColumnState({ + state: initialColumnDefs.current, + applyOrder: true, + }); + return resetColumns; + } + } +} diff --git a/src/frontend/src/components/ui/button.tsx b/src/frontend/src/components/ui/button.tsx index cbcadf2c3..92d6f41a9 100644 --- a/src/frontend/src/components/ui/button.tsx +++ b/src/frontend/src/components/ui/button.tsx @@ -5,7 +5,7 @@ import { cn } from "../../utils/utils"; import ForwardedIconComponent from "../genericIconComponent"; const buttonVariants = cva( - "inline-flex items-center justify-center gap-2 rounded-md text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:opacity-50 disabled:pointer-events-none ring-offset-background", + "inline-flex items-center justify-center rounded-md text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:opacity-50 disabled:pointer-events-none ring-offset-background", { variants: { variant: { @@ -59,6 +59,7 @@ const Button = React.forwardRef( variant, size, loading, + type, disabled, asChild = false, children, @@ -76,6 +77,7 @@ const Button = React.forwardRef( diff --git a/src/frontend/src/components/ui/refreshButton.tsx b/src/frontend/src/components/ui/refreshButton.tsx index b039772d8..a1bdc18e6 100644 --- a/src/frontend/src/components/ui/refreshButton.tsx +++ b/src/frontend/src/components/ui/refreshButton.tsx @@ -31,11 +31,7 @@ function RefreshButton({ // icon class name should take into account the disabled state and the loading state const disabledIconTextClass = disabled ? "text-muted-foreground" : ""; - const iconClassName = cn( - "h-4 w-4", - isLoading ? "animate-spin" : "animate-wiggle", - disabledIconTextClass - ); + const iconClassName = cn("h-4 w-4 animate-wiggle", disabledIconTextClass); return (