cristhianzl 2024-06-03 15:49:51 -03:00
commit 9d033c9e34
157 changed files with 6860 additions and 5525 deletions

View file

@ -77,7 +77,12 @@ runs:
POETRY_VERSION: ${{ inputs.poetry-version }}
PYTHON_VERSION: ${{ inputs.python-version }}
# Install poetry using the python version installed by setup-python step.
run: pipx install "poetry==$POETRY_VERSION" --python '${{ steps.setup-python.outputs.python-path }}' --verbose
run: |
pipx install "poetry==$POETRY_VERSION" --python '${{ steps.setup-python.outputs.python-path }}' --verbose
# Ensure the poetry binary is available in the PATH.
pipx ensurepath
# Test that the poetry binary is available.
poetry --version
- name: Restore pip and poetry cached dependencies
uses: actions/cache@v4

64
.github/workflows/create-release.yml vendored Normal file
View file

@ -0,0 +1,64 @@
name: Create Release
on:
workflow_dispatch:
inputs:
version:
description: "Version to release"
required: true
type: string
release_type:
description: "Type of release (base or main)"
required: true
type: choice
options:
- base
- main
env:
POETRY_VERSION: "1.8.2"
jobs:
release:
name: Build Langflow
runs-on: ubuntu-latest
outputs:
version: ${{ steps.check-version.outputs.version }}
steps:
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry==$POETRY_VERSION
- name: Set up Python 3.12
uses: actions/setup-python@v5
with:
python-version: "3.12"
cache: "poetry"
- name: Build project for distribution
run: |
if [ "${{ inputs.release_type }}" == "base" ]; then
make build base=true
else
make build main=true
fi
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: dist${{ inputs.release_type }}
path: ${{ inputs.release_type == 'base' && 'src/backend/base/dist' || 'dist' }}
create_release:
name: Create Release Job
runs-on: ubuntu-latest
needs: release
steps:
- uses: actions/download-artifact@v4
with:
name: dist${{ inputs.release_type }}
path: dist
- name: Create Release Notes
uses: ncipollo/release-action@v1
with:
artifacts: "dist/*"
token: ${{ secrets.GITHUB_TOKEN }}
draft: false
generateReleaseNotes: true
prerelease: true
tag: v${{ inputs.version }}
commit: dev

View file

@ -52,3 +52,30 @@ jobs:
push: true
file: ${{ env.DOCKERFILE }}
tags: ${{ env.TAGS }}
- name: Wait for Docker Hub to propagate
run: sleep 120
restart-space:
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.12"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_caching"
with:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
cache-key: ${{ runner.os }}-poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}
- name: Install Python dependencies
run: |
poetry env use ${{ matrix.python-version }}
poetry install
- name: Restart HuggingFace Spaces Build
run: |
poetry run python ./scripts/factory_restart_space.py
env:
HUGGINGFACE_API_TOKEN: ${{ secrets.HUGGINGFACE_API_TOKEN }}

View file

@ -67,27 +67,30 @@ jobs:
else
make publish main=true
fi
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: dist${{ inputs.release_type }}
path: ${{ inputs.release_type == 'base' && 'src/backend/base/dist' || 'dist' }}
call_docker_build:
name: Call Docker Build Workflow
runs-on: ubuntu-latest
needs: release
steps:
- uses: actions/checkout@v4
- uses: ./.github/workflows/docker-build.yml
with:
version: ${{ needs.release.outputs.version }}
release_type: ${{ inputs.release_type }}
uses: langflow-ai/langflow/.github/workflows/docker-build.yml@dev
with:
version: ${{ needs.release.outputs.version }}
release_type: ${{ inputs.release_type }}
secrets: inherit
create_release:
name: Create Release
runs-on: ubuntu-latest
needs: [call_docker_build, release]
needs: [release]
if: ${{ inputs.release_type == 'main' }}
steps:
- uses: actions/download-artifact@v4
with:
name: dist
name: dist${{ inputs.release_type }}
path: dist
- name: Create Release
uses: ncipollo/release-action@v1

View file

@ -25,13 +25,6 @@ repos:
args:
- --fix=lf
- id: trailing-whitespace
- id: pretty-format-json
exclude: ^tsconfig.*.json
args:
- --autofix
- --indent=4
- --no-sort-keys
- id: check-merge-conflict
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.4.2

View file

@ -7,6 +7,7 @@ port ?= 7860
env ?= .env
open_browser ?= true
path = src/backend/base/langflow/frontend
workers ?= 1
codespell:
@poetry install --with spelling
@ -144,10 +145,10 @@ backend:
@-kill -9 $(lsof -t -i:7860)
ifdef login
@echo "Running backend autologin is $(login)";
LANGFLOW_AUTO_LOGIN=$(login) poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio
LANGFLOW_AUTO_LOGIN=$(login) poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio --workers $(workers)
else
@echo "Running backend respecting the .env file";
poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio
poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio --workers $(workers)
endif
build_and_run:

111
README.md
View file

@ -1,21 +1,58 @@
<!-- markdownlint-disable MD030 -->
# [![Langflow](https://github.com/langflow-ai/langflow/blob/dev/docs/static/img/hero.png)](https://www.langflow.org)
# [![Langflow](./docs/static/img/hero.png)](https://www.langflow.org)
### [Langflow](https://www.langflow.org) is a new, visual way to build, iterate and deploy AI apps.
<p align="center"><strong>
A visual framework for building multi-agent and RAG applications
</strong></p>
<p align="center" style="font-size: 12px;">
Open-source, Python-powered, fully customizable, LLM and vector store agnostic
</p>
# ⚡️ Documentation and Community
<p align="center" style="font-size: 12px;">
<a href="https://docs.langflow.org" style="text-decoration: underline;">Docs</a> -
<a href="https://discord.com/invite/EqksyE2EX9" style="text-decoration: underline;">Join our Discord</a> -
<a href="https://twitter.com/langflow_ai" style="text-decoration: underline;">Follow us on X</a> -
<a href="https://huggingface.co/spaces/Langflow/Langflow-Preview" style="text-decoration: underline;">Live demo</a>
</p>
- [Documentation](https://docs.langflow.org)
- [Discord](https://discord.com/invite/EqksyE2EX9)
<p align="center">
<a href="https://github.com/langflow-ai/langflow">
<img src="https://img.shields.io/github/stars/langflow-ai/langflow">
</a>
<a href="https://discord.com/invite/EqksyE2EX9">
<img src="https://img.shields.io/discord/1116803230643527710?label=Discord">
</a>
</p>
# 📦 Installation
<p align="center">
<img src="./docs/static/img/langflow_basic_howto.gif" alt="Langflow basic how-to demo" style="border: 3px solid #211C43;">
</p>
# 📝 Content
- [](#)
- [📝 Content](#-content)
- [📦 Get Started](#-get-started)
- [🎨 Create Flows](#-create-flows)
- [Deploy](#deploy)
- [Deploy Langflow on Google Cloud Platform](#deploy-langflow-on-google-cloud-platform)
- [Deploy on Railway](#deploy-on-railway)
- [Deploy on Render](#deploy-on-render)
- [🖥️ Command Line Interface (CLI)](#-command-line-interface-cli)
- [Usage](#usage)
- [Environment Variables](#environment-variables)
- [👋 Contribute](#-contribute)
- [🌟 Contributors](#-contributors)
- [📄 License](#-license)
# 📦 Get Started
You can install Langflow with pip:
```shell
# Make sure you have Python 3.10 installed on your system.
# Install the pre-release version
# Make sure you have Python >=3.10 installed on your system.
# Install the pre-release version (recommended for the latest updates)
python -m pip install langflow --pre --force-reinstall
# or stable version
@ -28,9 +65,9 @@ Then, run Langflow with:
python -m langflow run
```
You can also preview Langflow in [HuggingFace Spaces](https://huggingface.co/spaces/Langflow/Langflow-Preview). [Clone the space using this link](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true), to create your own Langflow workspace in minutes.
You can also preview Langflow in [HuggingFace Spaces](https://huggingface.co/spaces/Langflow/Langflow-Preview). [Clone the space using this link](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true) to create your own Langflow workspace in minutes.
# 🎨 Creating Flows
# 🎨 Create Flows
Creating flows with Langflow is easy. Simply drag components from the sidebar onto the canvas and connect them to start building your application.
@ -46,6 +83,32 @@ from langflow.load import run_flow_from_json
results = run_flow_from_json("path/to/flow.json", input_value="Hello, World!")
```
# Deploy
## Deploy Langflow on Google Cloud Platform
Follow our step-by-step guide to deploy Langflow on Google Cloud Platform (GCP) using Google Cloud Shell. The guide is available in the [**Langflow in Google Cloud Platform**](https://github.com/langflow-ai/langflow/blob/dev/docs/docs/deployment/gcp-deployment.md) document.
Alternatively, click the **"Open in Cloud Shell"** button below to launch Google Cloud Shell, clone the Langflow repository, and start an **interactive tutorial** that will guide you through the process of setting up the necessary resources and deploying Langflow on your GCP project.
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
## Deploy on Railway
Use this template to deploy Langflow 1.0 Preview on Railway:
[![Deploy 1.0 Preview on Railway](https://railway.app/button.svg)](https://railway.app/template/UsJ1uB?referralCode=MnPSdg)
Or this one to deploy Langflow 0.6.x:
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/JMXEWp?referralCode=MnPSdg)
## Deploy on Render
<a href="https://render.com/deploy?repo=https://github.com/langflow-ai/langflow/tree/dev">
<img src="https://render.com/images/deploy-to-render-button.svg" alt="Deploy to Render" />
</a>
# 🖥️ Command Line Interface (CLI)
Langflow provides a command-line interface (CLI) for easy management and configuration.
@ -87,33 +150,7 @@ You can configure many of the CLI options using environment variables. These can
A sample `.env` file named `.env.example` is included with the project. Copy this file to a new file named `.env` and replace the example values with your actual settings. If you're setting values in both your OS and the `.env` file, the `.env` settings will take precedence.
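For example, a minimal setup looks like this (the `--env-file` flag is documented in the CLI section; the values you put in `.env` are up to you):

```shell
# Copy the sample file and adjust the values you need
cp .env.example .env

# .env values take precedence over the same variables exported in your OS
python -m langflow run --env-file .env
```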
# Deployment
## Deploy Langflow on Google Cloud Platform
Follow our step-by-step guide to deploy Langflow on Google Cloud Platform (GCP) using Google Cloud Shell. The guide is available in the [**Langflow in Google Cloud Platform**](GCP_DEPLOYMENT.md) document.
Alternatively, click the **"Open in Cloud Shell"** button below to launch Google Cloud Shell, clone the Langflow repository, and start an **interactive tutorial** that will guide you through the process of setting up the necessary resources and deploying Langflow on your GCP project.
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
## Deploy on Railway
Use this template to deploy Langflow 1.0 Preview on Railway:
[![Deploy 1.0 Preview on Railway](https://railway.app/button.svg)](https://railway.app/template/UsJ1uB?referralCode=MnPSdg)
Or this one to deploy Langflow 0.6.x:
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/JMXEWp?referralCode=MnPSdg)
## Deploy on Render
<a href="https://render.com/deploy?repo=https://github.com/langflow-ai/langflow/tree/main">
<img src="https://render.com/images/deploy-to-render-button.svg" alt="Deploy to Render" />
</a>
# 👋 Contributing
# 👋 Contribute
We welcome contributions from developers of all levels to our open-source project on GitHub. If you'd like to contribute, please check our [contributing guidelines](./CONTRIBUTING.md) and help make Langflow more accessible.

View file

@ -4,4 +4,6 @@ node_modules
**/node_modules/
dist/
**/build/
src/backend/langflow/frontend
src/backend/langflow/frontend
**/langflow-pre.db
**/langflow.db

View file

@ -72,16 +72,21 @@ COPY Makefile ./
COPY README.md ./
RUN --mount=type=cache,target=/root/.cache \
curl -sSL https://install.python-poetry.org | python3 -
RUN useradd -m -u 1000 user && \
mkdir -p /app/langflow && \
chown -R user:user /app && \
chmod -R u+w /app/langflow
# Update PATH with home/user/.local/bin
ENV PATH="/home/user/.local/bin:${PATH}"
RUN python -m pip install requests && cd ./scripts && python update_dependencies.py
RUN $POETRY_HOME/bin/poetry lock
RUN $POETRY_HOME/bin/poetry build
# Copy virtual environment and built .tar.gz from builder base
RUN useradd -m -u 1000 user
RUN chown -R user:user /app
USER user
# Install the package from the .tar.gz
RUN python -m pip install /app/dist/*.tar.gz
RUN python -m pip install /app/dist/*.tar.gz --user
ENTRYPOINT ["python", "-m", "langflow", "run"]
CMD ["--host", "0.0.0.0", "--port", "7860"]

View file

@ -78,14 +78,20 @@ RUN cd src/frontend && npm run build
COPY src/backend ./src/backend
RUN cp -r src/frontend/build src/backend/base/langflow/frontend
RUN rm -rf src/backend/base/dist
RUN useradd -m -u 1000 user && \
mkdir -p /app/langflow && \
chown -R user:user /app && \
chmod -R u+w /app/langflow
# Update PATH with home/user/.local/bin
ENV PATH="/home/user/.local/bin:${PATH}"
RUN cd src/backend/base && $POETRY_HOME/bin/poetry build
# Copy virtual environment and built .tar.gz from builder base
RUN useradd -m -u 1000 user
RUN chown -R user:user /app
USER user
# Install the package from the .tar.gz
RUN python -m pip install /app/src/backend/base/dist/*.tar.gz
RUN python -m pip install /app/src/backend/base/dist/*.tar.gz --user
ENTRYPOINT ["python", "-m", "langflow", "run"]

View file

@ -0,0 +1 @@
FROM langflowai/langflow:1.0-alpha

View file

@ -12,7 +12,7 @@ services:
# This variable defines where the logs, file storage, monitor data and secret keys are stored.
- LANGFLOW_CONFIG_DIR=app/langflow
volumes:
- langflow-data:app/langflow
- langflow-data:/app/langflow
postgres:
image: postgres:16

View file

@ -7,10 +7,12 @@ import Admonition from "@theme/Admonition";
Langflow provides an API key functionality that allows users to access their individual components and flows without traditional login authentication. The API key is a user-specific token that can be included in the request header or query parameter to authenticate API calls. This documentation outlines how to generate, use, and manage API keys in Langflow.
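For illustration, an authenticated request might look like the sketch below. The `x-api-key` header and query-parameter names and the `/api/v1/run/<flow-id>` endpoint are assumptions not shown on this page, so verify them against the API reference for your version.

```bash
# Sketch only — header name, query-parameter name, and endpoint path are assumptions.
# Option 1: pass the key in a request header
curl -X POST "http://127.0.0.1:7860/api/v1/run/<flow-id>" \
  -H "Content-Type: application/json" \
  -H "x-api-key: <your-api-key>" \
  -d '{"input_value": "Hello, World!"}'

# Option 2: pass the key as a query parameter
curl -X POST "http://127.0.0.1:7860/api/v1/run/<flow-id>?x-api-key=<your-api-key>" \
  -H "Content-Type: application/json" \
  -d '{"input_value": "Hello, World!"}'
```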
<Admonition type="warning">
This feature requires the LANGFLOW_AUTO_LOGIN environment variable to be set
to False. The default user and password are set using the LANGFLOW_SUPERUSER
and LANGFLOW_SUPERUSER_PASSWORD environment variables. The default values are
langflow and langflow, respectively.
The default user and password are set using the LANGFLOW_SUPERUSER and
LANGFLOW_SUPERUSER_PASSWORD environment variables.
The default values are
langflow and langflow, respectively.
</Admonition>
## Generating an API Key

View file

@ -7,12 +7,12 @@ Langflow's Command Line Interface (CLI) is a powerful tool that allows you to in
Running the CLI without any arguments will display a list of available commands and options.
```bash
python -m langflow --help
python -m langflow run --help
# or
python -m langflow
python -m langflow run
```
Each option is detailed below:
Each option for the `run` command is detailed below:
- `--help`: Displays all available options.
- `--host`: Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`.
@ -35,6 +35,41 @@ Each option is detailed below:
These parameters are important for users who need to customize the behavior of Langflow, especially in development or specialized deployment scenarios.
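For example, the options above can be combined on a single command line (values here are only placeholders):

```bash
# Bind to all interfaces on the default port and load settings from a .env file
python -m langflow run --host 0.0.0.0 --port 7860 --env-file .env
```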
### API Key Command
The `api-key` command allows you to create an API key for accessing Langflow's API when `LANGFLOW_AUTO_LOGIN` is set to `True`.
```bash
python -m langflow api-key --help
Usage: langflow api-key [OPTIONS]
Creates an API key for the default superuser if AUTO_LOGIN is enabled.
Args: log_level (str, optional): Logging level. Defaults to "error".
Returns: None
╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ --log-level TEXT Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] │
│ --help Show this message and exit. │
╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
```
Once you run the `api-key` command, it will create an API key for the default superuser if `LANGFLOW_AUTO_LOGIN` is set to `True`.
```bash
python -m langflow api-key
╭─────────────────────────────────────────────────────────────────────╮
│ API Key Created Successfully: │
│ │
│ sk-O0elzoWID1izAH8RUKrnnvyyMwIzHi2Wk-uXWoNJ2Ro │
│ │
│ This is the only time the API key will be displayed. │
│ Make sure to store it in a secure location. │
│ │
│ The API key has been copied to your clipboard. Cmd + V to paste it. │
╰─────────────────────────────────────────────────────────────────────╯
```
### Environment Variables
You can configure many of the CLI options using environment variables. These can be exported in your operating system or added to a `.env` file and loaded using the `--env-file` option.
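A minimal `.env` might look like this (variable names follow the `LANGFLOW_` prefix used above; `LANGFLOW_PORT` is an assumed counterpart to `--port`, and the values are placeholders):

```bash
# .env
LANGFLOW_HOST=127.0.0.1
LANGFLOW_PORT=7860
LANGFLOW_AUTO_LOGIN=False
```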

View file

@ -74,11 +74,6 @@ class DocumentProcessor(CustomComponent):
</div>
<Admonition type="tip">
Check out [FlowRunner Component](../examples/flow-runner) for a more complex
example.
</Admonition>
---
## Rules

View file

@ -100,7 +100,3 @@ The `CustomComponent` class also provides helpful methods for specific tasks (e.
- `field_order`: Controls the display order of fields.
- `icon`: Sets the canvas display icon.
<Admonition type="info" label="Tip">
Check out the [FlowRunner](../examples/flow-runner) example to understand how to call a flow from a custom component.
</Admonition>

View file

@ -1,35 +0,0 @@
import Admonition from "@theme/Admonition";
# Buffer Memory
For certain applications, retaining past interactions is crucial. For that, chains and agents may accept a memory component as one of their input parameters. The `ConversationBufferMemory` component is one of them. It stores messages and extracts them into variables.
## ⛓️ Langflow Example
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/buffer-memory.png",
dark: "img/buffer-memory.png",
}}
style={{
width: "80%",
margin: "20px auto",
display: "flex",
justifyContent: "center",
}}
/>
#### <a target="\_blank" href="json_files/Buffer_Memory.json" download>Download Flow</a>
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`ConversationBufferMemory`](https://python.langchain.com/docs/modules/memory/types/buffer)
- [`ConversationChain`](https://python.langchain.com/docs/modules/chains/)
- [`ChatOpenAI`](https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai)
</Admonition>

View file

@ -0,0 +1,17 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Chat Memory
The **Chat Memory** component restores previous messages given a Session ID, which can be any string.
This component is available under the **Helpers** tab of the Langflow preview.
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
<ReactPlayer playing controls url="/videos/chat_memory.mp4" />
</div>

View file

@ -0,0 +1,21 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Combine Text
With LLM pipelines, combining text from different sources may be as important as splitting text.
The **Combine Text** component concatenates two text inputs into a single chunk using a specified delimiter, such as whitespace or a newline.
Also, check out **Combine Texts (Unsorted)** as a similar alternative.
This component is available under the **Helpers** tab of the Langflow preview.
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
<ReactPlayer playing controls url="/videos/combine_text.mp4" />
</div>

View file

@ -1,41 +0,0 @@
import Admonition from "@theme/Admonition";
# Conversation Chain
This example shows how to instantiate a simple `ConversationChain` component using a Language Model (LLM). Once the Node Status turns green 🟢, the chat will be ready to take in user messages. Here, we used `ChatOpenAI` to act as the required LLM input, but you can use any LLM for this purpose.
<Admonition type="info">
Make sure to always get the API key from the provider.
</Admonition>
## ⛓️ Langflow Example
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/basic-chat.png",
dark: "img/basic-chat.png",
}}
style={{
width: "80%",
margin: "20px auto",
display: "flex",
justifyContent: "center",
}}
/>
#### <a target="\_blank" href="json_files/Basic_Chat.json" download>Download Flow</a>
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`ConversationChain`](https://python.langchain.com/docs/modules/chains/)
- [`ChatOpenAI`](https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai)
</Admonition>

View file

@ -0,0 +1,17 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Create Record
In Langflow, a `Record` has a structure very similar to a Python dictionary. It is a key-value pair data structure.
The **Create Record** component allows you to dynamically create a `Record` from a specified number of inputs. You can add as many key-value pairs as you want (up to 15 😅). Once you've chosen the number of inputs, add keys and fill in their values, or pass values in from other components through the input handles.
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
<ReactPlayer playing controls url="/videos/create_record.mp4" />
</div>

View file

@ -1,57 +0,0 @@
import Admonition from "@theme/Admonition";
# CSV Loader
The `VectorStoreAgent` component retrieves information from one or more vector stores. This example shows a `VectorStoreAgent` connected to a CSV file through the `Chroma` vector store. Process description:
- The `CSVLoader` loads a CSV file into a list of documents.
- The extracted data is then processed by the `CharacterTextSplitter`, which splits the text into small, meaningful chunks (usually sentences).
- These chunks feed the `Chroma` vector store, which converts them into vectors and stores them for fast indexing.
- Finally, the agent accesses the information of the vector store through the `VectorStoreInfo` tool.
<Admonition type="info">
The vector store is used for efficient semantic search, while
`VectorStoreInfo` carries information about it, such as its name and
description. Embeddings are a way to represent words, phrases, or any entities
in a vector space. Learn more about them
[here](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings).
</Admonition>
<Admonition type="tip">
Once you build this flow, ask questions about the data in the chat interface
(e.g., number of rows or columns).
</Admonition>
## ⛓️ Langflow Example
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/csv-loader.png",
dark: "img/csv-loader.png",
}}
style={{
width: "80%",
margin: "20px auto",
display: "flex",
justifyContent: "center",
}}
/>
#### <a target="\_blank" href="json_files/CSV_Loader.json" download>Download Flow</a>
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`CSVLoader`](https://python.langchain.com/docs/integrations/document_loaders/csv)
- [`CharacterTextSplitter`](https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/character_text_splitter)
- [`OpenAIEmbedding`](https://python.langchain.com/docs/integrations/text_embedding/openai)
- [`Chroma`](https://python.langchain.com/docs/integrations/vectorstores/chroma)
- [`VectorStoreInfo`](https://python.langchain.com/docs/modules/data_connection/vectorstores/)
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`VectorStoreAgent`](https://js.langchain.com/docs/modules/agents/tools/how_to/agents_with_vectorstores)
</Admonition>

View file

@ -1,368 +0,0 @@
---
description: Custom Components
hide_table_of_contents: true
---
# FlowRunner Component
The CustomComponent class allows us to create components that interact with Langflow itself. In this example, we will make a component that runs other flows available in "My Collection".
<ZoomableImage
alt="Document Processor Component"
sources={{
light: "img/flow_runner.png",
dark: "img/flow_runner.png",
}}
style={{
width: "30%",
margin: "20px auto",
display: "flex",
justifyContent: "center",
}}
/>
We will cover how to:
- List Collection flows using the _`list_flows`_ method.
- Load a flow using the _`load_flow`_ method.
- Configure a dropdown input field using the _`options`_ parameter.
<details open>
<summary>Example Code</summary>
```python
from langflow.custom import CustomComponent
from langchain.schema import Document
class FlowRunner(CustomComponent):
display_name = "Flow Runner"
description = "Run other flows using a document as input."
def build_config(self):
flows = self.list_flows()
flow_names = [f.name for f in flows]
return {"flow_name": {"options": flow_names,
"display_name": "Flow Name",
},
"document": {"display_name": "Document"}
}
def build(self, flow_name: str, document: Document) -> Document:
# List the flows
flows = self.list_flows()
# Get the flow that matches the selected name
# You can also get the flow by id
# using self.get_flow(flow_id=flow_id)
tweaks = {}
flow = self.get_flow(flow_name=flow_name, tweaks=tweaks)
# Get the page_content from the document
if document and isinstance(document, list):
document = document[0]
page_content = document.page_content
# Use it in the flow
result = flow(page_content)
return Document(page_content=str(result))
```
</details>
<CH.Scrollycoding rows={20} className={""}>
```python
from langflow.custom import CustomComponent
class MyComponent(CustomComponent):
display_name = "Custom Component"
description = "This is a custom component"
def build_config(self):
...
def build(self):
...
```
The typical structure of a Custom Component is composed of _`display_name`_ and _`description`_ attributes, _`build`_ and _`build_config`_ methods.
---
```python
from langflow.custom import CustomComponent
# focus
class FlowRunner(CustomComponent):
# focus
display_name = "Flow Runner"
# focus
description = "Run other flows"
def build_config(self):
...
def build(self):
...
```
Let's start by defining our component's _`display_name`_ and _`description`_.
---
```python
from langflow.custom import CustomComponent
# focus
from langchain.schema import Document
class FlowRunner(CustomComponent):
display_name = "Flow Runner"
description = "Run other flows using a document as input."
def build_config(self):
...
def build(self):
...
```
Second, we will import _`Document`_ from the [_langchain.schema_](https://docs.langchain.com/docs/components/schema/) module. This will be the return type of the _`build`_ method.
---
```python
from langflow.custom import CustomComponent
# focus
from langchain.schema import Document
class FlowRunner(CustomComponent):
display_name = "Flow Runner"
description = "Run other flows using a document as input."
def build_config(self):
...
# focus
def build(self, flow_name: str, document: Document) -> Document:
...
```
Now, let's add the [parameters](focus://11[20:55]) and the [return type](focus://11[60:69]) to the _`build`_ method. The parameters added are:
- _`flow_name`_ is the name of the flow we want to run.
- _`document`_ is the input document to be passed to that flow.
- Since _`Document`_ is a Langchain type, it will add an input [handle](../administration/components) to the component ([see more](../components/custom)).
---
```python focus=13:14
from langflow.custom import CustomComponent
from langchain.schema import Document
class FlowRunner(CustomComponent):
display_name = "Flow Runner"
description = "Run other flows using a document as input."
def build_config(self):
...
def build(self, flow_name: str, document: Document) -> Document:
# List the flows
flows = self.list_flows()
```
We can now start writing the _`build`_ method. Let's list available flows in "My Collection" using the _`list_flows`_ method.
---
```python focus=15:18
from langflow.custom import CustomComponent
from langchain.schema import Document
class FlowRunner(CustomComponent):
display_name = "Flow Runner"
description = "Run other flows using a document as input."
def build_config(self):
...
def build(self, flow_name: str, document: Document) -> Document:
# List the flows
flows = self.list_flows()
# Get the flow that matches the selected name
# You can also get the flow by id
# using self.get_flow(flow_id=flow_id)
tweaks = {}
flow = self.get_flow(flow_name=flow_name, tweaks=tweaks)
```
And retrieve a flow that matches the selected name (we'll make a dropdown input field for the user to choose among flow names).
<Admonition type="caution">
From version 0.4.0, names are unique, which was not the case in previous
versions. This might lead to unexpected results if using flows with the same
name.
</Admonition>
---
```python
from langflow.custom import CustomComponent
from langchain.schema import Document
class FlowRunner(CustomComponent):
display_name = "Flow Runner"
description = "Run other flows using a document as input."
def build_config(self):
...
def build(self, flow_name: str, document: Document) -> Document:
# List the flows
flows = self.list_flows()
# Get the flow that matches the selected name
# You can also get the flow by id
# using self.get_flow(flow_id=flow_id)
tweaks = {}
flow = self.get_flow(flow_name=flow_name, tweaks=tweaks)
```
You can load this flow using _`get_flow`_ and set a _`tweaks`_ dictionary to customize it. Find more about tweaks in our [features guidelines](../administration/features#code).
---
```python
from langflow.custom import CustomComponent
from langchain.schema import Document
class FlowRunner(CustomComponent):
display_name = "Flow Runner"
description = "Run other flows using a document as input."
def build_config(self):
...
def build(self, flow_name: str, document: Document) -> Document:
# List the flows
flows = self.list_flows()
# Get the flow that matches the selected name
# You can also get the flow by id
# using self.get_flow(flow_id=flow_id)
tweaks = {}
flow = self.get_flow(flow_name=flow_name, tweaks=tweaks)
# Get the page_content from the document
if document and isinstance(document, list):
document = document[0]
page_content = document.page_content
# Use it in the flow
result = flow(page_content)
return Document(page_content=str(result))
```
We are using a _`Document`_ as input because it is a straightforward way to pass text data in Langflow (specifically because you can connect it to many [loaders](../components/loaders)).
Generally, a flow will take a string or a dictionary as input because that's what LangChain components expect.
In case you are passing a dictionary, you need to build it according to the needs of the flow you are using.
The content of a document can be extracted using the _`page_content`_ attribute, which is a string, and passed as an argument to the selected flow.
---
```python focus=9:16
from langflow.custom import CustomComponent
from langchain.schema import Document
class FlowRunner(CustomComponent):
display_name = "Flow Runner"
description = "Run other flows using a document as input."
def build_config(self):
flows = self.list_flows()
flow_names = [f.name for f in flows]
return {"flow_name": {"options": flow_names,
"display_name": "Flow Name",
},
"document": {"display_name": "Document"}
}
def build(self, flow_name: str, document: Document) -> Document:
# List the flows
flows = self.list_flows()
# Get the flow that matches the selected name
# You can also get the flow by id
# using self.get_flow(flow_id=flow_id)
tweaks = {}
flow = self.get_flow(flow_name=flow_name, tweaks=tweaks)
# Get the page_content from the document
if document and isinstance(document, list):
document = document[0]
page_content = document.page_content
# Use it in the flow
result = flow(page_content)
return Document(page_content=str(result))
```
Finally, we can add field customizations through the _`build_config`_ method. Here we added the _`options`_ key to make the _`flow_name`_ field a dropdown menu. Check out the [custom component reference](../components/custom) for a list of available keys.
<Admonition type="caution">
Make sure that the field type is _`str`_ and _`options`_ values are strings.
</Admonition>
</CH.Scrollycoding>
Done! This is what our script and custom component looks like:
<div style={{
display: "flex",
justifyContent: "center",
}}>
<ZoomableImage
alt="Document Processor Code"
sources={{
light: "img/flow_runner_code.png",
dark: "img/flow_runner_code.png",
}}
style={{
maxWidth: "100%",
margin: "0 auto",
display: "flex",
justifyContent: "center",
}}
/>
<ZoomableImage
alt="Document Processor Component"
sources={{
light: "img/flow_runner.png",
dark: "img/flow_runner.png",
}}
style={{
width: "40%",
margin: "0 auto",
display: "flex",
justifyContent: "center",
}}
/>
</div>
import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";

View file

@ -0,0 +1,17 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Pass
Sometimes all you need to do is… nothing!
The **Pass** component enables you to ignore one input and move forward with another one. This is super helpful for swapping routes in A/B testing!
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
<ReactPlayer playing controls url="/videos/pass.mp4" />
</div>

View file

@ -1,62 +0,0 @@
import Admonition from "@theme/Admonition";
# Python Function
Langflow allows you to create a customized tool using the `PythonFunction` connected to a `Tool` component. In this example, Regex is used in Python to validate a pattern.
```python
import re
def is_brazilian_zipcode(zipcode: str) -> bool:
pattern = r"\d{5}-?\d{3}"
# Check if the zip code matches the pattern
if re.match(pattern, zipcode):
return True
return False
```
<Admonition type="tip">
When a tool is called, it is often desirable to have its output returned
directly to the user. You can do this by setting the **return_direct** flag
for a tool to be True.
</Admonition>
The `AgentInitializer` component is a quick way to construct an agent from the model and tools.
<Admonition type="info">
The `PythonFunction` is a custom component that uses the LangChain 🦜🔗 tool
decorator. Learn more about it
[here](https://python.langchain.com/docs/modules/agents/tools/custom_tools).
</Admonition>
## ⛓️ Langflow Example
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/python-function.png",
dark: "img/python-function.png",
}}
style={{
width: "80%",
margin: "20px auto",
display: "flex",
justifyContent: "center",
}}
/>
#### <a target="\_blank" href="json_files/Python_Function.json" download>Download Flow</a>
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`PythonFunctionTool`](https://python.langchain.com/docs/modules/agents/tools/custom_tools)
- [`ChatOpenAI`](https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai)
- [`AgentInitializer`](https://python.langchain.com/docs/modules/agents/)
</Admonition>

View file

@ -1,52 +0,0 @@
import Admonition from "@theme/Admonition";
# SearchApi Tool
The [SearchApi](https://www.searchapi.io/) allows developers to retrieve results from search engines such as Google, Google Scholar, YouTube, YouTube transcripts, and more, and can be used in Langflow through the `SearchApi` tool.
<Admonition type="info">
To use the SearchApi, you must first obtain an API key by registering at [SearchApi's website](https://www.searchapi.io/).
</Admonition>
In the given example, we specify `engine` as `youtube_transcripts` and provide a `video_id`.
<Admonition type="info">
All engines and parameters can be found in [SearchApi documentation](https://www.searchapi.io/docs/google).
</Admonition>
The `RetrievalQA` chain processes a `Document` along with a user's question to return an answer.
<Admonition type="tip">
In this example, we used [`ChatOpenAI`](https://platform.openai.com/) as the
LLM, but feel free to experiment with other Language Models!
</Admonition>
The `RetrievalQA` takes `CombineDocsChain` and `SearchApi` tool as inputs, using the tool as a `Document` to answer questions.
<Admonition type="info">
Learn more about the SearchApi
[here](https://python.langchain.com/docs/integrations/tools/searchapi).
</Admonition>
## ⛓️ Langflow Example
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/searchapi-tool.png",
}}
/>
#### <a target="\_blank" href="json_files/SearchApi_Tool.json" download>Download Flow</a>
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`SearchApiAPIWrapper`](https://python.langchain.com/docs/integrations/providers/searchapi#wrappers)
- [`ZeroShotAgent`](https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent)
</Admonition>

View file

@ -1,58 +0,0 @@
import Admonition from "@theme/Admonition";
# Serp API Tool
The [Serp API](https://serpapi.com/) (Search Engine Results Page) allows developers to scrape results from search engines such as Google, Bing and Yahoo, and can be used in Langflow through the `Search` component.
<Admonition type="info">
To use the Serp API, you first need to sign up at [Serp
API](https://serpapi.com/) to get an API key from the provider's website.
</Admonition>
Here, the `ZeroShotPrompt` component specifies a prompt template for the `ZeroShotAgent`. Set a _Prefix_ and _Suffix_ with rules for the agent to obey. In the example, we used default templates.
The `LLMChain` is a simple chain that takes in a prompt template, formats it with the user input, and returns the response from an LLM.
<Admonition type="tip">
In this example, we used [`ChatOpenAI`](https://platform.openai.com/) as the
LLM, but feel free to experiment with other Language Models!
</Admonition>
The `ZeroShotAgent` takes the `LLMChain` and the `Search` tool as inputs, using the tool to find information when necessary.
<Admonition type="info">
Learn more about the Serp API
[here](https://python.langchain.com/docs/integrations/providers/serpapi).
</Admonition>
## ⛓️ Langflow Example
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/serp-api-tool.png",
dark: "img/serp-api-tool.png",
}}
style={{
width: "80%",
margin: "20px auto",
display: "flex",
justifyContent: "center",
}}
/>
#### <a target="\_blank" href="json_files/SerpAPI_Tool.json" download>Download Flow</a>
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`ZeroShotPrompt`](https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/)
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`LLMChain`](https://python.langchain.com/docs/modules/chains/foundational/llm_chain)
- [`Search`](https://python.langchain.com/docs/integrations/providers/serpapi)
- [`ZeroShotAgent`](https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent)
</Admonition>

View file

@ -0,0 +1,17 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Store Message
The **Store Message** component allows you to save information under a specified Session ID and sender type.
The **Message History** component can then be used to retrieve stored messages.
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
<ReactPlayer playing controls url="/videos/store_message.mp4" />
</div>

View file

@ -0,0 +1,15 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Sub Flow
The **Sub Flow** component enables a user to select a previously built flow and dynamically generate a component out of it.
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
<ReactPlayer playing controls url="/videos/sub_flow.mp4" />
</div>

View file

@ -0,0 +1,15 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Text Operator
The **Text Operator** component simplifies conditional logic. It evaluates the result from another component (for example, whether the input text exactly equals `Tuna`) and triggers another component based on the outcome. Basically, the Text Operator is an if/else component for your flow.
<div
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
>
<ReactPlayer playing controls url="/videos/text_operator.mp4" />
</div>

View file

@ -12,7 +12,7 @@ import Admonition from "@theme/Admonition";
to create your own Langflow workspace in minutes.
</Admonition>
Langflow requires [Python 3.10](https://www.python.org/downloads/release/python-3100/) and [pip](https://pypi.org/project/pip/) or [pipx](https://pipx.pypa.io/stable/installation/) to be installed on your system.
Langflow requires [Python >=3.10](https://www.python.org/downloads/release/python-3100/) and [pip](https://pypi.org/project/pip/) or [pipx](https://pipx.pypa.io/stable/installation/) to be installed on your system.
Install Langflow with pip:

View file

@ -10,8 +10,8 @@ The Notion integration in Langflow enables seamless connectivity with Notion dat
<ZoomableImage
alt="Notion Components in Langflow"
sources={{
light: "img/notion/notion_components_bundle.png",
dark: "img/notion/notion_components_bundle_dark.png",
light: "img/notion/notion_bundle.jpg",
dark: "img/notion/notion_bundle.jpg",
}}
style={{ width: "100%", margin: "20px 0" }}
/>

View file

@ -105,6 +105,8 @@ The default list at the moment is:
- PINECONE_API_KEY
- SEARCHAPI_API_KEY
- SERPAPI_API_KEY
- UPSTASH_VECTOR_REST_URL
- UPSTASH_VECTOR_REST_TOKEN
- VECTARA_CUSTOMER_ID
- VECTARA_CORPUS_ID
- VECTARA_API_KEY

View file

@ -80,13 +80,13 @@ module.exports = {
label: "Example Components",
collapsed: true,
items: [
"examples/flow-runner",
"examples/conversation-chain",
"examples/buffer-memory",
"examples/csv-loader",
"examples/searchapi-tool",
"examples/serp-api-tool",
"examples/python-function",
"examples/chat-memory",
"examples/combine-text",
"examples/create-record",
"examples/pass",
"examples/store-message",
"examples/sub-flow",
"examples/text-operator",
],
},
{

BIN
docs/static/img/langflow_basic_howto.gif vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 54 MiB

BIN
docs/static/img/notion/notion_bundle.jpg vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 MiB

BIN
docs/static/videos/chat_memory.mp4 vendored Normal file

Binary file not shown.

BIN
docs/static/videos/combine_text.mp4 vendored Normal file

Binary file not shown.

BIN
docs/static/videos/create_record.mp4 vendored Normal file

Binary file not shown.

BIN
docs/static/videos/pass.mp4 vendored Normal file

Binary file not shown.

BIN
docs/static/videos/store_message.mp4 vendored Normal file

Binary file not shown.

BIN
docs/static/videos/sub_flow.mp4 vendored Normal file

Binary file not shown.

BIN
docs/static/videos/text_operator.mp4 vendored Normal file

Binary file not shown.

676
poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "1.0.0a39"
version = "1.0.0a44"
description = "A Python package with a built-in web application"
authors = ["Langflow <contact@langflow.org>"]
maintainers = [
@ -85,6 +85,7 @@ couchbase = "^4.2.1"
youtube-transcript-api = "^0.6.2"
markdown = "^3.6"
langchain-chroma = "^0.1.1"
upstash-vector = "^0.4.0"
[tool.poetry.group.dev.dependencies]

View file

@ -3,7 +3,7 @@ services:
- type: web
name: langflow
runtime: docker
dockerfilePath: ./docker/render.Dockerfile
dockerfilePath: ./docker/render.pre-release.Dockerfile
repo: https://github.com/langflow-ai/langflow
branch: dev
healthCheckPath: /health

View file

@ -0,0 +1,16 @@
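# Restart the Langflow preview Space with a factory reboot so it rebuilds from the latest image.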
import os
from huggingface_hub import HfApi
from rich import print
# Configure a HfApi client
hf_api = HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=os.getenv("HUGGINGFACE_API_TOKEN"),
)
space_runtime = hf_api.restart_space("Langflow/Langflow-Preview", factory_reboot=True)
print(space_runtime)

View file

@ -11,6 +11,7 @@ from alembic import op
import sqlalchemy as sa
import sqlmodel
from sqlalchemy.engine.reflection import Inspector
from langflow.utils import migration
${imports if imports else ""}
# revision identifiers, used by Alembic.
@ -22,13 +23,9 @@ depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
def upgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
table_names = inspector.get_table_names()
${upgrades if upgrades else "pass"}
def downgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
table_names = inspector.get_table_names()
${downgrades if downgrades else "pass"}

View file

@ -48,10 +48,12 @@ def upgrade() -> None:
with op.batch_alter_table("folder", schema=None) as batch_op:
batch_op.create_index(batch_op.f("ix_folder_name"), ["name"], unique=False)
if "folder_id" not in inspector.get_columns("flow"):
with op.batch_alter_table("flow", schema=None) as batch_op:
column_names = [column["name"] for column in inspector.get_columns("flow")]
with op.batch_alter_table("flow", schema=None) as batch_op:
if "folder_id" not in column_names:
batch_op.add_column(sa.Column("folder_id", sqlmodel.sql.sqltypes.GUID(), nullable=True))
batch_op.create_foreign_key("flow_folder_id_fkey", "folder", ["folder_id"], ["id"])
if "folder" in column_names:
batch_op.drop_column("folder")
# ### end Alembic commands ###
@ -62,11 +64,13 @@ def downgrade() -> None:
inspector = Inspector.from_engine(conn) # type: ignore
table_names = inspector.get_table_names()
# ### commands auto generated by Alembic - please adjust! ###
if "folder_id" in inspector.get_columns("flow"):
with op.batch_alter_table("flow", schema=None) as batch_op:
column_names = [column["name"] for column in inspector.get_columns("flow")]
with op.batch_alter_table("flow", schema=None) as batch_op:
if "folder" not in column_names:
batch_op.add_column(sa.Column("folder", sa.VARCHAR(), nullable=True))
batch_op.drop_constraint("flow_folder_id_fkey", type_="foreignkey")
if "folder_id" in column_names:
batch_op.drop_column("folder_id")
batch_op.drop_constraint("flow_folder_id_fkey", type_="foreignkey")
indexes = inspector.get_indexes("folder")
if "ix_folder_name" in [index["name"] for index in indexes]:

View file

@ -0,0 +1,42 @@
"""Add unique constraints per user in folder table
Revision ID: 1c79524817ed
Revises: 3bb0ddf32dfb
Create Date: 2024-05-29 23:12:09.146880
"""
from typing import Sequence, Union
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = "1c79524817ed"
down_revision: Union[str, None] = "3bb0ddf32dfb"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
constraints_names = [constraint["name"] for constraint in inspector.get_unique_constraints("folder")]
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("folder", schema=None) as batch_op:
if "unique_folder_name" not in constraints_names:
batch_op.create_unique_constraint("unique_folder_name", ["user_id", "name"])
# ### end Alembic commands ###
def downgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
constraints_names = [constraint["name"] for constraint in inspector.get_unique_constraints("folder")]
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("folder", schema=None) as batch_op:
if "unique_folder_name" in constraints_names:
batch_op.drop_constraint("unique_folder_name", type_="unique")
# ### end Alembic commands ###

View file

@ -0,0 +1,54 @@
"""Add unique constraints per user in flow table
Revision ID: 3bb0ddf32dfb
Revises: a72f5cf9c2f9
Create Date: 2024-05-29 23:08:43.935040
"""
from typing import Sequence, Union
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = "3bb0ddf32dfb"
down_revision: Union[str, None] = "a72f5cf9c2f9"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
# ### commands auto generated by Alembic - please adjust! ###
indexes_names = [index["name"] for index in inspector.get_indexes("flow")]
constraints_names = [constraint["name"] for constraint in inspector.get_unique_constraints("flow")]
with op.batch_alter_table("flow", schema=None) as batch_op:
if "ix_flow_endpoint_name" in indexes_names:
batch_op.drop_index("ix_flow_endpoint_name")
batch_op.create_index(batch_op.f("ix_flow_endpoint_name"), ["endpoint_name"], unique=False)
if "unique_flow_endpoint_name" not in constraints_names:
batch_op.create_unique_constraint("unique_flow_endpoint_name", ["user_id", "endpoint_name"])
if "unique_flow_name" not in constraints_names:
batch_op.create_unique_constraint("unique_flow_name", ["user_id", "name"])
# ### end Alembic commands ###
def downgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
# ### commands auto generated by Alembic - please adjust! ###
indexes_names = [index["name"] for index in inspector.get_indexes("flow")]
constraints_names = [constraint["name"] for constraint in inspector.get_unique_constraints("flow")]
with op.batch_alter_table("flow", schema=None) as batch_op:
if "unique_flow_name" in constraints_names:
batch_op.drop_constraint("unique_flow_name", type_="unique")
if "unique_flow_endpoint_name" in constraints_names:
batch_op.drop_constraint("unique_flow_endpoint_name", type_="unique")
if "ix_flow_endpoint_name" in indexes_names:
batch_op.drop_index(batch_op.f("ix_flow_endpoint_name"))
batch_op.create_index("ix_flow_endpoint_name", ["endpoint_name"], unique=1)
# ### end Alembic commands ###

View file

@ -0,0 +1,45 @@
"""Add webhook columns
Revision ID: 631faacf5da2
Revises: 1c79524817ed
Create Date: 2024-04-22 15:14:43.454784
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = "631faacf5da2"
down_revision: Union[str, None] = "1c79524817ed"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
table_names = inspector.get_table_names()
# ### commands auto generated by Alembic - please adjust! ###
column_names = [column["name"] for column in inspector.get_columns("flow")]
with op.batch_alter_table("flow", schema=None) as batch_op:
if "flow" in table_names and "webhook" not in column_names:
batch_op.add_column(sa.Column("webhook", sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
table_names = inspector.get_table_names()
# ### commands auto generated by Alembic - please adjust! ###
column_names = [column["name"] for column in inspector.get_columns("flow")]
with op.batch_alter_table("flow", schema=None) as batch_op:
if "flow" in table_names and "webhook" in column_names:
batch_op.drop_column("webhook")
# ### end Alembic commands ###

View file

@ -52,9 +52,14 @@ def upgrade() -> None:
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
column_names = [column["name"] for column in inspector.get_columns("flow")]
with op.batch_alter_table("flow", schema=None) as batch_op:
batch_op.drop_column("folder")
batch_op.drop_column("updated_at")
if "folder" in column_names:
batch_op.drop_column("folder")
if "updated_at" in column_names:
batch_op.drop_column("updated_at")
except Exception as e:
print(e)
pass

View file

@ -0,0 +1,52 @@
"""Add endpoint name col
Revision ID: a72f5cf9c2f9
Revises: 29fe8f1f806b
Create Date: 2024-05-29 21:44:04.240816
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = "a72f5cf9c2f9"
down_revision: Union[str, None] = "29fe8f1f806b"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
# ### commands auto generated by Alembic - please adjust! ###
column_names = [column["name"] for column in inspector.get_columns("flow")]
indexes = inspector.get_indexes("flow")
index_names = [index["name"] for index in indexes]
with op.batch_alter_table("flow", schema=None) as batch_op:
if "endpoint_name" not in column_names:
batch_op.add_column(sa.Column("endpoint_name", sqlmodel.sql.sqltypes.AutoString(), nullable=True))
if "ix_flow_endpoint_name" not in index_names:
batch_op.create_index(batch_op.f("ix_flow_endpoint_name"), ["endpoint_name"], unique=True)
# ### end Alembic commands ###
def downgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
# ### commands auto generated by Alembic - please adjust! ###
column_names = [column["name"] for column in inspector.get_columns("flow")]
indexes = inspector.get_indexes("flow")
index_names = [index["name"] for index in indexes]
with op.batch_alter_table("flow", schema=None) as batch_op:
if "ix_flow_endpoint_name" in index_names:
batch_op.drop_index(batch_op.f("ix_flow_endpoint_name"))
if "endpoint_name" in column_names:
batch_op.drop_column("endpoint_name")
# ### end Alembic commands ###

View file

@ -286,7 +286,7 @@ async def get_next_runnable_vertices(
for v_id in set(next_runnable_vertices): # Use set to avoid duplicates
graph.vertices_to_run.remove(v_id)
graph.remove_from_predecessors(v_id)
await chat_service.set_cache(flow_id=flow_id, data=graph, lock=lock)
await chat_service.set_cache(key=flow_id, data=graph, lock=lock)
return next_runnable_vertices

View file

@ -1,6 +1,5 @@
import time
import uuid
from functools import partial
from typing import TYPE_CHECKING, Annotated, Optional
from fastapi import APIRouter, BackgroundTasks, Body, Depends, HTTPException
@ -164,7 +163,6 @@ async def build_vertex(
vertex = graph.get_vertex(vertex_id)
try:
lock = chat_service._cache_locks[flow_id_str]
set_cache_coro = partial(chat_service.set_cache, flow_id=flow_id_str)
(
next_runnable_vertices,
top_level_vertices,
@ -175,7 +173,7 @@ async def build_vertex(
vertex,
) = await graph.build_vertex(
lock=lock,
set_cache_coro=set_cache_coro,
chat_service=chat_service,
vertex_id=vertex_id,
user_id=current_user.id,
inputs_dict=inputs.model_dump() if inputs else {},

View file

@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, Annotated, List, Optional, Union
from uuid import UUID
import sqlalchemy as sa
from fastapi import APIRouter, Body, Depends, HTTPException, UploadFile, status
from fastapi import APIRouter, BackgroundTasks, Body, Depends, HTTPException, Request, UploadFile, status
from loguru import logger
from sqlmodel import Session, select
@ -22,11 +22,14 @@ from langflow.api.v1.schemas import (
from langflow.custom import CustomComponent
from langflow.custom.utils import build_custom_component_template
from langflow.graph.graph.base import Graph
from langflow.graph.schema import RunOutputs
from langflow.helpers.flow import get_flow_by_id_or_endpoint_name
from langflow.processing.process import process_tweaks, run_graph_internal
from langflow.schema.graph import Tweaks
from langflow.services.auth.utils import api_key_security, get_current_active_user
from langflow.services.cache.utils import save_uploaded_file
from langflow.services.database.models.flow import Flow
from langflow.services.database.models.flow.utils import get_all_webhook_components_in_flow, get_flow_by_id
from langflow.services.database.models.user.model import User
from langflow.services.deps import get_session, get_session_service, get_settings_service, get_task_service
from langflow.services.session.service import SessionService
@ -53,10 +56,70 @@ def get_all(
raise HTTPException(status_code=500, detail=str(exc)) from exc
@router.post("/run/{flow_id}", response_model=RunResponse, response_model_exclude_none=True)
async def simple_run_flow(
db: Session,
flow: Flow,
input_request: SimplifiedAPIRequest,
session_service: SessionService,
stream: bool = False,
api_key_user: Optional[User] = None,
):
try:
task_result: List[RunOutputs] = []
artifacts = {}
user_id = api_key_user.id if api_key_user else None
flow_id_str = str(flow.id)
if input_request.session_id:
session_data = await session_service.load_session(input_request.session_id, flow_id=flow_id_str)
graph, artifacts = session_data if session_data else (None, None)
if graph is None:
raise ValueError(f"Session {input_request.session_id} not found")
else:
if flow.data is None:
raise ValueError(f"Flow {flow_id_str} has no data")
graph_data = flow.data
graph_data = process_tweaks(graph_data, input_request.tweaks or {}, stream=stream)
graph = Graph.from_payload(graph_data, flow_id=flow_id_str, user_id=str(user_id))
inputs = [
InputValueRequest(components=[], input_value=input_request.input_value, type=input_request.input_type)
]
if input_request.output_component:
outputs = [input_request.output_component]
else:
outputs = [
vertex.id
for vertex in graph.vertices
if input_request.output_type == "debug"
or (
vertex.is_output
and (input_request.output_type == "any" or input_request.output_type in vertex.id.lower())
)
]
task_result, session_id = await run_graph_internal(
graph=graph,
flow_id=flow_id_str,
session_id=input_request.session_id,
inputs=inputs,
outputs=outputs,
artifacts=artifacts,
session_service=session_service,
stream=stream,
)
return RunResponse(outputs=task_result, session_id=session_id)
except sa.exc.StatementError as exc:
# StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
if "badly formed hexadecimal UUID string" in str(exc):
logger.error(f"Flow ID {flow_id_str} is not a valid UUID")
# This means the Flow ID is not a valid UUID which means it can't find the flow
raise ValueError(str(exc)) from exc
@router.post("/run/{flow_id_or_name}", response_model=RunResponse, response_model_exclude_none=True)
async def simplified_run_flow(
db: Annotated[Session, Depends(get_session)],
flow_id: UUID,
flow: Annotated[Flow, Depends(get_flow_by_id_or_endpoint_name)],
input_request: SimplifiedAPIRequest = SimplifiedAPIRequest(),
stream: bool = False,
api_key_user: User = Depends(api_key_security),
@ -67,7 +130,7 @@ async def simplified_run_flow(
### Parameters:
- `db` (Session): Database session for executing queries.
- `flow_id` (str): Unique identifier of the flow to be executed.
- `flow_id_or_name` (str): ID or endpoint name of the flow to run.
- `input_request` (SimplifiedAPIRequest): Request object containing input values, types, output selection, tweaks, and session ID.
- `api_key_user` (User): User object derived from the provided API key, used for authentication.
- `session_service` (SessionService): Service for managing flow sessions, essential for session reuse and caching.
@ -110,73 +173,21 @@ async def simplified_run_flow(
This endpoint provides a powerful interface for executing flows with enhanced flexibility and efficiency, supporting a wide range of applications by allowing for dynamic input and output configuration along with performance optimizations through session management and caching.
"""
session_id = input_request.session_id
try:
flow_id_str = str(flow_id)
artifacts = {}
if input_request.session_id:
session_data = await session_service.load_session(input_request.session_id, flow_id=flow_id_str)
graph, artifacts = session_data if session_data else (None, None)
if graph is None:
raise ValueError(f"Session {input_request.session_id} not found")
else:
# Get the flow that matches the flow_id and belongs to the user
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
flow = db.exec(select(Flow).where(Flow.id == flow_id_str).where(Flow.user_id == api_key_user.id)).first()
if flow is None:
raise ValueError(f"Flow {flow_id_str} not found")
if flow.data is None:
raise ValueError(f"Flow {flow_id_str} has no data")
graph_data = flow.data
graph_data = process_tweaks(graph_data, input_request.tweaks or {}, stream=stream)
graph = Graph.from_payload(graph_data, flow_id=flow_id_str, user_id=str(api_key_user.id))
inputs = [
InputValueRequest(components=[], input_value=input_request.input_value, type=input_request.input_type)
]
# outputs is a list of all components that should return output
# we need to get them by checking their type
# if the output type is debug, we return all outputs
# if the output type is any, we return all outputs that are either chat or text
# if the output type is chat or text, we return only the outputs that match the type
if input_request.output_component:
outputs = [input_request.output_component]
else:
outputs = [
vertex.id
for vertex in graph.vertices
if input_request.output_type == "debug"
or (
vertex.is_output
and (input_request.output_type == "any" or input_request.output_type in vertex.id.lower())
)
]
task_result, session_id = await run_graph_internal(
graph=graph,
flow_id=flow_id_str,
session_id=input_request.session_id,
inputs=inputs,
outputs=outputs,
artifacts=artifacts,
return await simple_run_flow(
db=db,
flow=flow,
input_request=input_request,
session_service=session_service,
stream=stream,
api_key_user=api_key_user,
)
return RunResponse(outputs=task_result, session_id=session_id)
except sa.exc.StatementError as exc:
# StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
if "badly formed hexadecimal UUID string" in str(exc):
logger.error(f"Flow ID {flow_id_str} is not a valid UUID")
# This means the Flow ID is not a valid UUID which means it can't find the flow
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
except ValueError as exc:
if f"Flow {flow_id_str} not found" in str(exc):
logger.error(f"Flow {flow_id_str} not found")
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
elif f"Session {session_id} not found" in str(exc):
logger.error(f"Session {session_id} not found")
if "badly formed hexadecimal UUID string" in str(exc):
# This means the Flow ID is not a valid UUID which means it can't find the flow
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(exc)) from exc
if "not found" in str(exc):
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
else:
logger.exception(exc)
@ -186,6 +197,68 @@ async def simplified_run_flow(
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(exc)) from exc
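
With the route now keyed by flow_id_or_name, the endpoint accepts either the flow's UUID or its new endpoint_name. A hedged sketch of such a call (host, port, API key header, and flow identifier are placeholders; the payload fields mirror SimplifiedAPIRequest above):

import httpx

BASE_URL = "http://localhost:7860"   # placeholder Langflow host
FLOW = "my-endpoint-name"            # a flow UUID string works as well

payload = {
    "input_value": "Hello, world",
    "input_type": "chat",
    "output_type": "chat",
    "tweaks": {},          # optional per-component overrides
    "session_id": None,    # pass an existing session id to reuse its graph
}

response = httpx.post(
    f"{BASE_URL}/api/v1/run/{FLOW}",
    json=payload,
    headers={"x-api-key": "<YOUR_API_KEY>"},  # assumed header name for api_key_security
    params={"stream": False},
    timeout=30,
)
response.raise_for_status()
print(response.json()["outputs"])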
@router.post("/webhook/{flow_id}", response_model=dict, status_code=HTTPStatus.ACCEPTED)
async def webhook_run_flow(
db: Annotated[Session, Depends(get_session)],
flow: Annotated[Flow, Depends(get_flow_by_id)],
request: Request,
background_tasks: BackgroundTasks,
session_service: SessionService = Depends(get_session_service),
):
"""
Run a flow using a webhook request.
Args:
db (Session): The database session.
request (Request): The incoming HTTP request.
background_tasks (BackgroundTasks): The background tasks manager.
session_service (SessionService, optional): The session service. Defaults to Depends(get_session_service).
flow (Flow, optional): The flow to be executed. Defaults to Depends(get_flow_by_id).
Returns:
dict: A dictionary containing the status of the task.
Raises:
HTTPException: If the flow is not found or if there is an error processing the request.
"""
try:
logger.debug("Received webhook request")
data = await request.body()
if not data:
logger.error("Request body is empty")
raise ValueError(
"Request body is empty. You should provide a JSON payload containing the flow ID.",
)
# get all webhook components in the flow
webhook_components = get_all_webhook_components_in_flow(flow.data)
tweaks = {}
data_dict = await request.json()
for component in webhook_components:
tweaks[component["id"]] = {"data": data.decode() if isinstance(data, bytes) else data}
input_request = SimplifiedAPIRequest(
input_value=data_dict.get("input_value", ""),
input_type=data_dict.get("input_type", "chat"),
output_type=data_dict.get("output_type", "chat"),
tweaks=tweaks,
session_id=data_dict.get("session_id"),
)
logger.debug("Starting background task")
background_tasks.add_task(
simple_run_flow,
db=db,
flow=flow,
input_request=input_request,
session_service=session_service,
)
return {"message": "Task started in the background", "status": "in progress"}
except Exception as exc:
if "Flow ID is required" in str(exc) or "Request body is empty" in str(exc):
raise HTTPException(status_code=400, detail=str(exc)) from exc
logger.exception(exc)
raise HTTPException(status_code=500, detail=str(exc)) from exc
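
Because the webhook route hands the raw request body to every Webhook component as a tweak and runs the flow in a background task, a caller only needs to POST JSON and check the acknowledgement. A minimal sketch (host, route prefix, and flow id are placeholders):

import httpx

flow_id = "00000000-0000-0000-0000-000000000000"   # placeholder flow id
response = httpx.post(
    f"http://localhost:7860/api/v1/webhook/{flow_id}",
    json={"event": "issue_opened", "payload": {"title": "Bug report"}},
)
print(response.status_code)   # expected: 202 (accepted)
print(response.json())        # {"message": "Task started in the background", "status": "in progress"}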
@router.post("/run/advanced/{flow_id}", response_model=RunResponse, response_model_exclude_none=True)
async def experimental_run_flow(
session: Annotated[Session, Depends(get_session)],

View file

@ -13,6 +13,7 @@ from langflow.api.v1.schemas import FlowListCreate, FlowListIds, FlowListRead
from langflow.initial_setup.setup import STARTER_FOLDER_NAME
from langflow.services.auth.utils import get_current_active_user
from langflow.services.database.models.flow import Flow, FlowCreate, FlowRead, FlowUpdate
from langflow.services.database.models.flow.utils import get_webhook_component_in_flow
from langflow.services.database.models.folder.constants import DEFAULT_FOLDER_NAME
from langflow.services.database.models.folder.model import Folder
from langflow.services.database.models.user.model import User
@ -57,8 +58,22 @@ def read_flows(
current_user: User = Depends(get_current_active_user),
session: Session = Depends(get_session),
settings_service: "SettingsService" = Depends(get_settings_service),
remove_example_flows: bool = False,
):
"""Read all flows."""
"""
Retrieve a list of flows.
Args:
current_user (User): The current authenticated user.
session (Session): The database session.
settings_service (SettingsService): The settings service.
remove_example_flows (bool, optional): Whether to remove example flows. Defaults to False.
Returns:
List[Dict]: A list of flows in JSON format.
"""
try:
auth_settings = settings_service.auth_settings
if auth_settings.AUTO_LOGIN:
@ -73,15 +88,16 @@ def read_flows(
flows = validate_is_component(flows) # type: ignore
flow_ids = [flow.id for flow in flows]
# with the session get the flows that DO NOT have a user_id
try:
folder = session.exec(select(Folder).where(Folder.name == STARTER_FOLDER_NAME)).first()
if not remove_example_flows:
try:
folder = session.exec(select(Folder).where(Folder.name == STARTER_FOLDER_NAME)).first()
example_flows = folder.flows if folder else []
for example_flow in example_flows:
if example_flow.id not in flow_ids:
flows.append(example_flow) # type: ignore
except Exception as e:
logger.error(e)
example_flows = folder.flows if folder else []
for example_flow in example_flows:
if example_flow.id not in flow_ids:
flows.append(example_flow) # type: ignore
except Exception as e:
logger.error(e)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e)) from e
return [jsonable_encoder(flow) for flow in flows]
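
The new remove_example_flows query parameter lets clients skip the starter-project flows. A brief sketch of such a call (host is a placeholder and the request assumes an already-authenticated session, e.g. AUTO_LOGIN):

import httpx

response = httpx.get(
    "http://localhost:7860/api/v1/flows/",
    params={"remove_example_flows": True},
)
response.raise_for_status()
print([flow["name"] for flow in response.json()])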
@ -120,30 +136,51 @@ def update_flow(
settings_service=Depends(get_settings_service),
):
"""Update a flow."""
try:
db_flow = read_flow(
session=session,
flow_id=flow_id,
current_user=current_user,
settings_service=settings_service,
)
if not db_flow:
raise HTTPException(status_code=404, detail="Flow not found")
flow_data = flow.model_dump(exclude_unset=True)
if settings_service.settings.remove_api_keys:
flow_data = remove_api_keys(flow_data)
for key, value in flow_data.items():
if value is not None:
setattr(db_flow, key, value)
webhook_component = get_webhook_component_in_flow(db_flow.data)
db_flow.webhook = webhook_component is not None
db_flow.updated_at = datetime.now(timezone.utc)
if db_flow.folder_id is None:
default_folder = session.exec(select(Folder).where(Folder.name == DEFAULT_FOLDER_NAME)).first()
if default_folder:
db_flow.folder_id = default_folder.id
session.add(db_flow)
session.commit()
session.refresh(db_flow)
return db_flow
except Exception as e:
# If it is a validation error, return the error message
if hasattr(e, "errors"):
raise HTTPException(status_code=400, detail=str(e)) from e
elif "UNIQUE constraint failed" in str(e):
# Get the name of the column that failed
columns = str(e).split("UNIQUE constraint failed: ")[1].split(".")[1].split("\n")[0]
# UNIQUE constraint failed: flow.user_id, flow.name
# or UNIQUE constraint failed: flow.name
# if the column has id in it, we want the other column
column = columns.split(",")[1] if "id" in columns.split(",")[0] else columns.split(",")[0]
db_flow = read_flow(
session=session,
flow_id=flow_id,
current_user=current_user,
settings_service=settings_service,
)
if not db_flow:
raise HTTPException(status_code=404, detail="Flow not found")
flow_data = flow.model_dump(exclude_unset=True)
if settings_service.settings.remove_api_keys:
flow_data = remove_api_keys(flow_data)
for key, value in flow_data.items():
if value is not None:
setattr(db_flow, key, value)
db_flow.updated_at = datetime.now(timezone.utc)
if db_flow.folder_id is None:
default_folder = session.exec(select(Folder).where(Folder.name == DEFAULT_FOLDER_NAME)).first()
if default_folder:
db_flow.folder_id = default_folder.id
session.add(db_flow)
session.commit()
session.refresh(db_flow)
return db_flow
raise HTTPException(
status_code=400, detail=f"{column.capitalize().replace('_', ' ')} must be unique"
) from e
elif isinstance(e, HTTPException):
raise e
else:
raise HTTPException(status_code=500, detail=str(e)) from e
@router.delete("/{flow_id}", status_code=200)

View file

@ -1,5 +1,4 @@
from typing import List
from uuid import UUID
import orjson
from fastapi import APIRouter, Depends, File, HTTPException, Response, UploadFile, status
@ -88,7 +87,7 @@ def read_folders(
def read_folder(
*,
session: Session = Depends(get_session),
folder_id: UUID,
folder_id: str,
current_user: User = Depends(get_current_active_user),
):
try:
@ -106,7 +105,7 @@ def read_folder(
def update_folder(
*,
session: Session = Depends(get_session),
folder_id: UUID,
folder_id: str,
folder: FolderUpdate, # Assuming FolderUpdate is a Pydantic model defining updatable fields
current_user: User = Depends(get_current_active_user),
):
@ -155,7 +154,7 @@ def update_folder(
def delete_folder(
*,
session: Session = Depends(get_session),
folder_id: UUID,
folder_id: str,
current_user: User = Depends(get_current_active_user),
):
try:
@ -177,7 +176,7 @@ def delete_folder(
async def download_file(
*,
session: Session = Depends(get_session),
folder_id: UUID,
folder_id: str,
current_user: User = Depends(get_current_active_user),
):
"""Download all flows from folder."""

View file

@ -46,6 +46,7 @@ async def login_to_get_access_token(
samesite=auth_settings.REFRESH_SAME_SITE,
secure=auth_settings.REFRESH_SECURE,
expires=auth_settings.REFRESH_TOKEN_EXPIRE_SECONDS,
domain=auth_settings.COOKIE_DOMAIN,
)
response.set_cookie(
"access_token_lf",
@ -54,6 +55,7 @@ async def login_to_get_access_token(
samesite=auth_settings.ACCESS_SAME_SITE,
secure=auth_settings.ACCESS_SECURE,
expires=auth_settings.ACCESS_TOKEN_EXPIRE_SECONDS,
domain=auth_settings.COOKIE_DOMAIN,
)
variable_service.initialize_user_variables(user.id, db)
# Create default folder for user if it doesn't exist
@ -71,8 +73,7 @@ async def login_to_get_access_token(
async def auto_login(
response: Response,
db: Session = Depends(get_session),
settings_service=Depends(get_settings_service),
variable_service: VariableService = Depends(get_variable_service),
settings_service=Depends(get_settings_service)
):
auth_settings = settings_service.auth_settings
if settings_service.auth_settings.AUTO_LOGIN:
@ -84,9 +85,9 @@ async def auto_login(
samesite=auth_settings.ACCESS_SAME_SITE,
secure=auth_settings.ACCESS_SECURE,
expires=None, # Set to None to make it a session cookie
domain=auth_settings.COOKIE_DOMAIN,
)
variable_service.initialize_user_variables(user_id, db)
create_default_folder_if_it_doesnt_exist(db, user_id)
return tokens
raise HTTPException(
@ -117,6 +118,7 @@ async def refresh_token(
samesite=auth_settings.REFRESH_SAME_SITE,
secure=auth_settings.REFRESH_SECURE,
expires=auth_settings.REFRESH_TOKEN_EXPIRE_SECONDS,
domain=auth_settings.COOKIE_DOMAIN,
)
response.set_cookie(
"access_token_lf",
@ -125,6 +127,7 @@ async def refresh_token(
samesite=auth_settings.ACCESS_SAME_SITE,
secure=auth_settings.ACCESS_SECURE,
expires=auth_settings.ACCESS_TOKEN_EXPIRE_SECONDS,
domain=auth_settings.COOKIE_DOMAIN,
)
return tokens
else:

View file

@ -256,6 +256,7 @@ class ResultDataResponse(BaseModel):
messages: List[ChatOutputResponse | None] = Field(default_factory=list)
timedelta: Optional[float] = None
duration: Optional[str] = None
used_frozen_result: Optional[bool] = False
class VertexBuildResponse(BaseModel):

View file

@ -0,0 +1,89 @@
"""
This file contains a fix for the implementation of the `uncurl` library, which is available at https://github.com/spulec/uncurl.git.
The `uncurl` library provides a way to parse and convert cURL commands into Python requests. However, there are some issues with the original implementation that this file aims to fix.
The `parse_context` function takes a cURL command as input and returns a `ParsedContext` object with the parsed details: HTTP method, URL, data, headers, cookies, verification flag, auth, and proxy settings.
The `normalize_newlines` helper replaces a line-continuation backslash ("\") followed by a newline with a single space.
"""
import re
import shlex
from collections import OrderedDict, namedtuple
from http.cookies import SimpleCookie
from uncurl.api import parser # type: ignore
parser.add_argument("-x", "--proxy", default={})
parser.add_argument("-U", "--proxy-user", default="")
ParsedContext = namedtuple("ParsedContext", ["method", "url", "data", "headers", "cookies", "verify", "auth", "proxy"])
def normalize_newlines(multiline_text):
return multiline_text.replace(" \\\n", " ")
def parse_context(curl_command):
method = "get"
tokens = shlex.split(normalize_newlines(curl_command))
tokens = [token for token in tokens if token and token != " "]
parsed_args = parser.parse_args(tokens)
post_data = parsed_args.data or parsed_args.data_binary
if post_data:
method = "post"
if parsed_args.X:
method = parsed_args.X.lower()
cookie_dict = OrderedDict()
quoted_headers = OrderedDict()
for curl_header in parsed_args.header:
if curl_header.startswith(":"):
occurrence = [m.start() for m in re.finditer(":", curl_header)]
header_key, header_value = curl_header[: occurrence[1]], curl_header[occurrence[1] + 1 :]
else:
header_key, header_value = curl_header.split(":", 1)
if header_key.lower().strip("$") == "cookie":
cookie = SimpleCookie(bytes(header_value, "ascii").decode("unicode-escape"))
for key in cookie:
cookie_dict[key] = cookie[key].value
else:
quoted_headers[header_key] = header_value.strip()
# add auth
user = parsed_args.user
if parsed_args.user:
user = tuple(user.split(":"))
# add proxy and its authentication if it's available.
proxies = parsed_args.proxy
# proxy_auth = parsed_args.proxy_user
if parsed_args.proxy and parsed_args.proxy_user:
proxies = {
"http": "http://{}@{}/".format(parsed_args.proxy_user, parsed_args.proxy),
"https": "http://{}@{}/".format(parsed_args.proxy_user, parsed_args.proxy),
}
elif parsed_args.proxy:
proxies = {
"http": "http://{}/".format(parsed_args.proxy),
"https": "http://{}/".format(parsed_args.proxy),
}
return ParsedContext(
method=method,
url=parsed_args.url,
data=post_data,
headers=quoted_headers,
cookies=cookie_dict,
verify=parsed_args.insecure,
auth=user,
proxy=proxies,
)
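
For reference, a small example of what parse_context produces for a typical command (the URL, header, and body are arbitrary illustrations):

from langflow.base.curl.parse import parse_context

ctx = parse_context(
    'curl -X POST https://example.com/api/items '
    '-H "Content-Type: application/json" '
    '-d \'{"name": "widget"}\''
)
print(ctx.method)    # "post"
print(ctx.url)       # "https://example.com/api/items"
print(ctx.headers)   # OrderedDict with "Content-Type": "application/json"
print(ctx.data)      # '{"name": "widget"}'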

View file

@ -1,11 +1,15 @@
import asyncio
import json
from typing import List, Optional
from typing import Any, List, Optional
import httpx
from loguru import logger
from langflow.base.curl.parse import parse_context
from langflow.custom import CustomComponent
from langflow.field_typing import NestedDict
from langflow.schema import Record
from langflow.schema.dotdict import dotdict
class APIRequest(CustomComponent):
@ -17,10 +21,15 @@ class APIRequest(CustomComponent):
field_config = {
"urls": {"display_name": "URLs", "info": "URLs to make requests to."},
"curl": {
"display_name": "Curl",
"info": "Paste a curl command to populate the fields.",
"refresh_button": True,
"refresh_button_text": "",
},
"method": {
"display_name": "Method",
"info": "The HTTP method to use.",
"field_type": "str",
"options": ["GET", "POST", "PATCH", "PUT"],
"value": "GET",
},
@ -36,12 +45,33 @@ class APIRequest(CustomComponent):
},
"timeout": {
"display_name": "Timeout",
"field_type": "int",
"info": "The timeout to use for the request.",
"value": 5,
},
}
def parse_curl(self, curl: str, build_config: dotdict) -> dotdict:
try:
parsed = parse_context(curl)
build_config["urls"]["value"] = [parsed.url]
build_config["method"]["value"] = parsed.method.upper()
build_config["headers"]["value"] = dict(parsed.headers)
try:
json_data = json.loads(parsed.data)
build_config["body"]["value"] = json_data
except json.JSONDecodeError as e:
print(e)
except Exception as exc:
logger.error(f"Error parsing curl: {exc}")
raise ValueError(f"Error parsing curl: {exc}")
return build_config
def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):
if field_name == "curl" and field_value is not None:
build_config = self.parse_curl(field_value, build_config)
return build_config
async def make_request(
self,
client: httpx.AsyncClient,
@ -94,21 +124,25 @@ class APIRequest(CustomComponent):
self,
method: str,
urls: List[str],
headers: Optional[Record] = None,
body: Optional[Record] = None,
curl: Optional[str] = None,
headers: Optional[NestedDict] = {},
body: Optional[NestedDict] = {},
timeout: int = 5,
) -> List[Record]:
if headers is None:
headers_dict = {}
else:
elif isinstance(headers, Record):
headers_dict = headers.data
else:
headers_dict = headers
bodies = []
if body:
if isinstance(body, list):
bodies = [b.data for b in body]
if not isinstance(body, list):
bodies = [body]
else:
bodies = [body.data]
bodies = body
bodies = [b.data if isinstance(b, Record) else b for b in bodies] # type: ignore
if len(urls) != len(bodies):
# add bodies with None

View file

@ -0,0 +1,39 @@
import json
import uuid
from typing import Any, Optional
from langflow.custom import CustomComponent
from langflow.schema.dotdict import dotdict
from langflow.schema.schema import Record
class WebhookComponent(CustomComponent):
display_name = "Webhook Input"
description = "Defines a webhook input for the flow."
def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):
if field_name == "webhook_id":
build_config["webhook_id"]["value"] = uuid.uuid4().hex
return build_config
def build_config(self):
return {
"data": {
"display_name": "Data",
"info": "Use this field to quickly test the webhook component by providing a JSON payload.",
"multiline": True,
}
}
def build(self, data: Optional[str] = "") -> Record:
message = ""
try:
body = json.loads(data or "{}")
except json.JSONDecodeError:
body = {"payload": data}
message = f"Invalid JSON payload. Please check the format.\n\n{data}"
record = Record(data=body)
if not message:
message = json.dumps(body, indent=2)
self.status = message
return record
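
The component's build method wraps the payload in a Record, falling back to {"payload": <raw text>} when the body is not valid JSON. A tiny standalone sketch of that fallback behaviour (plain json, no Langflow imports, purely illustrative):

import json

def payload_to_dict(data: str) -> dict:
    # Mirrors the parse-or-wrap logic of WebhookComponent.build above.
    try:
        return json.loads(data or "{}")
    except json.JSONDecodeError:
        return {"payload": data}

print(payload_to_dict('{"event": "ping"}'))   # {'event': 'ping'}
print(payload_to_dict("not json"))            # {'payload': 'not json'}
print(payload_to_dict(""))                    # {}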

View file

@ -1,7 +1,8 @@
from .APIRequest import APIRequest
from .Directory import DirectoryComponent
from .File import FileComponent
from .Webhook import WebhookComponent
from .URL import URLComponent
__all__ = ["APIRequest", "DirectoryComponent", "FileComponent", "URLComponent"]
__all__ = ["APIRequest", "DirectoryComponent", "FileComponent", "URLComponent", "WebhookComponent"]

View file

@ -43,7 +43,7 @@ class SplitTextComponent(CustomComponent):
chunks = [chunk[:truncate_size] for chunk in chunks]
for chunk in chunks:
outputs.append(Record(text=chunk, data={"parent": text}))
outputs.append(Record(data={"parent": text, "text": chunk}))
self.status = outputs
return outputs

View file

@ -1,21 +1,11 @@
from typing import Any, Dict, List, Optional, Union
from langchain_community.chat_models.ollama import ChatOllama
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langchain_core.caches import BaseCache
from langflow.field_typing import Text
import asyncio
import json
from typing import Any, Dict, List, Optional
import httpx
from langchain_community.chat_models.ollama import ChatOllama
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class ChatOllamaComponent(LCModelComponent):
@ -26,18 +16,12 @@ class ChatOllamaComponent(LCModelComponent):
field_order = [
"base_url",
"headers",
"keep_alive_flag",
"keep_alive",
"metadata",
"model",
"temperature",
"cache",
"format",
"metadata",
"mirostat",
@ -67,10 +51,7 @@ class ChatOllamaComponent(LCModelComponent):
"base_url": {
"display_name": "Base URL",
"info": "Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.",
},
"format": {
"display_name": "Format",
"info": "Specify the format of the output (e.g., json)",
@ -79,13 +60,10 @@ class ChatOllamaComponent(LCModelComponent):
"headers": {
"display_name": "Headers",
"advanced": True,
},
"keep_alive_flag": {
"display_name": "Unload interval",
"options": ["Keep", "Immediately","Minute", "Hour", "sec" ],
"options": ["Keep", "Immediately", "Minute", "Hour", "sec"],
"real_time_refresh": True,
"refresh_button": True,
},
@ -93,9 +71,6 @@ class ChatOllamaComponent(LCModelComponent):
"display_name": "interval",
"info": "How long the model will stay loaded into memory.",
},
"model": {
"display_name": "Model Name",
"options": [],
@ -109,14 +84,6 @@ class ChatOllamaComponent(LCModelComponent):
"value": 0.8,
"info": "Controls the creativity of model responses.",
},
"format": {
"display_name": "Format",
"field_type": "str",
"info": "Specify the format of the output (e.g., json).",
"advanced": True,
},
"metadata": {
"display_name": "Metadata",
"info": "Metadata to add to the run trace.",
@ -129,7 +96,6 @@ class ChatOllamaComponent(LCModelComponent):
"advanced": False,
"real_time_refresh": True,
"refresh_button": True,
},
"mirostat_eta": {
"display_name": "Mirostat Eta",
@ -260,10 +226,14 @@ class ChatOllamaComponent(LCModelComponent):
build_config["mirostat_tau"]["value"] = 5
if field_name == "model":
base_url = build_config.get("base_url", {}).get(
"value", "http://localhost:11434")
build_config["model"]["options"] = self.get_model(
base_url + "/api/tags")
base_url_dict = build_config.get("base_url", {})
base_url_load_from_db = base_url_dict.get("load_from_db", False)
base_url_value = base_url_dict.get("value")
if base_url_load_from_db:
base_url_value = self.variables(base_url_value)
elif not base_url_value:
base_url_value = "http://localhost:11434"
build_config["model"]["options"] = self.get_model(base_url_value + "/api/tags")
if field_name == "keep_alive_flag":
if field_value == "Keep":
@ -276,9 +246,6 @@ class ChatOllamaComponent(LCModelComponent):
build_config["keep_alive"]["advanced"] = False
return build_config
def get_model(self, url: str) -> List[str]:
try:
@ -287,8 +254,7 @@ class ChatOllamaComponent(LCModelComponent):
response.raise_for_status()
data = response.json()
model_names = [model['name']
for model in data.get("models", [])]
model_names = [model["name"] for model in data.get("models", [])]
return model_names
except Exception as e:
raise ValueError("Could not retrieve models") from e
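
get_model above relies on Ollama's tags endpoint to populate the model dropdown. A hedged sketch of the same request outside the component (assumes a local Ollama server; the response shape is the documented {"models": [{"name": ...}, ...]}):

import httpx

base_url = "http://localhost:11434"           # default used by the component
response = httpx.get(f"{base_url}/api/tags")  # same endpoint get_model queries
response.raise_for_status()
models = [model["name"] for model in response.json().get("models", [])]
print(models)   # e.g. ["llama3:latest", "mistral:latest"]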
@ -299,15 +265,13 @@ class ChatOllamaComponent(LCModelComponent):
base_url: Optional[str],
model: str,
input_value: Text,
mirostat: Optional[str],
mirostat: Optional[str] = "Disabled",
mirostat_eta: Optional[float] = None,
mirostat_tau: Optional[float] = None,
repeat_last_n: Optional[int] = None,
verbose: Optional[bool] = None,
keep_alive: Optional[int] = None,
keep_alive_flag: Optional[str] = None,
keep_alive_flag: Optional[str] = "Keep",
num_ctx: Optional[int] = None,
num_gpu: Optional[int] = None,
format: Optional[str] = None,
@ -326,12 +290,9 @@ class ChatOllamaComponent(LCModelComponent):
stream: bool = False,
system_message: Optional[str] = None,
) -> Text:
if not base_url:
base_url = "http://localhost:11434"
if keep_alive_flag == "Minute":
keep_alive_instance = f"{keep_alive}m"
elif keep_alive_flag == "Hour":

View file

@ -78,7 +78,7 @@ class OpenAIModelComponent(LCModelComponent):
self,
input_value: Text,
openai_api_key: str,
temperature: float,
temperature: float = 0.1,
model_name: str = "gpt-4o",
max_tokens: Optional[int] = 256,
model_kwargs: NestedDict = {},

View file

@ -0,0 +1,79 @@
from typing import List, Optional
from langchain_core.embeddings import Embeddings
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.components.vectorstores.Upstash import UpstashVectorStoreComponent
from langflow.field_typing import Text
from langflow.schema import Record
class UpstashSearchComponent(UpstashVectorStoreComponent, LCVectorStoreComponent):
"""
A custom component for implementing a Vector Store using Upstash.
"""
display_name: str = "Upstash Search"
description: str = "Search an Upstash Vector Store for similar documents."
def build_config(self):
"""
Builds the configuration for the component.
Returns:
- dict: A dictionary containing the configuration options for the component.
"""
return {
"search_type": {
"display_name": "Search Type",
"options": ["Similarity", "MMR"],
},
"input_value": {"display_name": "Input"},
"inputs": {"display_name": "Input", "input_types": ["Document", "Record"]},
"embedding": {
"display_name": "Embedding",
"input_types": ["Embeddings"],
"info": "To use Upstash's embeddings, don't provide an embedding.",
},
"index_url": {
"display_name": "Index URL",
"info": "The URL of the Upstash index.",
},
"index_token": {
"display_name": "Index Token",
"info": "The token for the Upstash index.",
},
"number_of_results": {
"display_name": "Number of Results",
"info": "Number of results to return.",
"advanced": True,
},
"text_key": {
"display_name": "Text Key",
"info": "The key in the record to use as text.",
"advanced": True,
},
}
def build( # type: ignore[override]
self,
input_value: Text,
search_type: str,
text_key: str = "text",
index_url: Optional[str] = None,
index_token: Optional[str] = None,
embedding: Optional[Embeddings] = None,
number_of_results: int = 4,
) -> List[Record]:
vector_store = super().build(
embedding=embedding,
text_key=text_key,
index_url=index_url,
index_token=index_token,
)
if not vector_store:
raise ValueError("Failed to load the Upstash Vector Store.")
return self.search_with_vector_store(
input_value=input_value, search_type=search_type, vector_store=vector_store, k=number_of_results
)

View file

@ -67,22 +67,19 @@ class QdrantComponent(CustomComponent):
documents.append(_input.to_lc_document())
else:
documents.append(_input)
if documents is None:
if not documents:
from qdrant_client import QdrantClient
client = QdrantClient(
location=location,
url=host,
url=url,
port=port,
grpc_port=grpc_port,
https=https,
prefix=prefix,
timeout=timeout,
prefer_grpc=prefer_grpc,
metadata_payload_key=metadata_payload_key,
content_payload_key=content_payload_key,
api_key=api_key,
collection_name=collection_name,
host=host,
path=path,
)
@ -90,6 +87,8 @@ class QdrantComponent(CustomComponent):
client=client,
collection_name=collection_name,
embeddings=embedding,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
)
return vs
else:

View file

@ -0,0 +1,89 @@
from typing import List, Optional, Union
from langchain_community.vectorstores.upstash import UpstashVectorStore
from langchain_core.embeddings import Embeddings
from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import VectorStore
from langflow.custom import CustomComponent
from langflow.schema.schema import Record
class UpstashVectorStoreComponent(CustomComponent):
"""
A custom component for implementing a Vector Store using Upstash.
"""
display_name: str = "Upstash"
description: str = "Create and Utilize an Upstash Vector Store"
def build_config(self):
"""
Builds the configuration for the component.
Returns:
- dict: A dictionary containing the configuration options for the component.
"""
return {
"inputs": {"display_name": "Input", "input_types": ["Document", "Record"]},
"embedding": {
"display_name": "Embedding",
"input_types": ["Embeddings"],
"info": "To use Upstash's embeddings, don't provide an embedding.",
},
"index_url": {
"display_name": "Index URL",
"info": "The URL of the Upstash index.",
},
"index_token": {
"display_name": "Index Token",
"info": "The token for the Upstash index.",
},
"text_key": {
"display_name": "Text Key",
"info": "The key in the record to use as text.",
"advanced": True,
},
}
def build(
self,
inputs: Optional[List[Record]] = None,
text_key: str = "text",
index_url: Optional[str] = None,
index_token: Optional[str] = None,
embedding: Optional[Embeddings] = None,
) -> Union[VectorStore, BaseRetriever]:
documents = []
for _input in inputs or []:
if isinstance(_input, Record):
documents.append(_input.to_lc_document())
else:
documents.append(_input)
use_upstash_embedding = embedding is None
if not documents:
upstash_vs = UpstashVectorStore(
embedding=embedding or use_upstash_embedding,
text_key=text_key,
index_url=index_url,
index_token=index_token,
)
else:
if use_upstash_embedding:
upstash_vs = UpstashVectorStore(
embedding=use_upstash_embedding,
text_key=text_key,
index_url=index_url,
index_token=index_token,
)
upstash_vs.add_documents(documents)
elif embedding:
upstash_vs = UpstashVectorStore.from_documents(
documents=documents, # type: ignore
embedding=embedding,
text_key=text_key,
index_url=index_url,
index_token=index_token,
)
return upstash_vs
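
The build method covers three paths: an empty store, a store populated via Upstash's hosted embeddings, and one populated with a supplied embedding model. A hedged sketch of the hosted-embeddings path, mirroring only the calls used above (index URL and token are placeholders):

from langchain_community.vectorstores.upstash import UpstashVectorStore
from langchain_core.documents import Document

store = UpstashVectorStore(
    embedding=True,   # True means Upstash's hosted embeddings, as in the use_upstash_embedding branch above
    text_key="text",
    index_url="https://example-index.upstash.io",   # placeholder
    index_token="<UPSTASH_INDEX_TOKEN>",            # placeholder
)
store.add_documents([Document(page_content="hello upstash")])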

View file

@ -159,6 +159,11 @@ def add_new_custom_field(
if field_type == "bool" and field_value is None:
field_value = False
if field_type == "SecretStr":
field_config["password"] = True
field_config["load_from_db"] = True
field_config["input_types"] = ["Text"]
# If options is a list, then it's a dropdown
# If options is None, then it's a list of strings
is_list = isinstance(field_config.get("options"), list)

View file

@ -17,7 +17,10 @@ from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.types import InterfaceVertex, StateVertex
from langflow.schema import Record
from langflow.schema.schema import INPUT_FIELD_NAME, InputType
from langflow.services.cache.utils import CacheMiss
from langflow.services.chat.service import ChatService
from langflow.services.deps import get_chat_service
from langflow.services.monitor.utils import log_transaction
if TYPE_CHECKING:
from langflow.graph.schema import ResultData
@ -704,7 +707,7 @@ class Graph:
async def build_vertex(
self,
lock: asyncio.Lock,
set_cache_coro: Callable[["Graph", asyncio.Lock], Coroutine],
chat_service: ChatService,
vertex_id: str,
inputs_dict: Optional[Dict[str, str]] = None,
files: Optional[list[str]] = None,
@ -740,13 +743,14 @@ class Graph:
result_dict = vertex.result
else:
raise ValueError(f"No result found for vertex {vertex_id}")
set_cache_coro = partial(chat_service.set_cache, key=self.flow_id)
next_runnable_vertices, top_level_vertices = await self.get_next_and_top_level_vertices(
lock, set_cache_coro, vertex
)
return next_runnable_vertices, top_level_vertices, result_dict, params, valid, log_type, vertex
except Exception as exc:
logger.exception(f"Error building vertex: {exc}")
log_transaction(vertex, status="failure", error=str(exc))
raise exc
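
The graph now receives the ChatService directly and builds the cache-updating coroutine itself with functools.partial, binding the flow id as the cache key. A small generic sketch of that binding pattern (the cache function here is a stand-in, not Langflow's):

import asyncio
from functools import partial

async def set_cache(key: str, data: dict) -> None:
    # Stand-in for chat_service.set_cache; just records the call.
    print(f"caching {data} under {key}")

async def main() -> None:
    # Bind the key up front, like set_cache_coro = partial(chat_service.set_cache, key=self.flow_id)
    set_cache_coro = partial(set_cache, key="flow-123")
    await set_cache_coro(data={"vertices_built": 3})

asyncio.run(main())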
async def get_next_and_top_level_vertices(
@ -811,11 +815,10 @@ class Graph:
for vertex_id in current_batch:
vertex = self.get_vertex(vertex_id)
lock = chat_service._cache_locks[self.run_id]
set_cache_coro = partial(chat_service.set_cache, flow_id=self.run_id)
task = asyncio.create_task(
self.build_vertex(
lock=lock,
set_cache_coro=set_cache_coro,
chat_service=chat_service,
vertex_id=vertex_id,
user_id=self.user_id,
inputs_dict={},

View file

@ -15,6 +15,7 @@ class ResultData(BaseModel):
duration: Optional[str] = None
component_display_name: Optional[str] = None
component_id: Optional[str] = None
used_frozen_result: Optional[bool] = False
@field_serializer("results")
def serialize_results(self, value):

View file

@ -708,7 +708,8 @@ class Vertex:
self._finalize_build()
return await self.get_requester_result(requester)
result = await self.get_requester_result(requester)
return result
async def get_requester_result(self, requester: Optional["Vertex"]):
# If the requester is None, this means that

View file

@ -1,13 +1,14 @@
from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple, Type, Union, cast
from uuid import UUID
from fastapi import Depends, HTTPException
from pydantic.v1 import BaseModel, Field, create_model
from sqlmodel import select
from sqlmodel import Session, select
from langflow.graph.schema import RunOutputs
from langflow.schema.schema import INPUT_FIELD_NAME, Record
from langflow.services.database.models.flow.model import Flow
from langflow.services.deps import session_scope
from langflow.services.database.models.flow import Flow
from langflow.services.deps import get_session, session_scope
if TYPE_CHECKING:
from langflow.graph.graph.base import Graph
@ -235,3 +236,22 @@ def get_arg_names(inputs: List["Vertex"]) -> List[dict[str, str]]:
{"component_name": input_.display_name, "arg_name": input_.display_name.lower().replace(" ", "_")}
for input_ in inputs
]
def get_flow_by_id_or_endpoint_name(
flow_id_or_name: str, db: Session = Depends(get_session), user_id: Optional[UUID] = None
) -> Flow:
endpoint_name = None
try:
flow_id = UUID(flow_id_or_name)
flow = db.get(Flow, flow_id)
except ValueError:
endpoint_name = flow_id_or_name
stmt = select(Flow).where(Flow.name == endpoint_name)
if user_id:
stmt = stmt.where(Flow.user_id == user_id)
flow = db.exec(stmt).first()
if flow is None:
raise HTTPException(status_code=404, detail=f"Flow identifier {flow_id_or_name} not found")
return flow

View file

@ -1,7 +1,10 @@
import logging
import os
from collections import defaultdict
from copy import deepcopy
from datetime import datetime, timezone
from pathlib import Path
from uuid import UUID
import orjson
from emoji import demojize, purely_emoji # type: ignore
@ -10,10 +13,16 @@ from sqlmodel import select
from langflow.base.constants import FIELD_FORMAT_ATTRIBUTES, NODE_FORMAT_ATTRIBUTES
from langflow.interface.types import get_all_components
from langflow.services.auth.utils import create_super_user
from langflow.services.database.models.flow.model import Flow, FlowCreate
from langflow.services.database.models.folder.model import Folder, FolderCreate
from langflow.services.database.models.user.crud import get_user_by_username
from langflow.services.deps import get_settings_service, session_scope
from langflow.services.database.models.folder.utils import create_default_folder_if_it_doesnt_exist
from langflow.services.deps import get_settings_service, session_scope, get_variable_service
STARTER_FOLDER_NAME = "Starter Projects"
STARTER_FOLDER_DESCRIPTION = "Starter projects to help you get started in Langflow."
@ -205,6 +214,63 @@ def create_starter_folder(session):
return session.exec(select(Folder).where(Folder.name == STARTER_FOLDER_NAME)).first()
def _is_valid_uuid(val):
try:
uuid_obj = UUID(val)
except ValueError:
return False
return str(uuid_obj) == val
def load_flows_from_directory():
settings_service = get_settings_service()
flows_path = settings_service.settings.load_flows_path
if not flows_path:
return
if not settings_service.auth_settings.AUTO_LOGIN:
logging.warning("AUTO_LOGIN is disabled, not loading flows from directory")
return
with session_scope() as session:
user_id = get_user_by_username(session, settings_service.auth_settings.SUPERUSER).id
files = [f for f in os.listdir(flows_path) if os.path.isfile(os.path.join(flows_path, f))]
for filename in files:
if not filename.endswith(".json"):
continue
logger.info(f"Loading flow from file: {filename}")
with open(os.path.join(flows_path, filename), "r", encoding="utf-8") as file:
flow = orjson.loads(file.read())
no_json_name = filename.replace(".json", "")
flow_endpoint_name = flow.get("endpoint_name")
if _is_valid_uuid(no_json_name):
flow["id"] = no_json_name
flow_id = flow.get("id")
existing = find_existing_flow(session, flow_id, flow_endpoint_name)
if existing:
logger.info(f"Updating existing flow: {flow_id} with endpoint name {flow_endpoint_name}")
for key, value in flow.items():
setattr(existing, key, value)
existing.updated_at = datetime.utcnow()
existing.user_id = user_id
session.add(existing)
session.commit()
else:
logger.info(f"Creating new flow: {flow_id} with endpoint name {flow_endpoint_name}")
flow["user_id"] = user_id
flow = Flow.model_validate(flow, from_attributes=True)
flow.updated_at = datetime.utcnow()
session.add(flow)
session.commit()
def find_existing_flow(session, flow_id, flow_endpoint_name):
if flow_endpoint_name:
stmt = select(Flow).where(Flow.endpoint_name == flow_endpoint_name)
if existing := session.exec(stmt).first():
return existing
stmt = select(Flow).where(Flow.id == flow_id)
if existing := session.exec(stmt).first():
return existing
return None
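
load_flows_from_directory treats a JSON file named after a valid UUID as that flow's id, and otherwise matches on the flow's endpoint_name or embedded id. A hedged sketch of preparing such a directory (path and flow content are illustrative; the actual loading happens at startup when the load_flows_path setting points at the folder):

import json
import uuid
from pathlib import Path

flows_dir = Path("/tmp/langflow_flows")   # illustrative directory
flows_dir.mkdir(parents=True, exist_ok=True)

flow_id = str(uuid.uuid4())
flow = {
    "name": "Imported Flow",
    "description": "Loaded from disk at startup",
    "endpoint_name": "imported-flow",
    "data": {"nodes": [], "edges": []},   # illustrative payload shape
}

# Naming the file after a valid UUID makes load_flows_from_directory reuse it as the flow id.
(flows_dir / f"{flow_id}.json").write_text(json.dumps(flow))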
def create_or_update_starter_projects():
components_paths = get_settings_service().settings.components_path
try:
@ -249,3 +315,20 @@ def create_or_update_starter_projects():
project_icon_bg_color,
new_folder.id,
)
def initialize_super_user_if_needed():
settings_service = get_settings_service()
if not settings_service.auth_settings.AUTO_LOGIN:
return
username = settings_service.auth_settings.SUPERUSER
password = settings_service.auth_settings.SUPERUSER_PASSWORD
if not username or not password:
raise ValueError("SUPERUSER and SUPERUSER_PASSWORD must be set in the settings if AUTO_LOGIN is true.")
with session_scope() as session:
super_user = create_super_user(db=session, username=username, password=password)
get_variable_service().initialize_user_variables(super_user.id, session)
create_default_folder_if_it_doesnt_exist(session, super_user.id)
session.commit()
logger.info("Super user initialized")

View file

@ -45,7 +45,9 @@
"name": "template",
"display_name": "Template",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "",
"load_from_db": false,
@ -84,14 +86,22 @@
"is_input": null,
"is_output": null,
"is_composition": null,
"base_classes": ["object", "str", "Text"],
"base_classes": [
"object",
"str",
"Text"
],
"name": "",
"display_name": "Prompt",
"documentation": "",
"custom_fields": {
"template": ["user_input"]
"template": [
"user_input"
]
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"full_path": null,
"field_formatters": {},
"frozen": false,
@ -140,7 +150,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"code": {
"type": "code",
@ -149,7 +161,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -223,7 +235,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"openai_api_base": {
"type": "str",
@ -242,7 +256,9 @@
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"openai_api_key": {
"type": "str",
@ -261,7 +277,9 @@
"info": "The OpenAI API Key to use for the OpenAI model.",
"load_from_db": true,
"title_case": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"value": "OPENAI_API_KEY"
},
"stream": {
@ -300,11 +318,13 @@
"info": "System message to pass to the model.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"temperature": {
"type": "float",
"required": true,
"required": false,
"placeholder": "",
"list": false,
"show": true,
@ -331,7 +351,11 @@
},
"description": "Generates text using OpenAI LLMs.",
"icon": "OpenAI",
"base_classes": ["object", "Text", "str"],
"base_classes": [
"object",
"Text",
"str"
],
"display_name": "OpenAI",
"documentation": "",
"custom_fields": {
@ -345,7 +369,9 @@
"stream": null,
"system_message": null
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"field_formatters": {},
"frozen": false,
"field_order": [
@ -416,7 +442,9 @@
"name": "input_value",
"display_name": "Message",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "",
"load_from_db": false,
@ -440,7 +468,9 @@
"info": "In case of Message being a Record, this template will be used to convert it to text.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"return_record": {
"type": "bool",
@ -472,7 +502,10 @@
"fileTypes": [],
"file_path": "",
"password": false,
"options": ["Machine", "User"],
"options": [
"Machine",
"User"
],
"name": "sender",
"display_name": "Sender Type",
"advanced": true,
@ -480,7 +513,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"sender_name": {
"type": "str",
@ -500,7 +535,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"session_id": {
"type": "str",
@ -519,13 +556,20 @@
"info": "If provided, the message will be stored in the memory.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"_type": "CustomComponent"
},
"description": "Display a chat message in the Playground.",
"icon": "ChatOutput",
"base_classes": ["Record", "Text", "str", "object"],
"base_classes": [
"Record",
"Text",
"str",
"object"
],
"display_name": "Chat Output",
"documentation": "",
"custom_fields": {
@ -536,7 +580,10 @@
"return_record": null,
"record_template": null
},
"output_types": ["Text", "Record"],
"output_types": [
"Text",
"Record"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@ -632,7 +679,10 @@
"fileTypes": [],
"file_path": "",
"password": false,
"options": ["Machine", "User"],
"options": [
"Machine",
"User"
],
"name": "sender",
"display_name": "Sender Type",
"advanced": true,
@ -640,7 +690,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"sender_name": {
"type": "str",
@ -660,7 +712,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"session_id": {
"type": "str",
@ -679,13 +733,20 @@
"info": "If provided, the message will be stored in the memory.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"_type": "CustomComponent"
},
"description": "Get chat inputs from the Playground.",
"icon": "ChatInput",
"base_classes": ["object", "Record", "str", "Text"],
"base_classes": [
"object",
"Record",
"str",
"Text"
],
"display_name": "Chat Input",
"documentation": "",
"custom_fields": {
@ -695,7 +756,10 @@
"session_id": null,
"return_record": null
},
"output_types": ["Text", "Record"],
"output_types": [
"Text",
"Record"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@ -716,18 +780,24 @@
"edges": [
{
"source": "OpenAIModel-k39HS",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153Text\u0153,\u0153str\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-k39HS\u0153}",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-k39HSœ}",
"target": "ChatOutput-njtka",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153ChatOutput-njtka\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-njtkaœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-njtka",
"inputTypes": ["Text"],
"inputTypes": [
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["object", "Text", "str"],
"baseClasses": [
"object",
"Text",
"str"
],
"dataType": "OpenAIModel",
"id": "OpenAIModel-k39HS"
}
@ -736,22 +806,28 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-OpenAIModel-k39HS{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153Text\u0153,\u0153str\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-k39HS\u0153}-ChatOutput-njtka{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153ChatOutput-njtka\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-OpenAIModel-k39HS{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-k39HSœ}-ChatOutput-njtka{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-njtkaœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
},
{
"source": "Prompt-uxBqP",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-uxBqP\u0153}",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-uxBqPœ}",
"target": "OpenAIModel-k39HS",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153OpenAIModel-k39HS\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-k39HSœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "OpenAIModel-k39HS",
"inputTypes": ["Text"],
"inputTypes": [
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["object", "str", "Text"],
"baseClasses": [
"object",
"str",
"Text"
],
"dataType": "Prompt",
"id": "Prompt-uxBqP"
}
@@ -760,22 +836,32 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-Prompt-uxBqP{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-uxBqP\u0153}-OpenAIModel-k39HS{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153OpenAIModel-k39HS\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-Prompt-uxBqP{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-uxBqPœ}-OpenAIModel-k39HS{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-k39HSœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
},
{
"source": "ChatInput-P3fgL",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153Record\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153ChatInput\u0153,\u0153id\u0153:\u0153ChatInput-P3fgL\u0153}",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œRecordœ,œstrœ,œTextœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-P3fgLœ}",
"target": "Prompt-uxBqP",
"targetHandle": "{\u0153fieldName\u0153:\u0153user_input\u0153,\u0153id\u0153:\u0153Prompt-uxBqP\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œuser_inputœ,œidœ:œPrompt-uxBqPœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "user_input",
"id": "Prompt-uxBqP",
"inputTypes": ["Document", "BaseOutputParser", "Record", "Text"],
"inputTypes": [
"Document",
"BaseOutputParser",
"Record",
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["object", "Record", "str", "Text"],
"baseClasses": [
"object",
"Record",
"str",
"Text"
],
"dataType": "ChatInput",
"id": "ChatInput-P3fgL"
}
@@ -784,7 +870,7 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-ChatInput-P3fgL{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153Record\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153ChatInput\u0153,\u0153id\u0153:\u0153ChatInput-P3fgL\u0153}-Prompt-uxBqP{\u0153fieldName\u0153:\u0153user_input\u0153,\u0153id\u0153:\u0153Prompt-uxBqP\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-ChatInput-P3fgL{œbaseClassesœ:[œobjectœ,œRecordœ,œstrœ,œTextœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-P3fgLœ}-Prompt-uxBqP{œfieldNameœ:œuser_inputœ,œidœ:œPrompt-uxBqPœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}"
}
],
"viewport": {
@@ -797,4 +883,4 @@
"name": "Basic Prompting (Hello, World)",
"last_tested_version": "1.0.0a4",
"is_component": false
}
}

View file

@@ -45,7 +45,9 @@
"name": "template",
"display_name": "Template",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "",
"load_from_db": false,
@@ -136,14 +138,24 @@
"is_input": null,
"is_output": null,
"is_composition": null,
"base_classes": ["object", "Text", "str"],
"base_classes": [
"object",
"Text",
"str"
],
"name": "",
"display_name": "Prompt",
"documentation": "",
"custom_fields": {
"template": ["reference_1", "reference_2", "instructions"]
"template": [
"reference_1",
"reference_2",
"instructions"
]
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"full_path": null,
"field_formatters": {},
"frozen": false,
@@ -210,7 +222,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"value": [
"https://www.promptingguide.ai/techniques/prompt_chaining"
]
@@ -219,13 +233,17 @@
},
"description": "Fetch content from one or more URLs.",
"icon": "layout-template",
"base_classes": ["Record"],
"base_classes": [
"Record"
],
"display_name": "URL",
"documentation": "",
"custom_fields": {
"urls": null
},
"output_types": ["Record"],
"output_types": [
"Record"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@@ -284,7 +302,9 @@
"name": "input_value",
"display_name": "Message",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "",
"load_from_db": false,
@@ -308,7 +328,9 @@
"info": "In case of Message being a Record, this template will be used to convert it to text.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"return_record": {
"type": "bool",
@@ -340,7 +362,10 @@
"fileTypes": [],
"file_path": "",
"password": false,
"options": ["Machine", "User"],
"options": [
"Machine",
"User"
],
"name": "sender",
"display_name": "Sender Type",
"advanced": true,
@@ -348,7 +373,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"sender_name": {
"type": "str",
@@ -368,7 +395,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"session_id": {
"type": "str",
@@ -387,13 +416,20 @@
"info": "If provided, the message will be stored in the memory.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"_type": "CustomComponent"
},
"description": "Display a chat message in the Playground.",
"icon": "ChatOutput",
"base_classes": ["Text", "Record", "object", "str"],
"base_classes": [
"Text",
"Record",
"object",
"str"
],
"display_name": "Chat Output",
"documentation": "",
"custom_fields": {
@@ -404,7 +440,10 @@
"return_record": null,
"record_template": null
},
"output_types": ["Text", "Record"],
"output_types": [
"Text",
"Record"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@@ -444,7 +483,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"code": {
"type": "code",
@@ -453,7 +494,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -527,7 +568,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"openai_api_base": {
"type": "str",
@@ -546,7 +589,9 @@
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"openai_api_key": {
"type": "str",
@@ -565,7 +610,9 @@
"info": "The OpenAI API Key to use for the OpenAI model.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"value": "OPENAI_API_KEY"
},
"stream": {
@@ -604,11 +651,13 @@
"info": "System message to pass to the model.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"temperature": {
"type": "float",
"required": true,
"required": false,
"placeholder": "",
"list": false,
"show": true,
@@ -635,7 +684,11 @@
},
"description": "Generates text using OpenAI LLMs.",
"icon": "OpenAI",
"base_classes": ["str", "Text", "object"],
"base_classes": [
"str",
"Text",
"object"
],
"display_name": "OpenAI",
"documentation": "",
"custom_fields": {
@@ -649,7 +702,9 @@
"stream": null,
"system_message": null
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"field_formatters": {},
"frozen": false,
"field_order": [
@@ -722,20 +777,28 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"],
"value": ["https://www.promptingguide.ai/introduction/basics"]
"input_types": [
"Text"
],
"value": [
"https://www.promptingguide.ai/introduction/basics"
]
},
"_type": "CustomComponent"
},
"description": "Fetch content from one or more URLs.",
"icon": "layout-template",
"base_classes": ["Record"],
"base_classes": [
"Record"
],
"display_name": "URL",
"documentation": "",
"custom_fields": {
"urls": null
},
"output_types": ["Record"],
"output_types": [
"Record"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@@ -795,7 +858,10 @@
"name": "input_value",
"display_name": "Value",
"advanced": false,
"input_types": ["Record", "Text"],
"input_types": [
"Record",
"Text"
],
"dynamic": false,
"info": "Text or Record to be passed as input.",
"load_from_db": false,
@@ -819,20 +885,28 @@
"info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"_type": "CustomComponent"
},
"description": "Get text inputs from the Playground.",
"icon": "type",
"base_classes": ["object", "Text", "str"],
"base_classes": [
"object",
"Text",
"str"
],
"display_name": "Instructions",
"documentation": "",
"custom_fields": {
"input_value": null,
"record_template": null
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@@ -854,18 +928,25 @@
{
"source": "URL-HYPkR",
"target": "Prompt-Rse03",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153Record\u0153],\u0153dataType\u0153:\u0153URL\u0153,\u0153id\u0153:\u0153URL-HYPkR\u0153}",
"targetHandle": "{\u0153fieldName\u0153:\u0153reference_2\u0153,\u0153id\u0153:\u0153Prompt-Rse03\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"id": "reactflow__edge-URL-HYPkR{\u0153baseClasses\u0153:[\u0153Record\u0153],\u0153dataType\u0153:\u0153URL\u0153,\u0153id\u0153:\u0153URL-HYPkR\u0153}-Prompt-Rse03{\u0153fieldName\u0153:\u0153reference_2\u0153,\u0153id\u0153:\u0153Prompt-Rse03\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"sourceHandle": "{œbaseClassesœ:[œRecordœ],œdataTypeœ:œURLœ,œidœ:œURL-HYPkRœ}",
"targetHandle": "{œfieldNameœ:œreference_2œ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"id": "reactflow__edge-URL-HYPkR{œbaseClassesœ:[œRecordœ],œdataTypeœ:œURLœ,œidœ:œURL-HYPkRœ}-Prompt-Rse03{œfieldNameœ:œreference_2œ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "reference_2",
"id": "Prompt-Rse03",
"inputTypes": ["Document", "BaseOutputParser", "Record", "Text"],
"inputTypes": [
"Document",
"BaseOutputParser",
"Record",
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["Record"],
"baseClasses": [
"Record"
],
"dataType": "URL",
"id": "URL-HYPkR"
}
@@ -878,18 +959,24 @@
},
{
"source": "OpenAIModel-gi29P",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-gi29P\u0153}",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-gi29Pœ}",
"target": "ChatOutput-JPlxl",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153ChatOutput-JPlxl\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-JPlxlœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-JPlxl",
"inputTypes": ["Text"],
"inputTypes": [
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["str", "Text", "object"],
"baseClasses": [
"str",
"Text",
"object"
],
"dataType": "OpenAIModel",
"id": "OpenAIModel-gi29P"
}
@@ -898,22 +985,29 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-OpenAIModel-gi29P{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-gi29P\u0153}-ChatOutput-JPlxl{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153ChatOutput-JPlxl\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-OpenAIModel-gi29P{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-gi29Pœ}-ChatOutput-JPlxl{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-JPlxlœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
},
{
"source": "URL-2cX90",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153Record\u0153],\u0153dataType\u0153:\u0153URL\u0153,\u0153id\u0153:\u0153URL-2cX90\u0153}",
"sourceHandle": "{œbaseClassesœ:[œRecordœ],œdataTypeœ:œURLœ,œidœ:œURL-2cX90œ}",
"target": "Prompt-Rse03",
"targetHandle": "{\u0153fieldName\u0153:\u0153reference_1\u0153,\u0153id\u0153:\u0153Prompt-Rse03\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œreference_1œ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "reference_1",
"id": "Prompt-Rse03",
"inputTypes": ["Document", "BaseOutputParser", "Record", "Text"],
"inputTypes": [
"Document",
"BaseOutputParser",
"Record",
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["Record"],
"baseClasses": [
"Record"
],
"dataType": "URL",
"id": "URL-2cX90"
}
@@ -922,22 +1016,31 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-URL-2cX90{\u0153baseClasses\u0153:[\u0153Record\u0153],\u0153dataType\u0153:\u0153URL\u0153,\u0153id\u0153:\u0153URL-2cX90\u0153}-Prompt-Rse03{\u0153fieldName\u0153:\u0153reference_1\u0153,\u0153id\u0153:\u0153Prompt-Rse03\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-URL-2cX90{œbaseClassesœ:[œRecordœ],œdataTypeœ:œURLœ,œidœ:œURL-2cX90œ}-Prompt-Rse03{œfieldNameœ:œreference_1œ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}"
},
{
"source": "TextInput-og8Or",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153Text\u0153,\u0153str\u0153],\u0153dataType\u0153:\u0153TextInput\u0153,\u0153id\u0153:\u0153TextInput-og8Or\u0153}",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-og8Orœ}",
"target": "Prompt-Rse03",
"targetHandle": "{\u0153fieldName\u0153:\u0153instructions\u0153,\u0153id\u0153:\u0153Prompt-Rse03\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinstructionsœ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "instructions",
"id": "Prompt-Rse03",
"inputTypes": ["Document", "BaseOutputParser", "Record", "Text"],
"inputTypes": [
"Document",
"BaseOutputParser",
"Record",
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["object", "Text", "str"],
"baseClasses": [
"object",
"Text",
"str"
],
"dataType": "TextInput",
"id": "TextInput-og8Or"
}
@@ -946,22 +1049,28 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-TextInput-og8Or{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153Text\u0153,\u0153str\u0153],\u0153dataType\u0153:\u0153TextInput\u0153,\u0153id\u0153:\u0153TextInput-og8Or\u0153}-Prompt-Rse03{\u0153fieldName\u0153:\u0153instructions\u0153,\u0153id\u0153:\u0153Prompt-Rse03\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-TextInput-og8Or{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-og8Orœ}-Prompt-Rse03{œfieldNameœ:œinstructionsœ,œidœ:œPrompt-Rse03œ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}"
},
{
"source": "Prompt-Rse03",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153Text\u0153,\u0153str\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-Rse03\u0153}",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-Rse03œ}",
"target": "OpenAIModel-gi29P",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153OpenAIModel-gi29P\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-gi29Pœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "OpenAIModel-gi29P",
"inputTypes": ["Text"],
"inputTypes": [
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["object", "Text", "str"],
"baseClasses": [
"object",
"Text",
"str"
],
"dataType": "Prompt",
"id": "Prompt-Rse03"
}
@@ -970,7 +1079,7 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-Prompt-Rse03{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153Text\u0153,\u0153str\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-Rse03\u0153}-OpenAIModel-gi29P{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153OpenAIModel-gi29P\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"id": "reactflow__edge-Prompt-Rse03{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-Rse03œ}-OpenAIModel-gi29P{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-gi29Pœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"selected": false
}
],
@@ -984,4 +1093,4 @@
"name": "Blog Writer",
"last_tested_version": "1.0.0a0",
"is_component": false
}
}

View file

@@ -45,7 +45,9 @@
"name": "template",
"display_name": "Template",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "",
"load_from_db": false,
@@ -110,14 +112,23 @@
"is_input": null,
"is_output": null,
"is_composition": null,
"base_classes": ["object", "str", "Text"],
"base_classes": [
"object",
"str",
"Text"
],
"name": "",
"display_name": "Prompt",
"documentation": "",
"custom_fields": {
"template": ["Document", "Question"]
"template": [
"Document",
"Question"
]
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"full_path": null,
"field_formatters": {},
"frozen": false,
@@ -220,14 +231,18 @@
"_type": "CustomComponent"
},
"description": "A generic file loader.",
"base_classes": ["Record"],
"base_classes": [
"Record"
],
"display_name": "Files",
"documentation": "",
"custom_fields": {
"path": null,
"silent_errors": null
},
"output_types": ["Record"],
"output_types": [
"Record"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@@ -323,7 +338,10 @@
"fileTypes": [],
"file_path": "",
"password": false,
"options": ["Machine", "User"],
"options": [
"Machine",
"User"
],
"name": "sender",
"display_name": "Sender Type",
"advanced": true,
@@ -331,7 +349,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"sender_name": {
"type": "str",
@@ -351,7 +371,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"session_id": {
"type": "str",
@@ -370,13 +392,20 @@
"info": "If provided, the message will be stored in the memory.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"_type": "CustomComponent"
},
"description": "Get chat inputs from the Playground.",
"icon": "ChatInput",
"base_classes": ["str", "Record", "Text", "object"],
"base_classes": [
"str",
"Record",
"Text",
"object"
],
"display_name": "Chat Input",
"documentation": "",
"custom_fields": {
@@ -386,7 +415,10 @@
"session_id": null,
"return_record": null
},
"output_types": ["Text", "Record"],
"output_types": [
"Text",
"Record"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@@ -445,7 +477,9 @@
"name": "input_value",
"display_name": "Message",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "",
"load_from_db": false,
@@ -481,7 +515,10 @@
"fileTypes": [],
"file_path": "",
"password": false,
"options": ["Machine", "User"],
"options": [
"Machine",
"User"
],
"name": "sender",
"display_name": "Sender Type",
"advanced": true,
@@ -489,7 +526,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"sender_name": {
"type": "str",
@@ -509,7 +548,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"session_id": {
"type": "str",
@@ -528,13 +569,20 @@
"info": "If provided, the message will be stored in the memory.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"_type": "CustomComponent"
},
"description": "Display a chat message in the Playground.",
"icon": "ChatOutput",
"base_classes": ["str", "Record", "Text", "object"],
"base_classes": [
"str",
"Record",
"Text",
"object"
],
"display_name": "Chat Output",
"documentation": "",
"custom_fields": {
@@ -544,7 +592,10 @@
"session_id": null,
"return_record": null
},
"output_types": ["Text", "Record"],
"output_types": [
"Text",
"Record"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@@ -589,7 +640,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"code": {
"type": "code",
@@ -598,7 +651,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -672,7 +725,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"openai_api_base": {
"type": "str",
@@ -691,7 +746,9 @@
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"openai_api_key": {
"type": "str",
@@ -710,7 +767,9 @@
"info": "The OpenAI API Key to use for the OpenAI model.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"value": "OPENAI_API_KEY"
},
"stream": {
@@ -749,11 +808,13 @@
"info": "System message to pass to the model.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"temperature": {
"type": "float",
"required": true,
"required": false,
"placeholder": "",
"list": false,
"show": true,
@@ -780,7 +841,11 @@
},
"description": "Generates text using OpenAI LLMs.",
"icon": "OpenAI",
"base_classes": ["object", "str", "Text"],
"base_classes": [
"object",
"str",
"Text"
],
"display_name": "OpenAI",
"documentation": "",
"custom_fields": {
@@ -794,7 +859,9 @@
"stream": null,
"system_message": null
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"field_formatters": {},
"frozen": false,
"field_order": [
@@ -825,18 +892,28 @@
"edges": [
{
"source": "ChatInput-MsSJ9",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Record\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153ChatInput\u0153,\u0153id\u0153:\u0153ChatInput-MsSJ9\u0153}",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œRecordœ,œTextœ,œobjectœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-MsSJ9œ}",
"target": "Prompt-tHwPf",
"targetHandle": "{\u0153fieldName\u0153:\u0153Question\u0153,\u0153id\u0153:\u0153Prompt-tHwPf\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œQuestionœ,œidœ:œPrompt-tHwPfœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "Question",
"id": "Prompt-tHwPf",
"inputTypes": ["Document", "BaseOutputParser", "Record", "Text"],
"inputTypes": [
"Document",
"BaseOutputParser",
"Record",
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["str", "Record", "Text", "object"],
"baseClasses": [
"str",
"Record",
"Text",
"object"
],
"dataType": "ChatInput",
"id": "ChatInput-MsSJ9"
}
@@ -845,22 +922,29 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-ChatInput-MsSJ9{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Record\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153ChatInput\u0153,\u0153id\u0153:\u0153ChatInput-MsSJ9\u0153}-Prompt-tHwPf{\u0153fieldName\u0153:\u0153Question\u0153,\u0153id\u0153:\u0153Prompt-tHwPf\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-ChatInput-MsSJ9{œbaseClassesœ:[œstrœ,œRecordœ,œTextœ,œobjectœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-MsSJ9œ}-Prompt-tHwPf{œfieldNameœ:œQuestionœ,œidœ:œPrompt-tHwPfœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}"
},
{
"source": "File-6TEsD",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153Record\u0153],\u0153dataType\u0153:\u0153File\u0153,\u0153id\u0153:\u0153File-6TEsD\u0153}",
"sourceHandle": "{œbaseClassesœ:[œRecordœ],œdataTypeœ:œFileœ,œidœ:œFile-6TEsDœ}",
"target": "Prompt-tHwPf",
"targetHandle": "{\u0153fieldName\u0153:\u0153Document\u0153,\u0153id\u0153:\u0153Prompt-tHwPf\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œDocumentœ,œidœ:œPrompt-tHwPfœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "Document",
"id": "Prompt-tHwPf",
"inputTypes": ["Document", "BaseOutputParser", "Record", "Text"],
"inputTypes": [
"Document",
"BaseOutputParser",
"Record",
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["Record"],
"baseClasses": [
"Record"
],
"dataType": "File",
"id": "File-6TEsD"
}
@@ -869,22 +953,28 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-File-6TEsD{\u0153baseClasses\u0153:[\u0153Record\u0153],\u0153dataType\u0153:\u0153File\u0153,\u0153id\u0153:\u0153File-6TEsD\u0153}-Prompt-tHwPf{\u0153fieldName\u0153:\u0153Document\u0153,\u0153id\u0153:\u0153Prompt-tHwPf\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-File-6TEsD{œbaseClassesœ:[œRecordœ],œdataTypeœ:œFileœ,œidœ:œFile-6TEsDœ}-Prompt-tHwPf{œfieldNameœ:œDocumentœ,œidœ:œPrompt-tHwPfœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}"
},
{
"source": "Prompt-tHwPf",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-tHwPf\u0153}",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-tHwPfœ}",
"target": "OpenAIModel-Bt067",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153OpenAIModel-Bt067\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-Bt067œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "OpenAIModel-Bt067",
"inputTypes": ["Text"],
"inputTypes": [
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["object", "str", "Text"],
"baseClasses": [
"object",
"str",
"Text"
],
"dataType": "Prompt",
"id": "Prompt-tHwPf"
}
@@ -893,22 +983,28 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-Prompt-tHwPf{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-tHwPf\u0153}-OpenAIModel-Bt067{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153OpenAIModel-Bt067\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-Prompt-tHwPf{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-tHwPfœ}-OpenAIModel-Bt067{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-Bt067œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
},
{
"source": "OpenAIModel-Bt067",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-Bt067\u0153}",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-Bt067œ}",
"target": "ChatOutput-F5Awj",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153ChatOutput-F5Awj\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-F5Awjœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-F5Awj",
"inputTypes": ["Text"],
"inputTypes": [
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["object", "str", "Text"],
"baseClasses": [
"object",
"str",
"Text"
],
"dataType": "OpenAIModel",
"id": "OpenAIModel-Bt067"
}
@@ -917,7 +1013,7 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-OpenAIModel-Bt067{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-Bt067\u0153}-ChatOutput-F5Awj{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153ChatOutput-F5Awj\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-OpenAIModel-Bt067{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-Bt067œ}-ChatOutput-F5Awj{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-F5Awjœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
}
],
"viewport": {
@@ -930,4 +1026,4 @@
"name": "Document QA",
"last_tested_version": "1.0.0a0",
"is_component": false
}
}

View file

@@ -1,6 +1,6 @@
{
"id": "08d5cccf-d098-4367-b14b-1078429c9ed9",
"icon": "\ud83e\udd16",
"icon": "🤖",
"icon_bg_color": "#FFD700",
"data": {
"nodes": [
@@ -83,7 +83,10 @@
"fileTypes": [],
"file_path": "",
"password": false,
"options": ["Machine", "User"],
"options": [
"Machine",
"User"
],
"name": "sender",
"display_name": "Sender Type",
"advanced": true,
@@ -91,7 +94,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"sender_name": {
"type": "str",
@@ -111,7 +116,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"session_id": {
"type": "str",
@@ -130,14 +137,21 @@
"info": "If provided, the message will be stored in the memory.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"value": "MySessionID"
},
"_type": "CustomComponent"
},
"description": "Get chat inputs from the Playground.",
"icon": "ChatInput",
"base_classes": ["Text", "object", "Record", "str"],
"base_classes": [
"Text",
"object",
"Record",
"str"
],
"display_name": "Chat Input",
"documentation": "",
"custom_fields": {
@@ -147,7 +161,10 @@
"session_id": null,
"return_record": null
},
"output_types": ["Text", "Record"],
"output_types": [
"Text",
"Record"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@@ -206,7 +223,9 @@
"name": "input_value",
"display_name": "Message",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "",
"load_from_db": false,
@@ -242,7 +261,10 @@
"fileTypes": [],
"file_path": "",
"password": false,
"options": ["Machine", "User"],
"options": [
"Machine",
"User"
],
"name": "sender",
"display_name": "Sender Type",
"advanced": true,
@@ -250,7 +272,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"sender_name": {
"type": "str",
@@ -270,7 +294,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"session_id": {
"type": "str",
@@ -289,14 +315,21 @@
"info": "If provided, the message will be stored in the memory.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"value": "MySessionID"
},
"_type": "CustomComponent"
},
"description": "Display a chat message in the Playground.",
"icon": "ChatOutput",
"base_classes": ["Text", "object", "Record", "str"],
"base_classes": [
"Text",
"object",
"Record",
"str"
],
"display_name": "Chat Output",
"documentation": "",
"custom_fields": {
@@ -306,7 +339,10 @@
"session_id": null,
"return_record": null
},
"output_types": ["Text", "Record"],
"output_types": [
"Text",
"Record"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@@ -382,7 +418,10 @@
"fileTypes": [],
"file_path": "",
"password": false,
"options": ["Ascending", "Descending"],
"options": [
"Ascending",
"Descending"
],
"name": "order",
"display_name": "Order",
"advanced": true,
@@ -390,7 +429,9 @@
"info": "Order of the messages.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"record_template": {
"type": "str",
@@ -410,7 +451,9 @@
"info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"sender": {
"type": "str",
@@ -423,7 +466,11 @@
"fileTypes": [],
"file_path": "",
"password": false,
"options": ["Machine", "User", "Machine and User"],
"options": [
"Machine",
"User",
"Machine and User"
],
"name": "sender",
"display_name": "Sender Type",
"advanced": false,
@@ -431,7 +478,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"sender_name": {
"type": "str",
@@ -450,7 +499,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"session_id": {
"type": "str",
@@ -465,7 +516,9 @@
"name": "session_id",
"display_name": "Session ID",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "Session ID of the chat history.",
"load_from_db": false,
@@ -476,7 +529,11 @@
},
"description": "Retrieves stored chat messages given a specific Session ID.",
"icon": "history",
"base_classes": ["str", "Text", "object"],
"base_classes": [
"str",
"Text",
"object"
],
"display_name": "Chat Memory",
"documentation": "",
"custom_fields": {
@@ -487,7 +544,9 @@
"order": null,
"record_template": null
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@@ -549,7 +608,9 @@
"name": "template",
"display_name": "Template",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "",
"load_from_db": false,
@@ -614,14 +675,23 @@
"is_input": null,
"is_output": null,
"is_composition": null,
"base_classes": ["Text", "str", "object"],
"base_classes": [
"Text",
"str",
"object"
],
"name": "",
"display_name": "Prompt",
"documentation": "",
"custom_fields": {
"template": ["context", "user_message"]
"template": [
"context",
"user_message"
]
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"full_path": null,
"field_formatters": {},
"frozen": false,
@@ -670,7 +740,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"code": {
"type": "code",
@@ -679,7 +751,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,
@@ -753,7 +825,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"openai_api_base": {
"type": "str",
@@ -772,7 +846,9 @@
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"openai_api_key": {
"type": "str",
@@ -791,7 +867,9 @@
"info": "The OpenAI API Key to use for the OpenAI model.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"value": "OPENAI_API_KEY"
},
"stream": {
@@ -830,11 +908,13 @@
"info": "System message to pass to the model.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"temperature": {
"type": "float",
"required": true,
"required": false,
"placeholder": "",
"list": false,
"show": true,
@@ -861,7 +941,11 @@
},
"description": "Generates text using OpenAI LLMs.",
"icon": "OpenAI",
"base_classes": ["str", "object", "Text"],
"base_classes": [
"str",
"object",
"Text"
],
"display_name": "OpenAI",
"documentation": "",
"custom_fields": {
@@ -875,7 +959,9 @@
"stream": null,
"system_message": null
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"field_formatters": {},
"frozen": false,
"field_order": [
@@ -927,7 +1013,10 @@
"name": "input_value",
"display_name": "Value",
"advanced": false,
"input_types": ["Record", "Text"],
"input_types": [
"Record",
"Text"
],
"dynamic": false,
"info": "Text or Record to be passed as output.",
"load_from_db": false,
@@ -969,20 +1058,28 @@
"info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"_type": "CustomComponent"
},
"description": "Display a text output in the Playground.",
"icon": "type",
"base_classes": ["str", "object", "Text"],
"base_classes": [
"str",
"object",
"Text"
],
"display_name": "Inspect Memory",
"documentation": "",
"custom_fields": {
"input_value": null,
"record_template": null
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@@ -1003,18 +1100,27 @@
"edges": [
{
"source": "MemoryComponent-cdA1J",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153MemoryComponent\u0153,\u0153id\u0153:\u0153MemoryComponent-cdA1J\u0153}",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œMemoryComponentœ,œidœ:œMemoryComponent-cdA1Jœ}",
"target": "Prompt-ODkUx",
"targetHandle": "{\u0153fieldName\u0153:\u0153context\u0153,\u0153id\u0153:\u0153Prompt-ODkUx\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œcontextœ,œidœ:œPrompt-ODkUxœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "context",
"type": "str",
"id": "Prompt-ODkUx",
"inputTypes": ["Document", "BaseOutputParser", "Record", "Text"]
"inputTypes": [
"Document",
"BaseOutputParser",
"Record",
"Text"
]
},
"sourceHandle": {
"baseClasses": ["str", "Text", "object"],
"baseClasses": [
"str",
"Text",
"object"
],
"dataType": "MemoryComponent",
"id": "MemoryComponent-cdA1J"
}
@@ -1023,23 +1129,33 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-MemoryComponent-cdA1J{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153MemoryComponent\u0153,\u0153id\u0153:\u0153MemoryComponent-cdA1J\u0153}-Prompt-ODkUx{\u0153fieldName\u0153:\u0153context\u0153,\u0153id\u0153:\u0153Prompt-ODkUx\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"id": "reactflow__edge-MemoryComponent-cdA1J{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œMemoryComponentœ,œidœ:œMemoryComponent-cdA1Jœ}-Prompt-ODkUx{œfieldNameœ:œcontextœ,œidœ:œPrompt-ODkUxœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"selected": false
},
{
"source": "ChatInput-t7F8v",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153Text\u0153,\u0153object\u0153,\u0153Record\u0153,\u0153str\u0153],\u0153dataType\u0153:\u0153ChatInput\u0153,\u0153id\u0153:\u0153ChatInput-t7F8v\u0153}",
"sourceHandle": "{œbaseClassesœ:[œTextœ,œobjectœ,œRecordœ,œstrœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-t7F8vœ}",
"target": "Prompt-ODkUx",
"targetHandle": "{\u0153fieldName\u0153:\u0153user_message\u0153,\u0153id\u0153:\u0153Prompt-ODkUx\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œuser_messageœ,œidœ:œPrompt-ODkUxœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "user_message",
"type": "str",
"id": "Prompt-ODkUx",
"inputTypes": ["Document", "BaseOutputParser", "Record", "Text"]
"inputTypes": [
"Document",
"BaseOutputParser",
"Record",
"Text"
]
},
"sourceHandle": {
"baseClasses": ["Text", "object", "Record", "str"],
"baseClasses": [
"Text",
"object",
"Record",
"str"
],
"dataType": "ChatInput",
"id": "ChatInput-t7F8v"
}
@ -1048,23 +1164,29 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-ChatInput-t7F8v{\u0153baseClasses\u0153:[\u0153Text\u0153,\u0153object\u0153,\u0153Record\u0153,\u0153str\u0153],\u0153dataType\u0153:\u0153ChatInput\u0153,\u0153id\u0153:\u0153ChatInput-t7F8v\u0153}-Prompt-ODkUx{\u0153fieldName\u0153:\u0153user_message\u0153,\u0153id\u0153:\u0153Prompt-ODkUx\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"id": "reactflow__edge-ChatInput-t7F8v{œbaseClassesœ:[œTextœ,œobjectœ,œRecordœ,œstrœ],œdataTypeœ:œChatInputœ,œidœ:œChatInput-t7F8vœ}-Prompt-ODkUx{œfieldNameœ:œuser_messageœ,œidœ:œPrompt-ODkUxœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"selected": false
},
{
"source": "Prompt-ODkUx",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153Text\u0153,\u0153str\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-ODkUx\u0153}",
"sourceHandle": "{œbaseClassesœ:[œTextœ,œstrœ,œobjectœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-ODkUxœ}",
"target": "OpenAIModel-9RykF",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153OpenAIModel-9RykF\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-9RykFœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "OpenAIModel-9RykF",
"inputTypes": ["Text"],
"inputTypes": [
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["Text", "str", "object"],
"baseClasses": [
"Text",
"str",
"object"
],
"dataType": "Prompt",
"id": "Prompt-ODkUx"
}
@ -1073,22 +1195,28 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-Prompt-ODkUx{\u0153baseClasses\u0153:[\u0153Text\u0153,\u0153str\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-ODkUx\u0153}-OpenAIModel-9RykF{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153OpenAIModel-9RykF\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-Prompt-ODkUx{œbaseClassesœ:[œTextœ,œstrœ,œobjectœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-ODkUxœ}-OpenAIModel-9RykF{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-9RykFœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
},
{
"source": "OpenAIModel-9RykF",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153object\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-9RykF\u0153}",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œobjectœ,œTextœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-9RykFœ}",
"target": "ChatOutput-P1jEe",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153ChatOutput-P1jEe\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-P1jEeœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-P1jEe",
"inputTypes": ["Text"],
"inputTypes": [
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["str", "object", "Text"],
"baseClasses": [
"str",
"object",
"Text"
],
"dataType": "OpenAIModel",
"id": "OpenAIModel-9RykF"
}
@ -1097,22 +1225,29 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-OpenAIModel-9RykF{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153object\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-9RykF\u0153}-ChatOutput-P1jEe{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153ChatOutput-P1jEe\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-OpenAIModel-9RykF{œbaseClassesœ:[œstrœ,œobjectœ,œTextœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-9RykFœ}-ChatOutput-P1jEe{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-P1jEeœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
},
{
"source": "MemoryComponent-cdA1J",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153MemoryComponent\u0153,\u0153id\u0153:\u0153MemoryComponent-cdA1J\u0153}",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œMemoryComponentœ,œidœ:œMemoryComponent-cdA1Jœ}",
"target": "TextOutput-vrs6T",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153TextOutput-vrs6T\u0153,\u0153inputTypes\u0153:[\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-vrs6Tœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "TextOutput-vrs6T",
"inputTypes": ["Record", "Text"],
"inputTypes": [
"Record",
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["str", "Text", "object"],
"baseClasses": [
"str",
"Text",
"object"
],
"dataType": "MemoryComponent",
"id": "MemoryComponent-cdA1J"
}
@ -1121,7 +1256,7 @@
"stroke": "#555"
},
"className": "stroke-foreground stroke-connection",
"id": "reactflow__edge-MemoryComponent-cdA1J{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153MemoryComponent\u0153,\u0153id\u0153:\u0153MemoryComponent-cdA1J\u0153}-TextOutput-vrs6T{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153TextOutput-vrs6T\u0153,\u0153inputTypes\u0153:[\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-MemoryComponent-cdA1J{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œMemoryComponentœ,œidœ:œMemoryComponent-cdA1Jœ}-TextOutput-vrs6T{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-vrs6Tœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}"
}
],
"viewport": {
@ -1134,4 +1269,4 @@
"name": "Memory Chatbot",
"last_tested_version": "1.0.0a0",
"is_component": false
}
}

View file

@ -45,7 +45,9 @@
"name": "template",
"display_name": "Template",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "",
"load_from_db": false,
@ -84,14 +86,22 @@
"is_input": null,
"is_output": null,
"is_composition": null,
"base_classes": ["object", "str", "Text"],
"base_classes": [
"object",
"str",
"Text"
],
"name": "",
"display_name": "Prompt",
"documentation": "",
"custom_fields": {
"template": ["document"]
"template": [
"document"
]
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"full_path": null,
"field_formatters": {},
"frozen": false,
@ -155,7 +165,9 @@
"name": "template",
"display_name": "Template",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "",
"load_from_db": false,
@ -194,14 +206,22 @@
"is_input": null,
"is_output": null,
"is_composition": null,
"base_classes": ["object", "str", "Text"],
"base_classes": [
"object",
"str",
"Text"
],
"name": "",
"display_name": "Prompt",
"documentation": "",
"custom_fields": {
"template": ["summary"]
"template": [
"summary"
]
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"full_path": null,
"field_formatters": {},
"frozen": false,
@ -260,7 +280,9 @@
"name": "input_value",
"display_name": "Message",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "",
"load_from_db": false,
@ -284,7 +306,9 @@
"info": "In case of Message being a Record, this template will be used to convert it to text.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"return_record": {
"type": "bool",
@ -316,7 +340,10 @@
"fileTypes": [],
"file_path": "",
"password": false,
"options": ["Machine", "User"],
"options": [
"Machine",
"User"
],
"name": "sender",
"display_name": "Sender Type",
"advanced": true,
@ -324,7 +351,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"sender_name": {
"type": "str",
@ -344,7 +373,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"session_id": {
"type": "str",
@ -363,13 +394,20 @@
"info": "If provided, the message will be stored in the memory.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"_type": "CustomComponent"
},
"description": "Display a chat message in the Playground.",
"icon": "ChatOutput",
"base_classes": ["object", "Record", "Text", "str"],
"base_classes": [
"object",
"Record",
"Text",
"str"
],
"display_name": "Chat Output",
"documentation": "",
"custom_fields": {
@ -380,7 +418,10 @@
"return_record": null,
"record_template": null
},
"output_types": ["Text", "Record"],
"output_types": [
"Text",
"Record"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@ -435,7 +476,9 @@
"name": "input_value",
"display_name": "Message",
"advanced": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"dynamic": false,
"info": "",
"load_from_db": false,
@ -459,7 +502,9 @@
"info": "In case of Message being a Record, this template will be used to convert it to text.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"return_record": {
"type": "bool",
@ -491,7 +536,10 @@
"fileTypes": [],
"file_path": "",
"password": false,
"options": ["Machine", "User"],
"options": [
"Machine",
"User"
],
"name": "sender",
"display_name": "Sender Type",
"advanced": true,
@ -499,7 +547,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"sender_name": {
"type": "str",
@ -519,7 +569,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"session_id": {
"type": "str",
@ -538,13 +590,20 @@
"info": "If provided, the message will be stored in the memory.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"_type": "CustomComponent"
},
"description": "Display a chat message in the Playground.",
"icon": "ChatOutput",
"base_classes": ["object", "Record", "Text", "str"],
"base_classes": [
"object",
"Record",
"Text",
"str"
],
"display_name": "Chat Output",
"documentation": "",
"custom_fields": {
@ -555,7 +614,10 @@
"return_record": null,
"record_template": null
},
"output_types": ["Text", "Record"],
"output_types": [
"Text",
"Record"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@ -610,7 +672,10 @@
"name": "input_value",
"display_name": "Value",
"advanced": false,
"input_types": ["Record", "Text"],
"input_types": [
"Record",
"Text"
],
"dynamic": false,
"info": "Text or Record to be passed as input.",
"load_from_db": false,
@ -634,20 +699,28 @@
"info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"_type": "CustomComponent"
},
"description": "Get text inputs from the Playground.",
"icon": "type",
"base_classes": ["str", "Text", "object"],
"base_classes": [
"str",
"Text",
"object"
],
"display_name": "Text Input",
"documentation": "",
"custom_fields": {
"input_value": null,
"record_template": null
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@ -689,7 +762,10 @@
"name": "input_value",
"display_name": "Value",
"advanced": false,
"input_types": ["Record", "Text"],
"input_types": [
"Record",
"Text"
],
"dynamic": false,
"info": "Text or Record to be passed as output.",
"load_from_db": false,
@ -731,20 +807,28 @@
"info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"_type": "CustomComponent"
},
"description": "Display a text output in the Playground.",
"icon": "type",
"base_classes": ["str", "Text", "object"],
"base_classes": [
"str",
"Text",
"object"
],
"display_name": "First Prompt",
"documentation": "",
"custom_fields": {
"input_value": null,
"record_template": null
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@ -789,7 +873,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"code": {
"type": "code",
@ -798,7 +884,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -872,7 +958,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"openai_api_base": {
"type": "str",
@ -891,7 +979,9 @@
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"openai_api_key": {
"type": "str",
@ -910,7 +1000,9 @@
"info": "The OpenAI API Key to use for the OpenAI model.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"value": "OPENAI_API_KEY"
},
"stream": {
@ -949,11 +1041,13 @@
"info": "System message to pass to the model.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"temperature": {
"type": "float",
"required": true,
"required": false,
"placeholder": "",
"list": false,
"show": true,
@ -980,7 +1074,11 @@
},
"description": "Generates text using OpenAI LLMs.",
"icon": "OpenAI",
"base_classes": ["str", "Text", "object"],
"base_classes": [
"str",
"Text",
"object"
],
"display_name": "OpenAI",
"documentation": "",
"custom_fields": {
@ -994,7 +1092,9 @@
"stream": null,
"system_message": null
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"field_formatters": {},
"frozen": false,
"field_order": [
@ -1046,7 +1146,10 @@
"name": "input_value",
"display_name": "Value",
"advanced": false,
"input_types": ["Record", "Text"],
"input_types": [
"Record",
"Text"
],
"dynamic": false,
"info": "Text or Record to be passed as output.",
"load_from_db": false,
@ -1088,20 +1191,28 @@
"info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"_type": "CustomComponent"
},
"description": "Display a text output in the Playground.",
"icon": "type",
"base_classes": ["str", "Text", "object"],
"base_classes": [
"str",
"Text",
"object"
],
"display_name": "Second Prompt",
"documentation": "",
"custom_fields": {
"input_value": null,
"record_template": null
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"field_formatters": {},
"frozen": false,
"field_order": [],
@ -1146,7 +1257,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"code": {
"type": "code",
@ -1155,7 +1268,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\"},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-4o\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -1229,7 +1342,9 @@
"info": "",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"openai_api_base": {
"type": "str",
@ -1248,7 +1363,9 @@
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"openai_api_key": {
"type": "str",
@ -1267,7 +1384,9 @@
"info": "The OpenAI API Key to use for the OpenAI model.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"],
"input_types": [
"Text"
],
"value": ""
},
"stream": {
@ -1306,11 +1425,13 @@
"info": "System message to pass to the model.",
"load_from_db": false,
"title_case": false,
"input_types": ["Text"]
"input_types": [
"Text"
]
},
"temperature": {
"type": "float",
"required": true,
"required": false,
"placeholder": "",
"list": false,
"show": true,
@ -1337,7 +1458,11 @@
},
"description": "Generates text using OpenAI LLMs.",
"icon": "OpenAI",
"base_classes": ["str", "Text", "object"],
"base_classes": [
"str",
"Text",
"object"
],
"display_name": "OpenAI",
"documentation": "",
"custom_fields": {
@ -1351,7 +1476,9 @@
"stream": null,
"system_message": null
},
"output_types": ["Text"],
"output_types": [
"Text"
],
"field_formatters": {},
"frozen": false,
"field_order": [
@ -1382,18 +1509,27 @@
"edges": [
{
"source": "TextInput-sptaH",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153TextInput\u0153,\u0153id\u0153:\u0153TextInput-sptaH\u0153}",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-sptaHœ}",
"target": "Prompt-amqBu",
"targetHandle": "{\u0153fieldName\u0153:\u0153document\u0153,\u0153id\u0153:\u0153Prompt-amqBu\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œdocumentœ,œidœ:œPrompt-amqBuœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "document",
"id": "Prompt-amqBu",
"inputTypes": ["Document", "BaseOutputParser", "Record", "Text"],
"inputTypes": [
"Document",
"BaseOutputParser",
"Record",
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["str", "Text", "object"],
"baseClasses": [
"str",
"Text",
"object"
],
"dataType": "TextInput",
"id": "TextInput-sptaH"
}
@ -1402,22 +1538,29 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-TextInput-sptaH{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153TextInput\u0153,\u0153id\u0153:\u0153TextInput-sptaH\u0153}-Prompt-amqBu{\u0153fieldName\u0153:\u0153document\u0153,\u0153id\u0153:\u0153Prompt-amqBu\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-TextInput-sptaH{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-sptaHœ}-Prompt-amqBu{œfieldNameœ:œdocumentœ,œidœ:œPrompt-amqBuœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}"
},
{
"source": "Prompt-amqBu",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-amqBu\u0153}",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}",
"target": "TextOutput-2MS4a",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153TextOutput-2MS4a\u0153,\u0153inputTypes\u0153:[\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-2MS4aœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "TextOutput-2MS4a",
"inputTypes": ["Record", "Text"],
"inputTypes": [
"Record",
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["object", "str", "Text"],
"baseClasses": [
"object",
"str",
"Text"
],
"dataType": "Prompt",
"id": "Prompt-amqBu"
}
@ -1426,22 +1569,28 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-Prompt-amqBu{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-amqBu\u0153}-TextOutput-2MS4a{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153TextOutput-2MS4a\u0153,\u0153inputTypes\u0153:[\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-Prompt-amqBu{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}-TextOutput-2MS4a{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-2MS4aœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}"
},
{
"source": "Prompt-amqBu",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-amqBu\u0153}",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}",
"target": "OpenAIModel-uYXZJ",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153OpenAIModel-uYXZJ\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-uYXZJœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "OpenAIModel-uYXZJ",
"inputTypes": ["Text"],
"inputTypes": [
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["object", "str", "Text"],
"baseClasses": [
"object",
"str",
"Text"
],
"dataType": "Prompt",
"id": "Prompt-amqBu"
}
@ -1450,22 +1599,31 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-Prompt-amqBu{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-amqBu\u0153}-OpenAIModel-uYXZJ{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153OpenAIModel-uYXZJ\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-Prompt-amqBu{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}-OpenAIModel-uYXZJ{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-uYXZJœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
},
{
"source": "OpenAIModel-uYXZJ",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-uYXZJ\u0153}",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}",
"target": "Prompt-gTNiz",
"targetHandle": "{\u0153fieldName\u0153:\u0153summary\u0153,\u0153id\u0153:\u0153Prompt-gTNiz\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œsummaryœ,œidœ:œPrompt-gTNizœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "summary",
"id": "Prompt-gTNiz",
"inputTypes": ["Document", "BaseOutputParser", "Record", "Text"],
"inputTypes": [
"Document",
"BaseOutputParser",
"Record",
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["str", "Text", "object"],
"baseClasses": [
"str",
"Text",
"object"
],
"dataType": "OpenAIModel",
"id": "OpenAIModel-uYXZJ"
}
@ -1474,22 +1632,28 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-OpenAIModel-uYXZJ{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-uYXZJ\u0153}-Prompt-gTNiz{\u0153fieldName\u0153:\u0153summary\u0153,\u0153id\u0153:\u0153Prompt-gTNiz\u0153,\u0153inputTypes\u0153:[\u0153Document\u0153,\u0153BaseOutputParser\u0153,\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-OpenAIModel-uYXZJ{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}-Prompt-gTNiz{œfieldNameœ:œsummaryœ,œidœ:œPrompt-gTNizœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}"
},
{
"source": "OpenAIModel-uYXZJ",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-uYXZJ\u0153}",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}",
"target": "ChatOutput-EJkG3",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153ChatOutput-EJkG3\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-EJkG3œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-EJkG3",
"inputTypes": ["Text"],
"inputTypes": [
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["str", "Text", "object"],
"baseClasses": [
"str",
"Text",
"object"
],
"dataType": "OpenAIModel",
"id": "OpenAIModel-uYXZJ"
}
@ -1498,22 +1662,29 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-OpenAIModel-uYXZJ{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-uYXZJ\u0153}-ChatOutput-EJkG3{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153ChatOutput-EJkG3\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-OpenAIModel-uYXZJ{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}-ChatOutput-EJkG3{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-EJkG3œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
},
{
"source": "Prompt-gTNiz",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-gTNiz\u0153}",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}",
"target": "TextOutput-MUDOR",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153TextOutput-MUDOR\u0153,\u0153inputTypes\u0153:[\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-MUDORœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "TextOutput-MUDOR",
"inputTypes": ["Record", "Text"],
"inputTypes": [
"Record",
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["object", "str", "Text"],
"baseClasses": [
"object",
"str",
"Text"
],
"dataType": "Prompt",
"id": "Prompt-gTNiz"
}
@ -1522,22 +1693,28 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-Prompt-gTNiz{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-gTNiz\u0153}-TextOutput-MUDOR{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153TextOutput-MUDOR\u0153,\u0153inputTypes\u0153:[\u0153Record\u0153,\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-Prompt-gTNiz{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}-TextOutput-MUDOR{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-MUDORœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}"
},
{
"source": "Prompt-gTNiz",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-gTNiz\u0153}",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}",
"target": "OpenAIModel-XawYB",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153OpenAIModel-XawYB\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-XawYBœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "OpenAIModel-XawYB",
"inputTypes": ["Text"],
"inputTypes": [
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["object", "str", "Text"],
"baseClasses": [
"object",
"str",
"Text"
],
"dataType": "Prompt",
"id": "Prompt-gTNiz"
}
@ -1546,22 +1723,28 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-Prompt-gTNiz{\u0153baseClasses\u0153:[\u0153object\u0153,\u0153str\u0153,\u0153Text\u0153],\u0153dataType\u0153:\u0153Prompt\u0153,\u0153id\u0153:\u0153Prompt-gTNiz\u0153}-OpenAIModel-XawYB{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153OpenAIModel-XawYB\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-Prompt-gTNiz{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}-OpenAIModel-XawYB{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-XawYBœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
},
{
"source": "OpenAIModel-XawYB",
"sourceHandle": "{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-XawYB\u0153}",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-XawYBœ}",
"target": "ChatOutput-DNmvg",
"targetHandle": "{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153ChatOutput-DNmvg\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-DNmvgœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-DNmvg",
"inputTypes": ["Text"],
"inputTypes": [
"Text"
],
"type": "str"
},
"sourceHandle": {
"baseClasses": ["str", "Text", "object"],
"baseClasses": [
"str",
"Text",
"object"
],
"dataType": "OpenAIModel",
"id": "OpenAIModel-XawYB"
}
@ -1570,7 +1753,7 @@
"stroke": "#555"
},
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-OpenAIModel-XawYB{\u0153baseClasses\u0153:[\u0153str\u0153,\u0153Text\u0153,\u0153object\u0153],\u0153dataType\u0153:\u0153OpenAIModel\u0153,\u0153id\u0153:\u0153OpenAIModel-XawYB\u0153}-ChatOutput-DNmvg{\u0153fieldName\u0153:\u0153input_value\u0153,\u0153id\u0153:\u0153ChatOutput-DNmvg\u0153,\u0153inputTypes\u0153:[\u0153Text\u0153],\u0153type\u0153:\u0153str\u0153}"
"id": "reactflow__edge-OpenAIModel-XawYB{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-XawYBœ}-ChatOutput-DNmvg{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-DNmvgœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
}
],
"viewport": {
@ -1583,4 +1766,4 @@
"name": "Prompt Chaining",
"last_tested_version": "1.0.0a0",
"is_component": false
}
}

File diff suppressed because one or more lines are too long

View file

@ -14,7 +14,11 @@ from rich import print as rprint
from starlette.middleware.base import BaseHTTPMiddleware
from langflow.api import router
from langflow.initial_setup.setup import create_or_update_starter_projects
from langflow.initial_setup.setup import (
create_or_update_starter_projects,
initialize_super_user_if_needed,
load_flows_from_directory,
)
from langflow.interface.utils import setup_llm_caching
from langflow.services.plugins.langfuse_plugin import LangfuseInstance
from langflow.services.utils import initialize_services, teardown_services
@ -33,22 +37,22 @@ class JavaScriptMIMETypeMiddleware(BaseHTTPMiddleware):
return response
def get_lifespan(fix_migration=False, socketio_server=None):
from langflow.version import __version__ # type: ignore
def get_lifespan(fix_migration=False, socketio_server=None, version=None):
@asynccontextmanager
async def lifespan(app: FastAPI):
nest_asyncio.apply()
# Startup message
if __version__:
rprint(f"[bold green]Starting Langflow v{__version__}...[/bold green]")
if version:
rprint(f"[bold green]Starting Langflow v{version}...[/bold green]")
else:
rprint("[bold green]Starting Langflow...[/bold green]")
try:
initialize_services(fix_migration=fix_migration, socketio_server=socketio_server)
setup_llm_caching()
LangfuseInstance.update()
initialize_super_user_if_needed()
create_or_update_starter_projects()
load_flows_from_directory()
yield
except Exception as exc:
if "langflow migration --fix" not in str(exc):
@ -63,11 +67,17 @@ def get_lifespan(fix_migration=False, socketio_server=None):
def create_app():
"""Create the FastAPI app and include the router."""
try:
from langflow.version import __version__ # type: ignore
except ImportError:
from importlib.metadata import version
__version__ = version("langflow-base")
configure()
socketio_server = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*", logger=True)
lifespan = get_lifespan(socketio_server=socketio_server)
app = FastAPI(lifespan=lifespan)
lifespan = get_lifespan(socketio_server=socketio_server, version=__version__)
app = FastAPI(lifespan=lifespan, title="Langflow", version=__version__)
origins = ["*"]
app.add_middleware(

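The version lookup above follows a small fallback pattern: packaged builds ship langflow.version, while installs that only provide the langflow-base distribution resolve the version from package metadata. A minimal, self-contained sketch of that pattern (the resolve_version helper name and the "unknown" fallback are illustrative; the distribution name comes from the hunk above):

from importlib.metadata import PackageNotFoundError, version

def resolve_version() -> str:
    # Prefer the module written at build time, fall back to installed metadata.
    try:
        from langflow.version import __version__  # type: ignore
        return __version__
    except ImportError:
        try:
            return version("langflow-base")
        except PackageNotFoundError:
            return "unknown"  # illustrative fallback, not part of the diff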
View file

@ -76,11 +76,6 @@ async def get_current_user(
if token:
return await get_current_user_by_jwt(token, db)
else:
if not query_param and not header_param:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="An API key as query or header, or a JWT token must be passed",
)
user = await api_key_security(query_param, header_param, db)
if user:
return user
@ -216,15 +211,14 @@ def create_super_user(
def create_user_longterm_token(db: Session = Depends(get_session)) -> tuple[UUID, dict]:
settings_service = get_settings_service()
username = settings_service.auth_settings.SUPERUSER
password = settings_service.auth_settings.SUPERUSER_PASSWORD
if not username or not password:
super_user = get_user_by_username(db, username)
if not super_user:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Missing first superuser credentials",
detail="Super user hasn't been created"
)
super_user = create_super_user(db=db, username=username, password=password)
access_token_expires_longterm = timedelta(days=365)
access_token = create_token(
data={"sub": str(super_user.id)},

View file

@ -9,6 +9,9 @@ from loguru import logger
from langflow.services.base import Service
from langflow.services.cache.base import AsyncBaseCacheService, CacheService
from langflow.services.cache.utils import CacheMiss
CACHE_MISS = CacheMiss()
class ThreadingInMemoryCache(CacheService, Service):
@ -341,12 +344,14 @@ class AsyncInMemoryCache(AsyncBaseCacheService, Service):
async def _get(self, key):
item = self.cache.get(key, None)
if item and (time.time() - item["time"] < self.expiration_time):
self.cache.move_to_end(key)
return pickle.loads(item["value"]) if isinstance(item["value"], bytes) else item["value"]
if item:
await self.delete(key)
return None
if time.time() - item["time"] < self.expiration_time:
self.cache.move_to_end(key)
return pickle.loads(item["value"]) if isinstance(item["value"], bytes) else item["value"]
else:
logger.info(f"Cache item for key '{key}' has expired and will be deleted.")
await self.delete(key)  # Remove the expired item (already logged above)
return CACHE_MISS
async def set(self, key, value, lock: Optional[asyncio.Lock] = None):
if not lock:

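The point of returning CACHE_MISS instead of None is that a stored value of None is a legitimate hit; callers are expected to compare identity against the sentinel rather than rely on truthiness. A minimal sketch of that pattern (the lookup helper is illustrative, the sentinel mirrors the diff):

class CacheMiss:
    def __repr__(self):
        return "<CACHE_MISS>"

CACHE_MISS = CacheMiss()

def lookup(cache: dict, key: str):
    value = cache.get(key, CACHE_MISS)
    if value is CACHE_MISS:   # identity check, not truthiness
        return "miss"
    return value              # None, 0 or "" are all valid hits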
View file

@ -19,6 +19,11 @@ CACHE_DIR = user_cache_dir("langflow", "langflow")
PREFIX = "langflow_cache"
class CacheMiss:
def __repr__(self):
return "<CACHE_MISS>"
def create_cache_folder(func):
def wrapper(*args, **kwargs):
# Get the destination folder

View file

@ -13,7 +13,7 @@ class ChatService(Service):
self._cache_locks = defaultdict(asyncio.Lock)
self.cache_service = get_cache_service()
async def set_cache(self, flow_id: str, data: Any, lock: Optional[asyncio.Lock] = None) -> bool:
async def set_cache(self, key: str, data: Any, lock: Optional[asyncio.Lock] = None) -> bool:
"""
Set the cache for a client.
"""
@ -23,17 +23,17 @@ class ChatService(Service):
"result": data,
"type": type(data),
}
await self.cache_service.upsert(flow_id, result_dict, lock=lock or self._cache_locks[flow_id])
return flow_id in self.cache_service
await self.cache_service.upsert(key, result_dict, lock=lock or self._cache_locks[key])
return key in self.cache_service
async def get_cache(self, flow_id: str, lock: Optional[asyncio.Lock] = None) -> Any:
async def get_cache(self, key: str, lock: Optional[asyncio.Lock] = None) -> Any:
"""
Get the cache for a client.
"""
return await self.cache_service.get(flow_id, lock=lock or self._cache_locks[flow_id])
return await self.cache_service.get(key, lock=lock or self._cache_locks[key])
async def clear_cache(self, flow_id: str, lock: Optional[asyncio.Lock] = None):
async def clear_cache(self, key: str, lock: Optional[asyncio.Lock] = None):
"""
Clear the cache for a client.
"""
await self.cache_service.delete(flow_id, lock=lock or self._cache_locks[flow_id])
await self.cache_service.delete(key, lock=lock or self._cache_locks[key])
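With flow_id renamed to key, the chat service no longer assumes one cache entry per flow; callers can use composite keys, for example one per vertex. A hedged usage sketch (the get_chat_service accessor and the key format are assumptions):

import asyncio

from langflow.services.deps import get_chat_service  # assumed accessor

async def cache_vertex_result(flow_id: str, vertex_id: str, data) -> None:
    chat_service = get_chat_service()
    key = f"{flow_id}:{vertex_id}"          # composite key, illustrative format
    await chat_service.set_cache(key, data)
    cached = await chat_service.get_cache(key)
    print(cached["result"])                 # dict shape comes from set_cache above

# asyncio.run(cache_vertex_result("flow-1", "OpenAIModel-9RykF", {"text": "hi"}))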

View file

@ -15,4 +15,4 @@ class DatabaseServiceFactory(ServiceFactory):
# Here you would have logic to create and configure a DatabaseService
if not settings_service.settings.database_url:
raise ValueError("No database URL provided")
return DatabaseService(settings_service.settings.database_url)
return DatabaseService(settings_service)

View file

@ -1,5 +1,6 @@
# Path: src/backend/langflow/services/database/models/flow/model.py
import re
import warnings
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Dict, Optional
@ -7,7 +8,9 @@ from uuid import UUID, uuid4
import emoji
from emoji import purely_emoji # type: ignore
from fastapi import HTTPException, status
from pydantic import field_serializer, field_validator
from sqlalchemy import UniqueConstraint
from sqlmodel import JSON, Column, Field, Relationship, SQLModel
from langflow.schema.schema import Record
@ -25,7 +28,26 @@ class FlowBase(SQLModel):
data: Optional[Dict] = Field(default=None, nullable=True)
is_component: Optional[bool] = Field(default=False, nullable=True)
updated_at: Optional[datetime] = Field(default_factory=lambda: datetime.now(timezone.utc), nullable=True)
webhook: Optional[bool] = Field(default=False, nullable=True, description="Can be used on the webhook endpoint")
folder_id: Optional[UUID] = Field(default=None, nullable=True)
endpoint_name: Optional[str] = Field(default=None, nullable=True, index=True)
@field_validator("endpoint_name")
@classmethod
def validate_endpoint_name(cls, v):
# Endpoint name must be a string containing only letters, numbers, hyphens, and underscores
if v is not None:
if not isinstance(v, str):
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail="Endpoint name must be a string",
)
if not re.match(r"^[a-zA-Z0-9_-]+$", v):
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail="Endpoint name must contain only letters, numbers, hyphens, and underscores",
)
return v
@field_validator("icon_bg_color")
def validate_icon_bg_color(cls, v):
@ -93,10 +115,15 @@ class FlowBase(SQLModel):
# updated_at can be serialized to JSON
@field_serializer("updated_at")
def serialize_dt(self, dt: datetime, _info):
if dt is None:
return None
return dt.isoformat()
def serialize_datetime(value):
if isinstance(value, datetime):
# Serialize without microseconds and always with a timezone offset,
# e.g. 2024-05-29T17:57:17+00:00 rather than 2024-05-29T17:57:17.631346
value = value.replace(microsecond=0)
if value.tzinfo is None:
value = value.replace(tzinfo=timezone.utc)
return value.isoformat()
return value
@field_validator("updated_at", mode="before")
def validate_dt(cls, v):
@ -128,6 +155,11 @@ class Flow(FlowBase, table=True):
record = Record(data=data)
return record
__table_args__ = (
UniqueConstraint("user_id", "name", name="unique_flow_name"),
UniqueConstraint("user_id", "endpoint_name", name="unique_flow_endpoint_name"),
)
class FlowCreate(FlowBase):
user_id: Optional[UUID] = None
@ -145,3 +177,21 @@ class FlowUpdate(SQLModel):
description: Optional[str] = None
data: Optional[Dict] = None
folder_id: Optional[UUID] = None
endpoint_name: Optional[str] = None
@field_validator("endpoint_name")
@classmethod
def validate_endpoint_name(cls, v):
# Endpoint name must be a string containing only letters, numbers, hyphens, and underscores
if v is not None:
if not isinstance(v, str):
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail="Endpoint name must be a string",
)
if not re.match(r"^[a-zA-Z0-9_-]+$", v):
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail="Endpoint name must contain only letters, numbers, hyphens, and underscores",
)
return v
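A quick illustration of what the endpoint_name validator accepts; the regex is the one from the diff, the sample names are made up:

import re

ENDPOINT_NAME_RE = re.compile(r"^[a-zA-Z0-9_-]+$")

for candidate in ("daily-report", "flow_42", "bad name!", "slash/path"):
    ok = bool(ENDPOINT_NAME_RE.match(candidate))
    print(f"{candidate!r}: {'accepted' if ok else 'rejected with 422'}")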

View file

@ -0,0 +1,33 @@
from typing import Optional
from fastapi import Depends
from sqlmodel import Session
from langflow.services.deps import get_session
from .model import Flow
def get_flow_by_id(session: Session = Depends(get_session), flow_id: Optional[str] = None) -> Flow | None:
"""Get flow by id."""
if flow_id is None:
raise ValueError("Flow id is required.")
return session.get(Flow, flow_id)
def get_webhook_component_in_flow(flow_data: dict):
"""Get webhook component in flow data."""
for node in flow_data.get("nodes", []):
if "Webhook" in node.get("id"):
return node
return None
def get_all_webhook_components_in_flow(flow_data: dict | None):
"""Get all webhook components in flow data."""
if not flow_data:
return []
return [node for node in flow_data.get("nodes", []) if "Webhook" in node.get("id")]
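A small usage sketch for the helpers above; the node ids are made up, and the list comprehension is the same filter get_all_webhook_components_in_flow applies:

flow_data = {
    "nodes": [
        {"id": "Webhook-a1B2c"},
        {"id": "ChatInput-x9Y8z"},
    ]
}

webhook_nodes = [node for node in flow_data.get("nodes", []) if "Webhook" in node.get("id")]
print(webhook_nodes)  # [{'id': 'Webhook-a1B2c'}]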

View file

@ -1,6 +1,7 @@
from typing import TYPE_CHECKING, List, Optional
from uuid import UUID, uuid4
from sqlalchemy import UniqueConstraint
from sqlmodel import Field, Relationship, SQLModel
from langflow.services.database.models.flow.model import FlowRead
@ -30,6 +31,8 @@ class Folder(FolderBase, table=True):
back_populates="folder", sa_relationship_kwargs={"cascade": "all, delete, delete-orphan"}
)
__table_args__ = (UniqueConstraint("user_id", "name", name="unique_folder_name"),)
class FolderCreate(FolderBase):
components_list: Optional[List[UUID]] = None

View file

@ -21,12 +21,17 @@ from langflow.services.utils import teardown_superuser
if TYPE_CHECKING:
from sqlalchemy.engine import Engine
from langflow.services.settings.service import SettingsService
class DatabaseService(Service):
name = "database_service"
def __init__(self, database_url: str):
self.database_url = database_url
def __init__(self, settings_service: "SettingsService"):
self.settings_service = settings_service
if settings_service.settings.database_url is None:
raise ValueError("No database URL provided")
self.database_url: str = settings_service.settings.database_url
# This file is in langflow.services.database.manager.py
# the ini is in langflow
langflow_dir = Path(__file__).parent.parent.parent
@ -41,7 +46,12 @@ class DatabaseService(Service):
connect_args = {"check_same_thread": False}
else:
connect_args = {}
return create_engine(self.database_url, connect_args=connect_args)
return create_engine(
self.database_url,
connect_args=connect_args,
pool_size=self.settings_service.settings.pool_size,
max_overflow=self.settings_service.settings.max_overflow,
)
def __enter__(self):
self._session = Session(self.engine)
@ -267,3 +277,4 @@ class DatabaseService(Service):
logger.error(f"Error tearing down database: {exc}")
self.engine.dispose()
self.engine.dispose()
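For reference, a sketch of what the new engine construction amounts to for a local SQLite install with the default settings; the explicit QueuePool is an assumption added so the sizing arguments clearly apply, the rest mirrors the hunk:

from sqlalchemy import create_engine
from sqlalchemy.pool import QueuePool

engine = create_engine(
    "sqlite:///./langflow.db",                   # placeholder database URL
    connect_args={"check_same_thread": False},   # SQLite-only argument
    poolclass=QueuePool,                         # make the pool explicit (assumption)
    pool_size=10,                                # settings.pool_size default
    max_overflow=10,                             # settings.max_overflow default
)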

View file

@ -8,6 +8,7 @@ from langflow.services.deps import get_monitor_service
if TYPE_CHECKING:
from langflow.api.v1.schemas import ResultDataResponse
from langflow.graph.vertex.base import Vertex
INDEX_KEY = "index"
@ -165,3 +166,35 @@ async def log_vertex_build(
monitor_service.add_row(table_name="vertex_builds", data=row)
except Exception as e:
logger.exception(f"Error logging vertex build: {e}")
def build_clean_params(target: "Vertex") -> dict:
"""
Cleans the parameters of the target vertex.
"""
# Remove any keys whose values aren't plain Python types such as str, int, bool, float, list or dict.
params = {
key: value for key, value in target.params.items() if isinstance(value, (str, int, bool, float, list, dict))
}
# If the value is a list, keep only the items that are plain Python types.
for key, value in params.items():
if isinstance(value, list):
params[key] = [item for item in value if isinstance(item, (str, int, bool, float, list, dict))]
return params
def log_transaction(vertex: "Vertex", status, error=None):
try:
monitor_service = get_monitor_service()
clean_params = build_clean_params(vertex)
data = {
"vertex_id": vertex.id,
"inputs": clean_params,
"output": str(vertex.result),
"timestamp": monitor_service.get_timestamp(),
"status": status,
"error": error,
}
monitor_service.add_row(table_name="transactions", data=data)
except Exception as e:
logger.error(f"Error logging transaction: {e}")

View file

@ -47,6 +47,9 @@ class AuthSettings(BaseSettings):
ACCESS_HTTPONLY: bool = False
"""The HttpOnly attribute of the access token cookie."""
COOKIE_DOMAIN: str | None = None
"""The domain attribute of the cookies. If None, the domain is not set."""
pwd_context: CryptContext = CryptContext(schemes=["bcrypt"], deprecated="auto")
class Config:

View file

@@ -67,10 +67,16 @@ class Settings(BaseSettings):
dev: bool = False
database_url: Optional[str] = None
"""Database URL for Langflow. If not provided, Langflow will use a SQLite database."""
pool_size: int = 10
"""The number of connections to keep open in the connection pool. If not provided, the default is 10."""
max_overflow: int = 10
"""The number of connections to allow that can be opened beyond the pool size. If not provided, the default is 10."""
cache_type: str = "async"
remove_api_keys: bool = False
components_path: List[str] = []
langchain_cache: str = "InMemoryCache"
load_flows_path: Optional[str] = None
# Redis
redis_host: str = "localhost"
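A hedged sketch of overriding the new pool settings from the environment; the settings are pydantic BaseSettings, and the LANGFLOW_ prefix used here is an assumption rather than something shown in this diff:

import os

os.environ["LANGFLOW_POOL_SIZE"] = "20"      # connections kept open in the pool
os.environ["LANGFLOW_MAX_OVERFLOW"] = "30"   # extra connections beyond pool_size
# Start Langflow in this environment and the database engine picks the values up.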
@@ -146,7 +152,13 @@ class Settings(BaseSettings):
# if there is a database in that location
if not info.data["config_dir"]:
raise ValueError("config_dir not set, please set it or provide a database_url")
from langflow.version import is_pre_release # type: ignore
try:
from langflow.version import is_pre_release # type: ignore
except ImportError:
from importlib import metadata
version = metadata.version("langflow-base")
is_pre_release = "a" in version or "b" in version or "rc" in version
if info.data["save_db_in_config_dir"]:
database_dir = info.data["config_dir"]

View file

@@ -17,6 +17,8 @@ VARIABLES_TO_GET_FROM_ENVIRONMENT = [
"PINECONE_API_KEY",
"SEARCHAPI_API_KEY",
"SERPAPI_API_KEY",
"UPSTASH_VECTOR_REST_URL",
"UPSTASH_VECTOR_REST_TOKEN",
"VECTARA_CUSTOMER_ID",
"VECTARA_CORPUS_ID",
"VECTARA_API_KEY",

View file

@@ -1,4 +1,5 @@
import os
from typing import Optional
import yaml
from loguru import logger
@@ -7,7 +8,6 @@ from langflow.services.base import Service
from langflow.services.settings.auth import AuthSettings
from langflow.services.settings.base import Settings
class SettingsService(Service):
name = "settings_service"
@@ -27,7 +27,6 @@ class SettingsService(Service):
with open(file_path, "r") as f:
settings_dict = yaml.safe_load(f)
settings_dict = {k.upper(): v for k, v in settings_dict.items()}
for key in settings_dict:
if key not in Settings.model_fields.keys():

View file

@@ -0,0 +1,65 @@
from sqlalchemy.engine.reflection import Inspector
def table_exists(name, conn):
"""
Check if a table exists.
Parameters:
name (str): The name of the table to check.
conn (sqlalchemy.engine.Engine or sqlalchemy.engine.Connection): The SQLAlchemy engine or connection to use.
Returns:
bool: True if the table exists, False otherwise.
"""
inspector = Inspector.from_engine(conn)
return name in inspector.get_table_names()
def column_exists(table_name, column_name, conn):
"""
Check if a column exists in a table.
Parameters:
table_name (str): The name of the table to check.
column_name (str): The name of the column to check.
conn (sqlalchemy.engine.Engine or sqlalchemy.engine.Connection): The SQLAlchemy engine or connection to use.
Returns:
bool: True if the column exists, False otherwise.
"""
inspector = Inspector.from_engine(conn)
return column_name in [column["name"] for column in inspector.get_columns(table_name)]
def foreign_key_exists(table_name, fk_name, conn):
"""
Check if a foreign key exists in a table.
Parameters:
table_name (str): The name of the table to check.
fk_name (str): The name of the foreign key to check.
conn (sqlalchemy.engine.Engine or sqlalchemy.engine.Connection): The SQLAlchemy engine or connection to use.
Returns:
bool: True if the foreign key exists, False otherwise.
"""
inspector = Inspector.from_engine(conn)
return fk_name in [fk["name"] for fk in inspector.get_foreign_keys(table_name)]
def constraint_exists(table_name, constraint_name, conn):
"""
Check if a constraint exists in a table.
Parameters:
table_name (str): The name of the table to check.
constraint_name (str): The name of the constraint to check.
conn (sqlalchemy.engine.Engine or sqlalchemy.engine.Connection): The SQLAlchemy engine or connection to use.
Returns:
bool: True if the constraint exists, False otherwise.
"""
inspector = Inspector.from_engine(conn)
constraints = inspector.get_unique_constraints(table_name)
return constraint_name in [constraint["name"] for constraint in constraints]
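A hedged sketch of how these guards might be used inside an Alembic migration, with the helpers above importable from the migration module; the table and column names are hypothetical:

import sqlalchemy as sa
from alembic import op


def upgrade() -> None:
    conn = op.get_bind()
    # Add the column only when the table exists and the column is still missing.
    if table_exists("folder", conn) and not column_exists("folder", "description", conn):
        op.add_column("folder", sa.Column("description", sa.Text(), nullable=True))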

Some files were not shown because too many files have changed in this diff.