Merge branch 'dev' into feature/ui_settings

This commit is contained in:
cristhianzl 2024-05-02 16:09:10 -03:00
commit 8303d367a7
187 changed files with 5702 additions and 2378 deletions

View file

@ -27,7 +27,7 @@ jobs:
# Popular action to deploy to GitHub Pages:
# Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus
- name: Deploy to GitHub Pages
uses: peaceiris/actions-gh-pages@v3
uses: peaceiris/actions-gh-pages@v4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
# Build output to publish to the `gh-pages` branch:

View file

@ -74,4 +74,4 @@ jobs:
push: true
file: ./build_and_push_base.Dockerfile
tags: |
logspace/langflow:base-${{ needs.release.outputs.version }}
langflowai/langflow:base-${{ needs.release.outputs.version }}

View file

@ -53,7 +53,7 @@ jobs:
run: |
make publish main=true
- name: Upload Artifact
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v4
with:
name: dist
path: dist
@ -63,6 +63,7 @@ jobs:
runs-on: ubuntu-latest
needs: release
steps:
- uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
@ -79,15 +80,15 @@ jobs:
push: true
file: ./build_and_push.Dockerfile
tags: |
logspace/langflow:${{ needs.release.outputs.version }}
logspace/langflow:1.0-alpha
langflowai/langflow:${{ needs.release.outputs.version }}
langflowai/langflow:1.0-alpha
create_release:
name: Create Release
runs-on: ubuntu-latest
needs: [docker_build, release]
steps:
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v4
with:
name: dist
path: dist

View file

@ -52,8 +52,8 @@ jobs:
push: true
file: ./build_and_push.Dockerfile
tags: |
logspace/langflow:${{ steps.check-version.outputs.version }}
logspace/langflow:latest
langflowai/langflow:${{ steps.check-version.outputs.version }}
langflowai/langflow:latest
- name: Create Release
uses: ncipollo/release-action@v1
with:

View file

@ -26,7 +26,7 @@ jobs:
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v3
uses: actions/setup-node@v4
id: setup-node
with:
node-version: ${{ env.NODE_VERSION }}
@ -99,7 +99,7 @@ jobs:
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v3
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}

View file

@ -69,7 +69,7 @@ services:
- traefik.http.routers.${STACK_NAME?Variable not set}-proxy-http.middlewares=${STACK_NAME?Variable not set}-www-redirect,${STACK_NAME?Variable not set}-https-redirect
backend: &backend
image: "logspace/langflow:latest"
image: "langflowai/langflow:latest"
depends_on:
- db
- broker

View file

@ -13,7 +13,7 @@ services:
- "7860:7860"
volumes:
- ./:/app
command: bash -c "uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --loop asyncio",
command: bash -c "uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --loop asyncio"
networks:
- langflow
frontend:

View file

@ -1,3 +1,3 @@
FROM logspace/langflow:latest
FROM langflowai/langflow:latest
CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860"]

View file

@ -35,7 +35,7 @@ The Docker Compose configuration spins up two services: `langflow` and `postgres
### LangFlow Service
The `langflow` service uses the `logspace/langflow:latest` Docker image and exposes port 7860. It depends on the `postgres` service.
The `langflow` service uses the `langflowai/langflow:latest` Docker image and exposes port 7860. It depends on the `postgres` service.
Environment variables:
@ -62,4 +62,4 @@ Volumes:
## Switching to a Specific LangFlow Version
If you want to use a specific version of LangFlow, you can modify the `image` field under the `langflow` service in the Docker Compose file. For example, to use version 1.0-alpha, change `logspace/langflow:latest` to `logspace/langflow:1.0-alpha`.
If you want to use a specific version of LangFlow, you can modify the `image` field under the `langflow` service in the Docker Compose file. For example, to use version 1.0-alpha, change `langflowai/langflow:latest` to `langflowai/langflow:1.0-alpha`.

View file

@ -2,7 +2,7 @@ version: "3.8"
services:
langflow:
image: logspace/langflow:latest
image: langflowai/langflow:latest
ports:
- "7860:7860"
depends_on:

View file

@ -1,3 +1,3 @@
FROM logspace/langflow:1.0-alpha
FROM langflowai/langflow:1.0-alpha
CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860"]

View file

@ -2,7 +2,7 @@ version: "3.8"
services:
langflow:
image: logspace/langflow:1.0-alpha
image: langflowai/langflow:1.0-alpha
ports:
- "7860:7860"
depends_on:

View file

@ -12,7 +12,7 @@ To use a custom component, follow these steps:
<Admonition type="info" label="Tip">
For an in-depth explanation of custom components, their rules, and applications, make sure to read [Custom Component guidelines](../guidelines/custom-component).
For an in-depth explanation of custom components, their rules, and applications, make sure to read [Custom Component guidelines](../administration/custom-component).
</Admonition>
@ -57,7 +57,7 @@ The CustomComponent class serves as the foundation for creating custom component
<Admonition type="info">
Unlike Langchain types, base Python types do not add a
[handle](../guidelines/components) to the field by default. To add handles,
[handle](../administration/components) to the field by default. To add handles,
use the _`input_types`_ key in the _`build_config`_ method.
</Admonition>

View file

@ -47,7 +47,7 @@ If you expose all its fields, it will look like the image below.
style={{ width: "40%", margin: "20px auto" }}
/>
One key capability of the Chat Input component is how it transforms the Interaction Panel into a chat window. This feature is particularly useful for scenarios where user input is required to initiate or influence the flow.
One key capability of the Chat Input component is how it transforms the Playground into a chat window. This feature is particularly useful for scenarios where user input is required to initiate or influence the flow.
<ZoomableImage
alt="Docusaurus themed image"
@ -152,7 +152,7 @@ You can use a template like this: _`"Name: {name}, Age: {age}"`_ to convert the
style={{ width: "50%", margin: "20px auto" }}
/>
The Text Input component gives you the possibility to add an Input field on the Interaction Panel. This is useful because it allows you to define parameters while running and testing your flow.
The Text Input component gives you the possibility to add an Input field on the Playground. This is useful because it allows you to define parameters while running and testing your flow.
<ZoomableImage
alt="Docusaurus themed image"

View file

@ -21,7 +21,7 @@ The `PromptTemplate` component allows users to create prompts and define variabl
<Admonition type="info">
Once a variable is defined in the prompt template, it becomes a component
input of its own. Check out [Prompt
Customization](../guidelines/prompt-customization) to learn more.
Customization](../administration/prompt-customization) to learn more.
</Admonition>
- **template:** Template used to format an individual request.

View file

@ -159,7 +159,7 @@ Now, let's add the [parameters](focus://11[20:55]) and the [return type](focus:/
- _`flow_name`_ is the name of the flow we want to run.
- _`document`_ is the input document to be passed to that flow.
- Since _`Document`_ is a Langchain type, it will add an input [handle](../guidelines/components) to the component ([see more](../components/custom)).
- Since _`Document`_ is a Langchain type, it will add an input [handle](../administration/components) to the component ([see more](../components/custom)).
---
@ -242,7 +242,7 @@ class FlowRunner(CustomComponent):
```
You can load this flow using _`get_flow`_ and set a _`tweaks`_ dictionary to customize it. Find more about tweaks in our [features guidelines](../guidelines/features#code).
You can load this flow using _`get_flow`_ and set a _`tweaks`_ dictionary to customize it. Find more about tweaks in our [features guidelines](../administration/features#code).
---

View file

@ -0,0 +1,27 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";
# 🤗 HuggingFace Spaces
Hugging Face provides a great alternative for running Langflow in their Spaces environment. This means you can run Langflow without any local installation required.
The first step is to go to the [Langflow Space](https://huggingface.co/spaces/Langflow/Langflow?duplicate=true) or [Langflow 1.0 Preview Space](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true)
Remember to use a Chromium-based browser for the best experience. You'll be presented with the following screen:
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/duplicate-space.png",
dark: "img/duplicate-space.png",
}}
style={{ width: "100%", margin: "20px auto" }}
/>
From here, just name your Space, define the visibility (Public or Private), and click on `Duplicate Space` to start the installation process. When that is done, you'll be redirected to the Space's main page to start using Langflow right away!
Once you get Langflow running, click on New Project in the top right corner of the screen. Langflow provides a range of example flows to help you get started.
To quickly try one of them, open a starter example, set up your API keys and click ⚡ Run, on the bottom right corner of the canvas. This will open up Langflow's Playground with the chat console, text inputs, and outputs.

View file

@ -0,0 +1,77 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";
# 📦 Install Langflow
<Admonition type="info">
Langflow v1.0 is also available in a [HuggingFace Preview Space](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true) if you'd rather try it out before installing locally.
</Admonition>
## Prerequisites
Langflow requires the following programs installed on your system.
* [Python 3.10](https://www.python.org/downloads/release/python-3100/)
* [pip](https://pypi.org/project/pip/) or [pipx](https://pipx.pypa.io/stable/installation/)
## Install Langflow
To install Langflow:
pip:
```bash
python -m pip install langflow -U
```
pipx:
```bash
pipx install langflow --python python3.10 --fetch-missing-python
```
Pipx can fetch the missing Python version for you with `--fetch-missing-python`, but you can also install the Python version manually.
## Install Langflow pre-release
Use `--force-reinstall` to ensure you have the latest version of Langflow and its dependencies.
To install a pre-release version of Langflow:
pip:
```bash
python -m pip install langflow --pre --force-reinstall
```
pipx:
```bash
pipx install langflow --python python3.10 --fetch-missing-python --pip-args="--pre --force-reinstall"
```
## Having a problem?
If you encounter a problem, see [Possible Installation Issues](/migration/possible-installation-issues).
To get help in the Langflow CLI:
```bash
python -m langflow --help
```
## ⛓️ Run Langflow
1. To run Langflow, enter the following command.
```bash
python -m langflow run
```
2. Confirm that a local Langflow instance starts by visiting `http://127.0.0.1:7860` in your browser.
```bash
│ Welcome to ⛓ Langflow │
│ │
│ Access http://127.0.0.1:7860 │
│ Collaborate, and contribute at our GitHub Repo 🚀 │
```
3. Continue on to the [Quickstart](./quickstart.mdx).

View file

@ -0,0 +1,10 @@
# 📚 New to LLMs?
Large Language Models, or LLMs, are part of an exciting new world in computing.
We made Langflow for anyone to create with LLMs, and hope you'll feel comfortable installing Langflow and [getting started](./quickstart.mdx).
If you want to learn more about LLMs, prompt engineering, and AI models, Langflow recommends [promptingguide.ai](https://promptingguide.ai), an open-source repository of prompt engineering content maintained by AI experts.
PromptingGuide offers content for [beginners](https://www.promptingguide.ai/introduction/basics) and [experts](https://www.promptingguide.ai/techniques/cot), as well as the latest [research papers](https://www.promptingguide.ai/papers) and [test results](https://www.promptingguide.ai/research) fueling AI's progress.
Wherever you are on your AI journey, it's helpful to keep Prompting Guide open in a tab.

View file

@ -0,0 +1,119 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# ⚡️ Quickstart
This quickstart demonstrates how to install Langflow, run it locally, build a basic prompt flow, and modify that prompt for different outcomes.
## Prerequisites
* [Python 3.10](https://www.python.org/downloads/release/python-3100/)
* [pip](https://pypi.org/project/pip/) or [pipx](https://pipx.pypa.io/stable/installation/)
* [OpenAI API key](https://platform.openai.com)
## Install Langflow
<Admonition type="info">
Langflow v1.0 is also available in a [HuggingFace Preview Space](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true) if you'd rather try it out before installing locally. This quickstart will run there, too.
</Admonition>
1. To install Langflow, enter the following command in pip or pipx:
pip:
```bash
python -m pip install langflow -U
```
pipx:
```bash
pipx install langflow --python python3.10 --fetch-missing-python
```
Pipx can fetch the missing Python version for you with `--fetch-missing-python`, but you can also install the Python version manually.
2. Start a local Langflow instance with the Langflow CLI:
```bash
langflow run
```
Or start Langflow with Python:
```bash
python -m langflow run
```
Result:
```
│ Welcome to ⛓ Langflow │
│ │
│ Access http://127.0.0.1:7860 │
│ Collaborate, and contribute at our GitHub Repo 🚀 │
```
3. Go to `http://127.0.0.1:7860` and confirm the Langflow UI is available.
<Admonition type="info">
If you encounter a problem, see [Possible Installation Issues](/migration/possible-installation-issues).
</Admonition>
## Create the basic prompting project
Now that you have Langflow installed and running, let us formally welcome you to Langflow!👋
You will use Langflow's prompt tools to issue prompts to the OpenAI LLM.
Prompts serve as the inputs to a large language model (LLM), acting as the interface between human instructions and computational tasks.
By submitting natural language requests in a prompt to an LLM, you can obtain answers, generate text, and solve problems.
1. From the Langflow dashboard, click **New Project**.
2. Select **Basic Prompting**.
3. The **Basic Prompting** flow is created.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/quickstart.png",
dark: "img/quickstart.png",
}}
style={{ width: "80%", margin: "20px auto" }}
/>
This flow allows you to chat with the **OpenAI** component via a **Prompt** component.
Examine the **Prompt** component. The **Template** field instructs the LLM to `Answer the user as if you were a pirate.`
This should be interesting...
4. To create an environment variable for the **OpenAI** component, in the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**.
1. In the **Variable Name** field, enter `openai_api_key`.
2. In the **Value** field, paste your OpenAI API Key (`sk-...`).
3. Click **Save Variable**.
## Run the basic prompting flow
1. Click the **Run** button.
The **Playground** opens, where you can converse with your bot.
2. Type a message and press Enter.
The bot responds in a markedly piratical manner!
## Modify the prompt for a different result
1. To modify your prompt results, in the **Prompt** template, click the **Template** field.
The **Edit Prompt** window opens.
2. Change `Answer the user as if you were a pirate` to a different character, perhaps `Answer the user as if you were Harold Abelson.`
3. Run the basic prompting flow again.
The response will be markedly different.
## Next steps
Well done! You've built your first prompt in Langflow. 🎉
By adding Langflow components to this prompt, you can build all sorts of interesting flows.
* [Memory chatbot](/starter-projects/memory-chatbot.mdx)
* [Blog writer](/starter-projects/blog-writer.mdx)
* [Document QA](/starter-projects/document-qa.mdx)

View file

@ -130,18 +130,18 @@ And run it! This will ingest the Text data from your file into the Astra DB data
style={{ width: "80%", margin: "20px auto" }}
/>
Now, on to the **RAG Flow**. This flow is responsible for generating responses to your queries. It will define all of the steps from getting the User's input to generating a response and displaying it in the Interaction Panel.
Now, on to the **RAG Flow**. This flow is responsible for generating responses to your queries. It will define all of the steps from getting the User's input to generating a response and displaying it in the Playground.
The RAG flow is a bit more complex. It consists of:
- **Chat Input** component that defines where to put the user input coming from the Interaction Panel
- **Chat Input** component that defines where to put the user input coming from the Playground
- **OpenAI Embeddings** component that generates embeddings from the user input
- **Astra DB Search** component that retrieves the most relevant Records from the Astra DB database
- **Text Output** component that turns the Records into Text by concatenating them and also displays it in the Interaction Panel
- One interesting point you'll see here is that this component is named `Extracted Chunks`, and that is how it will appear in the Interaction Panel
- **Text Output** component that turns the Records into Text by concatenating them and also displays it in the Playground
- One interesting point you'll see here is that this component is named `Extracted Chunks`, and that is how it will appear in the Playground
- **Prompt** component that takes in the user input and the retrieved Records as text and builds a prompt for the OpenAI model
- **OpenAI** component that generates a response to the prompt
- **Chat Output** component that displays the response in the Interaction Panel
- **Chat Output** component that displays the response in the Playground
<ZoomableImage
alt="Docusaurus themed image"
@ -163,7 +163,7 @@ To run it all we have to do is click on the ⚡ _Run_ button and start interacti
style={{ width: "80%", margin: "20px auto" }}
/>
This opens the Interaction Panel where you can chat your data.
This opens the Playground where you can chat with your data.
Because this flow has a **Chat Input** and a **Text Output** component, the Panel displays a chat input at the bottom and the Extracted Chunks section on the left.

View file

@ -5,7 +5,9 @@ import Admonition from "@theme/Admonition";
# 👋 Welcome to Langflow
Langflow is an easy way to build from simple to complex AI applications. It is a low-code platform that allows you to integrate AI into everything you do.
Langflow is a low-code platform that allows you to integrate AI into everything you do.
Use Langflow's simple but powerful UI to build any AI application you can dream up, from simple to complex.
{" "}
@ -20,96 +22,17 @@ Langflow is an easy way to build from simple to complex AI applications. It is a
## 🚀 First steps
## Installation
- [Install Langflow](/getting-started/install-langflow) - Install and start a local Langflow server.
Make sure you have **Python 3.10** installed on your system.
- [Quickstart](/getting-started/quickstart) - Install Langflow, create a flow, and run it.
You can install **Langflow** with [pipx](https://pipx.pypa.io/stable/installation/) or with pip.
- [HuggingFace Spaces](/getting-started/huggingface-spaces) - Duplicate the Langflow preview space and try it out before you install.
Pipx can fetch the missing Python version for you, but you can also install it manually.
- [New to LLMs?](/getting-started/new-to-llms) - Learn more about LLMs, prompting, and more at [promptingguide.ai](https://promptingguide.ai).
```bash
# Remember to check if you have Python 3.10 installed
python -m pip install langflow -U
# or
pipx install langflow --python python3.10 --fetch-missing-python
```
## Learn more about Langflow 1.0
Or you can install a pre-release version using:
Learn more about the exciting changes in Langflow 1.0, and how to migrate your existing Langflow projects.
```bash
python -m pip install langflow --pre --force-reinstall
# or
pipx install langflow --python python3.10 --fetch-missing-python --pip-args="--pre --force-reinstall"
```
<Admonition type="tip">
<p>
Please, check out our [Possible Installation Issues
section](/migration/possible-installation-issues) if you encounter any
problems.
</p>
</Admonition>
We recommend using --force-reinstall to ensure you have the latest version of Langflow and its dependencies.
### ⛓️ Running Langflow
Langflow can be run in a variety of ways, including using the command-line interface (CLI) or HuggingFace Spaces.
```bash
python -m langflow run # or langflow --help
```
#### 🤗 HuggingFace Spaces
Hugging Face provides a great alternative for running Langflow in their Spaces environment. This means you can run Langflow without any local installation required.
The first step is to go to the [Langflow Space](https://huggingface.co/spaces/Langflow/Langflow?duplicate=true) or [Langflow 1.0 Preview Space](https://huggingface.co/spaces/Langflow/Langflow-Preview?duplicate=true)
Remember to use a Chromium-based browser for the best experience. You'll be presented with the following screen:
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/duplicate-space.png",
dark: "img/duplicate-space.png",
}}
style={{ width: "100%", margin: "20px auto" }}
/>
From here, just name your Space, define the visibility (Public or Private), and click on `Duplicate Space` to start the installation process. When that is done, you'll be redirected to the Space's main page to start using Langflow right away!
Once you get Langflow running, click on New Project in the top right corner of the screen. Langflow provides a range of example flows to help you get started.
To quickly try one of them, open a starter example, set up your API keys and click ⚡ Run, on the bottom right corner of the canvas. This will open up Langflow's Interaction Panel with the chat console, text inputs, and outputs.
### 🖥️ Command Line Interface (CLI)
Langflow provides a command-line interface (CLI) for easy management and configuration.
#### Usage
You can run the Langflow using the following command:
```bash
langflow run [OPTIONS]
```
Find more information about the available options by running:
```bash
python -m langflow --help
```
## Find out more about 1.0
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We are currently working on updating the documentation for Langflow 1.0.
</p>
</Admonition>
To get you learning more about what's new and why you should be excited about Langflow 1.0,
go to [A new chapter for Langflow](/whats-new/a-new-chapter-langflow) and also come back often
to check out our [migration guides](/whats-new/migrating-to-one-point-zero) as we release them.
- [A new chapter for Langflow](/whats-new/a-new-chapter-langflow)
- [Migration guides](/migration/migrating-to-one-point-zero)

View file

@ -1,7 +1,7 @@
# Inputs and Outputs
TL;DR: Inputs and Outputs are a category of components that are used to define where data comes in and out of your flow. They also
dynamically change the Interaction Panel and can be renamed to make it easier to build and maintain your flows.
dynamically change the Playground and can be renamed to make it easier to build and maintain your flows.
## Introduction
@ -10,8 +10,8 @@ Langflow 1.0 introduces new categories of components called Inputs and Outputs.
Let's start with what they have in common:
- Components in these categories connect to components that have Text or Record inputs or outputs. Some can connect to both but you have to pick what type of data you want to output or input.
- They can be renamed to help you identify them more easily in the Interaction Panel and while using the API.
- They dynamically change the Interaction Panel to make it easier to understand and interact with your flows.
- They can be renamed to help you identify them more easily in the Playground and while using the API.
- They dynamically change the Playground to make it easier to understand and interact with your flows.
Native Langflow Components were created to be powerful tools that work around Langflow's features. They are designed to be easy to use and understand, and to help you build your flows faster.
@ -21,7 +21,7 @@ Let's dive into Inputs and Outputs.
Inputs are components that are used to define where data comes into your flow. They can be used to receive data from the user, from a database, or from any other source that can be converted to Text or Record.
The difference between Chat Input and other Input components is the format of the output, the number of configurable fields, and the way they are displayed in the Interaction Panel.
The difference between Chat Input and other Input components is the format of the output, the number of configurable fields, and the way they are displayed in the Playground.
Chat Input components can output Text or Record. When you want to pass the sender name, or sender to the next component, you can use the Record output, and when you want to pass the message only you can use the Text output. This is useful when saving the message to a database or a memory system like Zep.
@ -29,7 +29,7 @@ You can find out more about it and the other Inputs [here](../components/inputs)
## Outputs
Outputs are components that are used to define where data comes out of your flow. They can be used to send data to the user, to the Interaction Panel, or to define how the data will be displayed in the Interaction Panel.
Outputs are components that are used to define where data comes out of your flow. They can be used to send data to the user, to the Playground, or to define how the data will be displayed in the Playground.
The Chat Output works similarly to the Chat Input but does not have a field that allows for written input. It is used as an Output definition and can be used to send data to the user.

View file

@ -55,7 +55,7 @@ Langflow 1.0 continues to support LangChain while also introducing support for m
**Guide coming soon**
## Sidebar Redesign and Customizable Interaction Panel
## Sidebar Redesign and Customizable Playground
We've expanded on the chat experience by creating a customizable Playground that allows you to design a panel that fits your needs and interact with it. The sidebar has also been redesigned to provide a more intuitive and user-friendly experience. Explore the new sidebar and Playground features to enhance your workflow.

View file

@ -25,3 +25,16 @@ For this error to occur, two scenarios are possible:
In this case, you might not be running the correct executable.
To solve this issue, you can run the correct executable by running _`python -m langflow run`_ instead of _`langflow run`_ and if that doesn't work, you can try uninstalling langflow and reinstalling it using _`python -m pip install langflow --pre -U`_.
2. Some version conflicts might have occurred during the installation process. Run _`python -m pip install langflow --pre -U --force-reinstall`_ to reinstall langflow and its dependencies.
## _`Something went wrong running migrations. Please, run 'langflow migration --fix'`_
TL;DR:
- Clear the cache by deleting the contents of the cache folder.
This folder can be found at:
- **Linux or WSL2 on Windows**: `home/<username>/.cache/langflow/`
- **MacOS**: `/Users/<username>/Library/Caches/langflow/`
If you wish to retain your files, ensure to back them up before clearing the folder.
This error often occurs when upgrading Langflow because the new version can't override `langflow-pre.db` in `.cache/langflow/`. Clearing the cache removes this file but will also erase your settings.

View file

@ -0,0 +1,83 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
# Basic prompting
Prompts serve as the inputs to a large language model (LLM), acting as the interface between human instructions and computational tasks.
By submitting natural language requests in a prompt to an LLM, you can obtain answers, generate text, and solve problems.
This article demonstrates how to use Langflow's prompt tools to issue basic prompts to an LLM, and how various prompting strategies can affect your outcomes.
## Prerequisites
1. Install Langflow.
```bash
python -m pip install langflow --pre
```
2. Start a local Langflow instance with the Langflow CLI:
```bash
langflow run
```
Or start Langflow with Python:
```bash
python -m langflow run
```
Result:
```
│ Welcome to ⛓ Langflow │
│ │
│ Access http://127.0.0.1:7860 │
│ Collaborate, and contribute at our GitHub Repo 🚀 │
```
Alternatively, go to [HuggingFace Spaces](https://docs.langflow.org/getting-started/hugging-face-spaces) or [Lightning.ai Studio](https://lightning.ai/ogabrielluiz-8j6t8/studios/langflow) for a pre-built Langflow test environment.
3. Create an [OpenAI API key](https://platform.openai.com).
## Create the basic prompting project
1. From the Langflow dashboard, click **New Project**.
2. Select **Basic Prompting**.
3. The **Basic Prompting** flow is created.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/basic-prompting.png",
dark: "img/basic-prompting.png",
}}
style={{ width: "80%", margin: "20px auto" }}
/>
This flow allows you to chat with the **OpenAI** component via a **Prompt** component.
Examine the **Prompt** component. The **Template** field instructs the LLM to `Answer the user as if you were a pirate.`
This should be interesting...
4. To create an environment variable for the **OpenAI** component, in the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**.
1. In the **Variable Name** field, enter `openai_api_key`.
2. In the **Value** field, paste your OpenAI API Key (`sk-...`).
3. Click **Save Variable**.
## Run the basic prompting flow
1. Click the **Run** button.
The **Playground** opens, where you can converse with your bot.
2. Type a message and press Enter.
The bot responds in a markedly piratical manner!
## Modify the prompt for a different result
1. To modify your prompt results, in the **Prompt** template, click the **Template** field.
The **Edit Prompt** window opens.
2. Change `Answer the user as if you were a pirate` to a different character, perhaps `Answer the user as if you were Harold Abelson.`
3. Run the basic prompting flow again.
The response will be markedly different.

View file

@ -0,0 +1,92 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Blog writer
Build a blog writer with OpenAI that uses URLs for reference content.
## Prerequisites
1. Install Langflow.
```bash
python -m pip install langflow --pre
```
2. Start a local Langflow instance with the Langflow CLI:
```bash
langflow run
```
Or start Langflow with Python:
```bash
python -m langflow run
```
Result:
```bash
│ Welcome to ⛓ Langflow │
│ │
│ Access http://127.0.0.1:7860 │
│ Collaborate, and contribute at our GitHub Repo 🚀 │
```
Alternatively, go to [HuggingFace Spaces](https://docs.langflow.org/getting-started/hugging-face-spaces) or [Lightning.ai Studio](https://lightning.ai/ogabrielluiz-8j6t8/studios/langflow) for a pre-built Langflow test environment.
3. Create an [OpenAI API key](https://platform.openai.com).
## Create the Blog Writer project
1. From the Langflow dashboard, click **New Project**.
2. Select **Blog Writer**.
3. The **Blog Writer** flow is created.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/blog-writer.png",
dark: "img/blog-writer.png",
}}
style={{ width: "80%", margin: "20px auto" }}
/>
This flow creates a one-shot prompt flow with **Prompt**, **OpenAI**, and **Chat Output** components, and augments the flow with reference content and instructions from the **URL** and **Instructions** components.
The **Prompt** component's default **Template** field looks like this:
```bash
Reference 1:
{reference_1}
---
Reference 2:
{reference_2}
---
{instructions}
Blog:
```
The `{instructions}` value is received from the **Value** field of the **Instructions** component.
The `reference_1` and `reference_2` values are received from the **URL** fields of the **URL** components.
4. To create an environment variable for the **OpenAI** component, in the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**.
1. In the **Variable Name** field, enter `openai_api_key`.
2. In the **Value** field, paste your OpenAI API Key (`sk-...`).
3. Click **Save Variable**.
## Run the Blog Writer flow
1. Click the **Run** button.
The **Interaction Panel** opens, where you can run your one-shot flow.
2. Click the **Lightning Bolt** icon to run your flow.
3. The **OpenAI** component constructs a blog post with the **URL** items as context.
The default **URL** values are for web pages at `promptingguide.ai`, so your blog post will be about prompting LLMs.
To write about something different, change the values in the **URL** components, and see what the LLM constructs.

View file

@ -0,0 +1,82 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
import Admonition from "@theme/Admonition";
# Document QA
Build a question-and-answer chatbot with a document loaded from local memory.
## Prerequisites
1. Install Langflow.
```bash
python -m pip install langflow --pre
```
2. Start a local Langflow instance with the Langflow CLI:
```bash
langflow run
```
Or start Langflow with Python:
```bash
python -m langflow run
```
Result:
```
│ Welcome to ⛓ Langflow │
│ │
│ Access http://127.0.0.1:7860 │
│ Collaborate, and contribute at our GitHub Repo 🚀 │
```
Alternatively, go to [HuggingFace Spaces](https://docs.langflow.org/getting-started/hugging-face-spaces) or [Lightning.ai Studio](https://lightning.ai/ogabrielluiz-8j6t8/studios/langflow) for a pre-built Langflow test environment.
3. Create an [OpenAI API key](https://platform.openai.com).
## Create the Document QA project
1. From the Langflow dashboard, click **New Project**.
2. Select **Document QA**.
3. The **Document QA** flow is created.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/document-qa.png",
dark: "img/document-qa.png",
}}
style={{ width: "80%", margin: "20px auto" }}
/>
This flow creates a basic chatbot with the **Chat Input**, **Prompt**, **OpenAI**, and **Chat Output** components.
This chatbot is augmented with the **Files** component, which loads a file from your local machine into the **Prompt** component as `{Document}`.
The **Prompt** component is instructed to answer questions based on the contents of `{Document}`.
Including a file with the prompt gives the **OpenAI** component context it may not otherwise have access to.
4. To create an environment variable for the **OpenAI** component, in the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**.
1. In the **Variable Name** field, enter `openai_api_key`.
2. In the **Value** field, paste your OpenAI API Key (`sk-...`).
3. Click **Save Variable**.
5. To select a document to load, in the **Files** component, click within the **Path** field.
1. Select a local file, and then click **Open**.
2. The file name appears in the field.
<Admonition type="tip">
The file must be of an extension type listed [here](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/base/data/utils.py#L13).
</Admonition>
## Run the Document QA flow
1. Click the **Run** button.
The **Interaction Panel** opens, where you can converse with your bot.
2. Type a message and press Enter.
For this example, we loaded an error log `.txt` file and asked, "What went wrong?"
The bot responded:
```
The issue occurred during the execution of migrations in the application. Specifically, an error was raised by the Alembic library, indicating that new upgrade operations were detected that had not been accounted for in the existing migration scripts. The operation in question involved modifying the nullable property of a column (apikey, created_at) in the database, with details about the existing type (DATETIME()), existing server default, and other properties.
```
This result indicates that the bot received the loaded document and understood the context surrounding the vague question. It also correctly identified the issue in the error log, and followed up with appropriate troubleshooting suggestions. Nice!

View file

@ -0,0 +1,99 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import ReactPlayer from "react-player";
# Memory chatbot
This flow extends the [basic prompting flow](./basic-prompting.mdx) to include chat memory for unique SessionIDs.
## Prerequisites
1. Install Langflow.
```bash
python -m pip install langflow --pre
```
2. Start a local Langflow instance with the Langflow CLI:
```bash
langflow run
```
Or start Langflow with Python:
```bash
python -m langflow run
```
Result:
```
│ Welcome to ⛓ Langflow │
│ │
│ Access http://127.0.0.1:7860 │
│ Collaborate, and contribute at our GitHub Repo 🚀 │
```
Alternatively, go to [HuggingFace Spaces](https://docs.langflow.org/getting-started/hugging-face-spaces) or [Lightning.ai Studio](https://lightning.ai/ogabrielluiz-8j6t8/studios/langflow) for a pre-built Langflow test environment.
3. Create an [OpenAI API key](https://platform.openai.com).
## Create the memory chatbot project
1. From the Langflow dashboard, click **New Project**.
2. Select **Memory Chatbot**.
3. The **Memory Chatbot** flow is created.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/memory-chatbot.png",
dark: "img/memory-chatbot.png",
}}
style={{
width: "80%",
margin: "20px auto",
display: "flex",
justifyContent: "center",
}}
/>
This flow creates a basic chatbot with the **Chat Input**, **Prompt**, and **OpenAI** components.
This chatbot is augmented with the **Chat Memory** component, which stores messages submitted via **Chat Input** and prepends them to subsequent prompts to OpenAI via `{context}`.
The **Chat History** component gives the **OpenAI** component a memory of previous questions.
4. To create an environment variable for the **OpenAI** component, in the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**.
1. In the **Variable Name** field, enter `openai_api_key`.
2. In the **Value** field, paste your OpenAI API Key (`sk-...`).
3. Click **Save Variable**.
## Run the memory chatbot flow
1. Click the **Run** button.
The **Interaction Panel** opens, where you can converse with your bot.
2. Type a message and press Enter.
The bot will respond according to the template in the **Prompt** component.
3. Type more questions. In the **Outputs** log, your queries are logged in order. Up to 5 queries are stored by default. Try asking `What is the first subject I asked you about?` to see where the LLM's memory disappears.
## Modify the Session ID field to have multiple conversations
`SessionID` is a unique identifier in Langchain for a conversation session between a chatbot and a client.
A `SessionID` is created when a conversation is initiated, and then associated with all subsequent messages during that session.
In the **Memory Chatbot** flow you created, the **Chat Memory** component references past interactions with **Chat Input** by **Session ID**.
You can demonstrate this by modifying the **Session ID** value to switch between conversation histories.
1. In the **Session ID** field of the **Chat Memory** and **Chat Input** components, change the **Session ID** value from `MySessionID` to `AnotherSessionID`.
2. Click the **Run** button to run your flow.
In the **Interaction Panel**, you will have a new conversation. (You may need to clear the cache with the **Eraser** button).
3. Type a few questions to your bot.
4. In the **Session ID** field of the **Chat Memory** and **Chat Input** components, change the **Session ID** value back to `MySessionID`.
5. Run your flow.
The **Outputs** log of the **Interaction Panel** displays the history from your initial chat with `MySessionID`.
## Store Session ID as a Langflow variable
To store **Session ID** as a Langflow variable, in the **Session ID** field, click the **Globe** button, and then click **Add New Variable**.
1. In the **Variable Name** field, enter a name like `customer_chat_emea`.
2. In the **Value** field, enter a value like `1B5EBD79-6E9C-4533-B2C8-7E4FF29E983B`.
3. Click **Save Variable**.
4. Apply this variable to **Chat Input**.

View file

@ -121,18 +121,18 @@ And run it! This will ingest the Text data from your file into the Astra DB data
style={{ width: "80%", margin: "20px auto" }}
/>
Now, on to the **RAG Flow**. This flow is responsible for generating responses to your queries. It will define all of the steps from getting the User's input to generating a response and displaying it in the Interaction Panel.
Now, on to the **RAG Flow**. This flow is responsible for generating responses to your queries. It will define all of the steps from getting the User's input to generating a response and displaying it in the Playground.
The RAG flow is a bit more complex. It consists of:
- **Chat Input** component that defines where to put the user input coming from the Interaction Panel
- **Chat Input** component that defines where to put the user input coming from the Playground
- **OpenAI Embeddings** component that generates embeddings from the user input
- **Astra DB Search** component that retrieves the most relevant Records from the Astra DB database
- **Text Output** component that turns the Records into Text by concatenating them and also displays it in the Interaction Panel
- One interesting point you'll see here is that this component is named `Extracted Chunks`, and that is how it will appear in the Interaction Panel
- **Text Output** component that turns the Records into Text by concatenating them and also displays it in the Playground
- One interesting point you'll see here is that this component is named `Extracted Chunks`, and that is how it will appear in the Playground
- **Prompt** component that takes in the user input and the retrieved Records as text and builds a prompt for the OpenAI model
- **OpenAI** component that generates a response to the prompt
- **Chat Output** component that displays the response in the Interaction Panel
- **Chat Output** component that displays the response in the Playground
<ZoomableImage
alt="Docusaurus themed image"
@ -154,7 +154,7 @@ To run it all we have to do is click on the ⚡ _Run_ button and start interacti
style={{ width: "80%", margin: "20px auto" }}
/>
This opens the Interaction Panel where you can chat your data.
This opens the Playground where you can chat with your data.
Because this flow has a **Chat Input** and a **Text Output** component, the Panel displays a chat input at the bottom and the Extracted Chunks section on the left.

View file

@ -36,18 +36,18 @@ The caveat is existing projects may need some new Components to get them back to
## Custom Interactions
The moment we decided to make this change, we saw the potential to make Langflow even more yours.
By having a clear definition of Inputs and Outputs, we could build the experience around that which led us to create the **Interaction Panel**.
By having a clear definition of Inputs and Outputs, we could build the experience around that which led us to create the **Playground**.
When building a project testing and debugging is crucial. The Interaction Panel is a tool that changes dynamically based on the Inputs and Outputs you defined in your project.
When building a project, testing and debugging are crucial. The Playground is a tool that changes dynamically based on the Inputs and Outputs you defined in your project.
For example, let's say you are building a simple RAG application. Generally, you have an Input, some references that come from a Vector Store Search, a Prompt and the answer.
Now, you could plug the output of your Prompt into a [Text Output](../components/outputs#Text-Output), rename that to "Prompt Result" and see the output of your Prompt in the Interaction Panel.
Now, you could plug the output of your Prompt into a [Text Output](../components/outputs#Text-Output), rename that to "Prompt Result" and see the output of your Prompt in the Playground.
{/* Add image here of the described above */}
This is just one example of how the Interaction Panel can help you build and debug your projects.
This is just one example of how the Playground can help you build and debug your projects.
We have many planned features for the Interaction Panel, and we're excited to see how you use it and what you think of it.
We have many planned features for the Playground, and we're excited to see how you use it and what you think of it.
## An easier start
@ -61,11 +61,11 @@ We wanted to create start projects that would help you learn about new features
For now, we have:
- **[Basic Prompting (Hello, World)](/guides/basic-prompting)**: A simple flow that shows you how to use the Prompt Component and how to talk like a pirate.
- **[Vector Store RAG](/guides/rag-with-astradb)**: A flow that shows you how to ingest data into a Vector Store and then use it to run a RAG application.
- **[Memory Chatbot](/guides/memory-chatbot)**: This one shows you how to create a simple chatbot that can remember things about the user.
- **[Document QA](/guides/document-qa)**: This flow shows you how to build a simple flow that helps you get answers about a document.
- **[Blog Writer](/guides/blog-writer)**: Shows you how you can expand on the Prompt variables and be creative about what inputs you add to it.
- **[Basic Prompting (Hello, World)](/starter-projects/basic-prompting)**: A simple flow that shows you how to use the Prompt Component and how to talk like a pirate.
- **[Vector Store RAG](/tutorials/rag-with-astradb)**: A flow that shows you how to ingest data into a Vector Store and then use it to run a RAG application.
- **[Memory Chatbot](/starter-projects/memory-chatbot)**: This one shows you how to create a simple chatbot that can remember things about the user.
- **[Document QA](/starter-projects/document-qa)**: This flow shows you how to build a simple flow that helps you get answers about a document.
- **[Blog Writer](/starter-projects/blog-writer)**: Shows you how you can expand on the Prompt variables and be creative about what inputs you add to it.
As always, your feedback is invaluable, so please let us know what you think of the new starter projects and what you would like to see in the future.

View file

@ -2,74 +2,49 @@ module.exports = {
docs: [
{
type: "category",
label: " Getting Started",
label: "What's New?",
collapsed: false,
items: [
"whats-new/a-new-chapter-langflow"
],
},
{
type: "category",
label: "Getting Started",
collapsed: false,
items: [
"index",
"getting-started/cli",
// "guides/basic-prompting",
// "guides/document-qa",
// "guides/blog-writer",
// "guides/memory-chatbot",
"guides/rag-with-astradb",
"getting-started/install-langflow",
"getting-started/quickstart",
"getting-started/huggingface-spaces",
"getting-started/new-to-llms"
],
},
{
type: "category",
label: " What's New",
label: "Starter Projects",
collapsed: false,
items: [
"whats-new/a-new-chapter-langflow",
"whats-new/migrating-to-one-point-zero",
"starter-projects/basic-prompting",
"starter-projects/blog-writer",
"starter-projects/document-qa",
"starter-projects/memory-chatbot"
],
},
{
type: "category",
label: " Migration Guides",
label: "Administration",
collapsed: false,
items: [
"migration/possible-installation-issues",
// "migration/flow-of-data",
"migration/inputs-and-outputs",
// "migration/supported-frameworks",
// "migration/sidebar-and-interaction-panel",
// "migration/new-categories-and-components",
"migration/text-and-record",
// "migration/custom-component",
"migration/compatibility",
// "migration/multiple-flows",
// "migration/component-status-and-data-passing",
// "migration/connecting-output-components",
// "migration/renaming-and-editing-components",
// "migration/passing-tweaks-and-inputs",
"migration/global-variables",
// "migration/experimental-components",
// "migration/state-management",
"administration/login",
"administration/api",
"administration/cli",
"administration/components",
"administration/collection",
"administration/prompt-customization",
"administration/langfuse_integration"
],
},
{
type: "category",
label: "Guidelines",
collapsed: false,
items: [
"guidelines/login",
"guidelines/api",
"guidelines/components",
// "guidelines/features",
"guidelines/collection",
"guidelines/prompt-customization",
// "guidelines/chat-interface",
// "guidelines/chat-widget",
// "guidelines/custom-component",
],
},
{
type: "category",
label: "Extended Components",
collapsed: false,
items: ["guides/langfuse_integration"],
},
{
type: "category",
label: "Core Components",
@ -82,45 +57,69 @@ module.exports = {
"components/helpers",
"components/vector-stores",
"components/embeddings",
"components/custom"
],
},
{
type: "category",
label: "Extended Components",
collapsed: false,
collapsed: true,
items: [
"components/agents",
"components/chains",
"components/loaders",
"components/experimental",
"components/utilities",
"components/memories",
"components/model_specs",
"components/retrievers",
"components/text-splitters",
"components/toolkits",
"components/tools",
"components/tools"
],
},
{
type: "category",
label: "Example Components",
collapsed: true,
items: [
"examples/flow-runner",
"examples/conversation-chain",
"examples/buffer-memory",
"examples/csv-loader",
"examples/searchapi-tool",
"examples/serp-api-tool",
"examples/python-function"
],
},
{
type: "category",
label: "Migration Guides",
collapsed: false,
items: [
"migration/possible-installation-issues",
"migration/migrating-to-one-point-zero",
"migration/inputs-and-outputs",
"migration/text-and-record",
"migration/compatibility",
"migration/global-variables"
]
},
{
type: "category",
label: "Tutorials",
collapsed: true,
items: [
"tutorials/chatprompttemplate_guide",
"tutorials/loading_document",
"tutorials/rag-with-astradb"
],
},
// {
// type: "category",
// label: "Examples",
// collapsed: false,
// items: [
// // "examples/flow-runner",
// // "examples/conversation-chain",
// // "examples/buffer-memory",
// // "examples/csv-loader",
// // "examples/searchapi-tool",
// // "examples/serp-api-tool",
// // "examples/python-function",
// ],
// },
{
type: "category",
label: "Deployment",
collapsed: false,
items: ["deployment/gcp-deployment"],
collapsed: true,
items: [
"deployment/gcp-deployment"
],
},
{
type: "category",
@ -129,7 +128,7 @@ module.exports = {
items: [
"contributing/how-contribute",
"contributing/github-issues",
"contributing/community",
"contributing/community"
],
},
],

View file

@ -20,7 +20,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Interaction Panel.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n\n def build_config(self):\n build_config = super().build_config()\n build_config[\"input_value\"] = {\n \"input_types\": [],\n \"display_name\": \"Message\",\n \"multiline\": True,\n }\n\n return build_config\n\n def build(\n self,\n sender: Optional[str] = \"User\",\n sender_name: Optional[str] = \"User\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n ) -> Union[Text, Record]:\n return super().build(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -141,7 +141,7 @@
},
"_type": "CustomComponent"
},
"description": "Get chat inputs from the Interaction Panel.",
"description": "Get chat inputs from the Playground.",
"icon": "ChatInput",
"base_classes": [
"Text",
@ -214,7 +214,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextOutput(TextComponent):\n display_name = \"Text Output\"\n description = \"Display a text output in the Interaction Panel.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as output.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(self, input_value: Optional[Text] = \"\", record_template: str = \"\") -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n",
"value": "from typing import Optional\n\nfrom langflow.base.io.text import TextComponent\nfrom langflow.field_typing import Text\n\n\nclass TextOutput(TextComponent):\n display_name = \"Text Output\"\n description = \"Display a text output in the Playground.\"\n icon = \"type\"\n\n def build_config(self):\n return {\n \"input_value\": {\n \"display_name\": \"Value\",\n \"input_types\": [\"Record\", \"Text\"],\n \"info\": \"Text or Record to be passed as output.\",\n },\n \"record_template\": {\n \"display_name\": \"Record Template\",\n \"multiline\": True,\n \"info\": \"Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.\",\n \"advanced\": True,\n },\n }\n\n def build(self, input_value: Optional[Text] = \"\", record_template: str = \"\") -> Text:\n return super().build(input_value=input_value, record_template=record_template)\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -249,7 +249,7 @@
},
"_type": "CustomComponent"
},
"description": "Display a text output in the Interaction Panel.",
"description": "Display a text output in the Playground.",
"icon": "type",
"base_classes": [
"object",
@ -1255,7 +1255,7 @@
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Interaction Panel.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template,\n )\n",
"value": "from typing import Optional, Union\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.field_typing import Text\nfrom langflow.schema import Record\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n def build(\n self,\n sender: Optional[str] = \"Machine\",\n sender_name: Optional[str] = \"AI\",\n input_value: Optional[str] = None,\n session_id: Optional[str] = None,\n return_record: Optional[bool] = False,\n record_template: Optional[str] = \"{text}\",\n ) -> Union[Text, Record]:\n return super().build(\n sender=sender,\n sender_name=sender_name,\n input_value=input_value,\n session_id=session_id,\n return_record=return_record,\n record_template=record_template,\n )\n",
"fileTypes": [],
"file_path": "",
"password": false,
@ -1399,7 +1399,7 @@
},
"_type": "CustomComponent"
},
"description": "Display a chat message in the Interaction Panel.",
"description": "Display a chat message in the Playground.",
"icon": "ChatOutput",
"base_classes": [
"object",

BIN
docs/static/img/basic-prompting.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 271 KiB

BIN
docs/static/img/blog-writer.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 524 KiB

BIN
docs/static/img/document-qa.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 126 KiB

BIN
docs/static/img/memory-chatbot.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 271 KiB

BIN
docs/static/img/quickstart.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 486 KiB

2087
poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "1.0.0a24"
version = "1.0.0a30"
description = "A Python package with a built-in web application"
authors = ["Langflow <contact@langflow.org>"]
maintainers = [
@ -81,6 +81,7 @@ chromadb = "^0.4.24"
langchain-anthropic = "^0.1.6"
langchain-astradb = "^0.1.0"
langchain-openai = "^0.1.1"
zep-python = { version = "^2.0.0rc5", allow-prereleases = true }
[tool.poetry.group.dev.dependencies]
types-redis = "^4.6.0.5"

View file

@ -1,4 +1,4 @@
FROM logspace/backend_build as backend_build
FROM langflowai/backend_build as backend_build
FROM python:3.10-slim
WORKDIR /app

View file

@ -1,7 +1,7 @@
"""Add default_fields column
Revision ID: 1f4d6df60295
Revises: 58b28437a398
Revises: 6e7b581b5648
Create Date: 2024-04-29 09:49:46.864145
"""
@ -14,7 +14,7 @@ from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = "1f4d6df60295"
down_revision: Union[str, None] = "58b28437a398"
down_revision: Union[str, None] = "6e7b581b5648"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@ -22,7 +22,6 @@ depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
table_names = inspector.get_table_names()
# ### commands auto generated by Alembic - please adjust! ###
column_names = [column["name"] for column in inspector.get_columns("variable")]
with op.batch_alter_table("variable", schema=None) as batch_op:
@ -35,7 +34,6 @@ def upgrade() -> None:
def downgrade() -> None:
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
table_names = inspector.get_table_names()
# ### commands auto generated by Alembic - please adjust! ###
column_names = [column["name"] for column in inspector.get_columns("variable")]
with op.batch_alter_table("variable", schema=None) as batch_op:

View file

@ -0,0 +1,59 @@
"""Fix nullable
Revision ID: 6e7b581b5648
Revises: 58b28437a398
Create Date: 2024-04-30 09:17:45.024688
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = "6e7b581b5648"
down_revision: Union[str, None] = "58b28437a398"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Make ``apikey.created_at`` NOT NULL when the column exists and is nullable.

    The check against the live schema makes the migration safe to run against
    databases that are already in the target state.
    """
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)  # type: ignore
    # ### commands auto generated by Alembic - please adjust! ###
    columns = inspector.get_columns("apikey")
    column_names = {column["name"]: column for column in columns}
    # Dict lookup instead of list-comprehension + [0]: the original indexing
    # raised IndexError when "created_at" was missing, before the guard ran.
    created_at_column = column_names.get("created_at")
    with op.batch_alter_table("apikey", schema=None) as batch_op:
        if created_at_column is not None and created_at_column.get("nullable"):
            batch_op.alter_column(
                "created_at",
                existing_type=sa.DATETIME(),
                nullable=False,
                existing_server_default=sa.text("(CURRENT_TIMESTAMP)"),  # type: ignore
            )
    # ### end Alembic commands ###
def downgrade() -> None:
    """Revert ``apikey.created_at`` to nullable when it is currently NOT NULL.

    Mirrors :func:`upgrade`: the live-schema check keeps the downgrade
    idempotent on databases that never had the constraint applied.
    """
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)  # type: ignore
    columns = inspector.get_columns("apikey")
    column_names = {column["name"]: column for column in columns}
    # Dict lookup instead of list-comprehension + [0]: the original indexing
    # raised IndexError when "created_at" was missing, before the guard ran.
    created_at_column = column_names.get("created_at")
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("apikey", schema=None) as batch_op:
        if created_at_column is not None and not created_at_column.get("nullable"):
            batch_op.alter_column(
                "created_at",
                existing_type=sa.DATETIME(),
                nullable=True,
                existing_server_default=sa.text("(CURRENT_TIMESTAMP)"),  # type: ignore
            )
    # ### end Alembic commands ###

View file

@ -0,0 +1,52 @@
"""Set name and value to not nullable
Revision ID: c153816fd85f
Revises: 1f4d6df60295
Create Date: 2024-04-30 14:31:23.898995
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision: str = "c153816fd85f"
down_revision: Union[str, None] = "1f4d6df60295"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Tighten ``variable.name`` and ``variable.value`` to NOT NULL when nullable."""
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)  # type: ignore
    # ### commands auto generated by Alembic - please adjust! ###
    columns = inspector.get_columns("variable")
    # Index the reflected columns by name; the original `[...][0]` raised
    # IndexError if either column was missing from the table.
    column_map = {column["name"]: column for column in columns}
    with op.batch_alter_table("variable", schema=None) as batch_op:
        name_column = column_map.get("name")
        if name_column is not None and name_column["nullable"]:
            batch_op.alter_column("name", existing_type=sa.VARCHAR(), nullable=False)
        value_column = column_map.get("value")
        if value_column is not None and value_column["nullable"]:
            batch_op.alter_column("value", existing_type=sa.VARCHAR(), nullable=False)
    # ### end Alembic commands ###
def downgrade() -> None:
    """Relax ``variable.name`` and ``variable.value`` back to nullable."""
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)  # type: ignore
    columns = inspector.get_columns("variable")
    # Index the reflected columns by name; the original `[...][0]` raised
    # IndexError if either column was missing from the table.
    column_map = {column["name"]: column for column in columns}
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("variable", schema=None) as batch_op:
        name_column = column_map.get("name")
        if name_column is not None and not name_column["nullable"]:
            batch_op.alter_column("name", existing_type=sa.VARCHAR(), nullable=True)
        value_column = column_map.get("value")
        if value_column is not None and not value_column["nullable"]:
            # BUG FIX: the original altered the "name" column here instead of
            # "value", so the value column was never relaxed on downgrade.
            batch_op.alter_column("value", existing_type=sa.VARCHAR(), nullable=True)
    # ### end Alembic commands ###

View file

@ -201,7 +201,7 @@ def format_elapsed_time(elapsed_time: float) -> str:
return f"{minutes} {minutes_unit}, {seconds} {seconds_unit}"
async def build_and_cache_graph(
async def build_and_cache_graph_from_db(
flow_id: str,
session: Session,
chat_service: "ChatService",
@ -220,6 +220,17 @@ async def build_and_cache_graph(
return graph
async def build_and_cache_graph_from_data(
    flow_id: str,
    chat_service: "ChatService",
    graph_data: dict,
):  # -> Graph | Any:
    """Build a Graph from a raw flow payload and store it in the chat-service cache."""
    built_graph = Graph.from_payload(graph_data, flow_id)
    await chat_service.set_cache(flow_id, built_graph)
    return built_graph
def format_syntax_error_message(exc: SyntaxError) -> str:
"""Format a SyntaxError message for returning to the frontend."""
if exc.text is None:

View file

@ -8,13 +8,15 @@ from fastapi.responses import StreamingResponse
from loguru import logger
from langflow.api.utils import (
build_and_cache_graph,
build_and_cache_graph_from_data,
build_and_cache_graph_from_db,
format_elapsed_time,
format_exception_message,
get_top_level_vertices,
parse_exception,
)
from langflow.api.v1.schemas import (
FlowDataRequest,
InputValueRequest,
ResultDataResponse,
StreamData,
@ -49,9 +51,10 @@ async def try_running_celery_task(vertex, user_id):
return vertex
@router.get("/build/{flow_id}/vertices", response_model=VerticesOrderResponse)
async def get_vertices(
@router.post("/build/{flow_id}/vertices", response_model=VerticesOrderResponse)
async def retrieve_vertices_order(
flow_id: str,
data: Optional[Annotated[Optional[FlowDataRequest], Body(embed=True)]] = None,
stop_component_id: Optional[str] = None,
start_component_id: Optional[str] = None,
chat_service: "ChatService" = Depends(get_chat_service),
@ -62,6 +65,7 @@ async def get_vertices(
Args:
flow_id (str): The ID of the flow.
data (Optional[FlowDataRequest], optional): The flow data. Defaults to None.
stop_component_id (str, optional): The ID of the stop component. Defaults to None.
start_component_id (str, optional): The ID of the start component. Defaults to None.
chat_service (ChatService, optional): The chat service dependency. Defaults to Depends(get_chat_service).
@ -76,9 +80,16 @@ async def get_vertices(
try:
# First, we need to check if the flow_id is in the cache
graph = None
if cache := await chat_service.get_cache(flow_id):
graph = cache.get("result")
graph = await build_and_cache_graph(flow_id, session, chat_service, graph)
if not data:
if cache := await chat_service.get_cache(flow_id):
graph = cache.get("result")
graph = await build_and_cache_graph_from_db(
flow_id=flow_id, session=session, chat_service=chat_service, graph=graph
)
else:
graph = await build_and_cache_graph_from_data(
flow_id=flow_id, graph_data=data.model_dump(), chat_service=chat_service
)
if stop_component_id or start_component_id:
try:
first_layer = graph.sort_vertices(stop_component_id, start_component_id)
@ -144,7 +155,9 @@ async def build_vertex(
if not cache:
# If there's no cache
logger.warning(f"No cache found for {flow_id}. Building graph starting at {vertex_id}")
graph = await build_and_cache_graph(flow_id=flow_id, session=next(get_session()), chat_service=chat_service)
graph = await build_and_cache_graph_from_db(
flow_id=flow_id, session=next(get_session()), chat_service=chat_service
)
else:
graph = cache.get("result")
result_data_response = ResultDataResponse(results={})

View file

@ -130,7 +130,7 @@ async def simplified_run_flow(
graph_data = flow.data
graph_data = process_tweaks(graph_data, input_request.tweaks or {}, stream=stream)
graph = Graph.from_payload(graph_data, flow_id=flow_id, user_id=api_key_user.id)
graph = Graph.from_payload(graph_data, flow_id=flow_id, user_id=str(api_key_user.id))
inputs = [
InputValueRequest(components=[], input_value=input_request.input_value, type=input_request.input_type)
]

View file

@ -26,7 +26,7 @@ class BuildStatus(Enum):
class TweaksRequest(BaseModel):
tweaks: Optional[Dict[str, Dict[str, str]]] = Field(default_factory=dict)
tweaks: Optional[Dict[str, Dict[str, Any]]] = Field(default_factory=dict)
class UpdateTemplateRequest(BaseModel):
@ -294,3 +294,15 @@ class SimplifiedAPIRequest(BaseModel):
)
tweaks: Optional[Tweaks] = Field(default=None, description="The tweaks")
session_id: Optional[str] = Field(default=None, description="The session id")
# (alias) type ReactFlowJsonObject<NodeData = any, EdgeData = any> = {
# nodes: Node<NodeData>[];
# edges: Edge<EdgeData>[];
# viewport: Viewport;
# }
# import ReactFlowJsonObject
class FlowDataRequest(BaseModel):
    """Raw flow payload mirroring React Flow's ``ReactFlowJsonObject`` shape."""

    # Raw React Flow node dictionaries.
    nodes: List[dict]
    # Raw React Flow edge dictionaries.
    edges: List[dict]
    # Optional React Flow viewport; presumably x/y/zoom — confirm against frontend.
    viewport: Optional[dict] = None

View file

@ -37,7 +37,11 @@ def create_variable(
variable_dict["user_id"] = current_user.id
db_variable = Variable.model_validate(variable_dict)
if not db_variable.value:
if not db_variable.name and not db_variable.value:
raise HTTPException(status_code=400, detail="Variable name and value cannot be empty")
elif not db_variable.name:
raise HTTPException(status_code=400, detail="Variable name cannot be empty")
elif not db_variable.value:
raise HTTPException(status_code=400, detail="Variable value cannot be empty")
encrypted = auth_utils.encrypt_api_key(db_variable.value, settings_service=settings_service)
db_variable.value = encrypted

View file

@ -1,10 +1,9 @@
import warnings
from typing import Optional, Union
from langflow.field_typing import Text
from langflow.helpers.record import records_to_text
from langflow.interface.custom.custom_component import CustomComponent
from langflow.memory import add_messages
from langflow.memory import store_message
from langflow.schema import Record
@ -50,34 +49,16 @@ class ChatComponent(CustomComponent):
sender: Optional[str] = None,
sender_name: Optional[str] = None,
) -> list[Record]:
if not message:
warnings.warn("No message provided.")
return []
if not session_id or not sender or not sender_name:
raise ValueError("All of session_id, sender, and sender_name must be provided.")
if isinstance(message, Record):
record = message
record.data.update(
{
"session_id": session_id,
"sender": sender,
"sender_name": sender_name,
}
)
else:
record = Record(
data={
"text": message,
"session_id": session_id,
"sender": sender,
"sender_name": sender_name,
},
)
records = store_message(
message,
session_id=session_id,
sender=sender,
sender_name=sender_name,
)
self.status = record
records = add_messages([record])
return records[0]
self.status = records
return records
def build_with_record(
self,

View file

@ -0,0 +1,51 @@
from typing import Optional
from langflow.field_typing import Text
from langflow.helpers.record import records_to_text
from langflow.interface.custom.custom_component import CustomComponent
from langflow.schema.schema import Record
class BaseMemoryComponent(CustomComponent):
    """Shared configuration and interface for chat-memory components."""

    display_name = "Chat Memory"
    description = "Retrieves stored chat messages given a specific Session ID."
    beta: bool = True
    icon = "history"

    def build_config(self):
        """Return the frontend field configuration shared by memory components."""
        sender_field = {
            "options": ["Machine", "User", "Machine and User"],
            "display_name": "Sender Type",
        }
        sender_name_field = {"display_name": "Sender Name", "advanced": True}
        n_messages_field = {
            "display_name": "Number of Messages",
            "info": "Number of messages to retrieve.",
        }
        session_id_field = {
            "display_name": "Session ID",
            "info": "Session ID of the chat history.",
            "input_types": ["Text"],
        }
        order_field = {
            "options": ["Ascending", "Descending"],
            "display_name": "Order",
            "info": "Order of the messages.",
            "advanced": True,
        }
        record_template_field = {
            "display_name": "Record Template",
            "multiline": True,
            "info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.",
            "advanced": True,
        }
        return {
            "sender": sender_field,
            "sender_name": sender_name_field,
            "n_messages": n_messages_field,
            "session_id": session_id_field,
            "order": order_field,
            "record_template": record_template_field,
        }

    def get_messages(self, **kwargs) -> list[Record]:
        """Retrieve stored messages; concrete subclasses must implement this."""
        raise NotImplementedError

    def add_message(
        self, sender: str, sender_name: str, text: str, session_id: str, metadata: Optional[dict] = None, **kwargs
    ):
        """Persist a single message; concrete subclasses must implement this."""
        raise NotImplementedError

View file

@ -0,0 +1,44 @@
from typing import List, Optional
from langflow.interface.custom.custom_component import CustomComponent
from langflow.memory import get_messages, store_message
from langflow.schema import Record
class StoreMessageComponent(CustomComponent):
    """Store a chat message under a session ID, then return that session's messages."""

    display_name = "Store Message"
    description = "Stores a chat message given a Session ID."
    beta: bool = True

    def build_config(self):
        return {
            "sender": {
                "options": ["Machine", "User"],
                "display_name": "Sender Type",
            },
            "sender_name": {"display_name": "Sender Name"},
            "message": {"display_name": "Message"},
            "session_id": {
                "display_name": "Session ID",
                "info": "Session ID of the chat history.",
                "input_types": ["Text"],
            },
        }

    def build(
        self,
        sender: str = "User",
        sender_name: Optional[str] = None,
        session_id: Optional[str] = None,
        message: str = "",
    ) -> List[Record]:
        """Persist *message* and return the stored messages for *session_id*."""
        store_message(
            sender=sender,
            sender_name=sender_name,
            session_id=session_id,
            message=message,
        )
        # Fetch once: the original called get_messages() twice (once for the
        # status, once for the return value), doubling the lookup and risking
        # the two results diverging.
        records = get_messages(session_id=session_id)
        self.status = records
        return records

View file

@ -0,0 +1,55 @@
from langflow.interface.custom.custom_component import CustomComponent
from langflow.schema import Record
from langflow.field_typing import Text
class TextOperatorComponent(CustomComponent):
    """Compare two texts with a chosen operator; stop the flow when the check fails."""

    display_name = "Text Operator"
    description = "Compares two text inputs based on a specified condition such as equality or inequality, with optional case sensitivity."

    def build_config(self) -> dict:
        return {
            "input_text": {
                "display_name": "Input Text",
                "info": "The primary text input for the operation.",
            },
            "match_text": {
                "display_name": "Match Text",
                "info": "The text input to compare against.",
            },
            "operator": {
                "display_name": "Operator",
                "info": "The operator to apply for comparing the texts.",
                "options": ["equals", "not equals", "contains", "starts with", "ends with"],
            },
            "case_sensitive": {
                "display_name": "Case Sensitive",
                "info": "If true, the comparison will be case sensitive.",
                "field_type": "bool",
                "default": False,
            },
        }

    def build(self, input_text: Text, match_text: Text, operator: Text, case_sensitive: bool = False) -> Text:
        """Evaluate *operator* between the two texts; returns *input_text* on success.

        Raises:
            ValueError: if either text is empty or the operator is unknown.
        """
        if not input_text or not match_text:
            raise ValueError("Both 'input_text' and 'match_text' must be provided and non-empty.")
        if not case_sensitive:
            input_text = input_text.lower()
            match_text = match_text.lower()
        # Dispatch table mirrors the options exposed in build_config.
        comparisons = {
            "equals": lambda: input_text == match_text,
            "not equals": lambda: input_text != match_text,
            "contains": lambda: match_text in input_text,
            "starts with": lambda: input_text.startswith(match_text),
            "ends with": lambda: input_text.endswith(match_text),
        }
        if operator not in comparisons:
            # Previously an unknown operator silently evaluated to False and
            # stopped the flow; fail loudly instead.
            raise ValueError(f"Unsupported operator: {operator}")
        result = comparisons[operator]()
        if not result:
            self.stop()
        self.status = f"{result} \n\n {input_text}"
        return input_text

View file

@ -0,0 +1,25 @@
from langflow.interface.custom.custom_component import CustomComponent
from langflow.field_typing import Text
class CombineTextsUnsortedComponent(CustomComponent):
    """Join a list of texts into one string with a configurable delimiter."""

    display_name = "Combine Texts (Unsorted)"
    description = "Concatenate text sources into a single text chunk using a specified delimiter."
    icon = "merge"

    def build_config(self):
        return {
            "texts": {
                "display_name": "Texts",
                # Fixed: the field takes a list of texts, not "the first text input".
                "info": "The texts to concatenate.",
            },
            "delimiter": {
                "display_name": "Delimiter",
                # Fixed: the component joins any number of inputs, not just two.
                "info": "A string used to separate the text inputs. Defaults to a whitespace.",
            },
        }

    def build(self, texts: list[str], delimiter: str = " ") -> Text:
        """Return *texts* joined by *delimiter*; also surfaced as the status."""
        combined = delimiter.join(texts)
        self.status = combined
        return combined

View file

@ -1,12 +1,13 @@
from typing import Optional
from langflow.base.memory.memory import BaseMemoryComponent
from langflow.field_typing import Text
from langflow.helpers.record import records_to_text
from langflow.interface.custom.custom_component import CustomComponent
from langflow.memory import get_messages
from langflow.schema.schema import Record
class MemoryComponent(CustomComponent):
class MemoryComponent(BaseMemoryComponent):
display_name = "Chat Memory"
description = "Retrieves stored chat messages given a specific Session ID."
beta: bool = True
@ -42,6 +43,24 @@ class MemoryComponent(CustomComponent):
},
}
def get_messages(self, **kwargs) -> list[Record]:
# Validate kwargs by checking if it contains the correct keys
if "sender" not in kwargs:
kwargs["sender"] = None
if "sender_name" not in kwargs:
kwargs["sender_name"] = None
if "session_id" not in kwargs:
kwargs["session_id"] = None
if "limit" not in kwargs:
kwargs["limit"] = 5
if "order" not in kwargs:
kwargs["order"] = "Descending"
kwargs["order"] = "DESC" if kwargs["order"] == "Descending" else "ASC"
if kwargs["sender"] == "Machine and User":
kwargs["sender"] = None
return get_messages(**kwargs)
def build(
self,
sender: Optional[str] = "Machine and User",
@ -51,10 +70,7 @@ class MemoryComponent(CustomComponent):
order: Optional[str] = "Descending",
record_template: Optional[str] = "{sender_name}: {text}",
) -> Text:
order = "DESC" if order == "Descending" else "ASC"
if sender == "Machine and User":
sender = None
messages = get_messages(
messages = self.get_messages(
sender=sender,
sender_name=sender_name,
session_id=session_id,

View file

@ -0,0 +1,30 @@
from langchain_core.messages import BaseMessage
from langchain_core.prompts import PromptTemplate
from langflow.custom import CustomComponent
from langflow.field_typing import BaseLanguageModel, Text
class ShouldRunNextComponent(CustomComponent):
    """Ask the LLM a yes/no question and stop the flow when the answer is not yes."""

    display_name = "Should Run Next"
    description = "Determines if a vertex is runnable."

    def build(self, llm: BaseLanguageModel, question: str, context: str, retries: int = 3) -> Text:
        """Query *llm* up to *retries* times for a yes/no verdict; return *context*."""
        template = "Given the following question and the context below, answer with a yes or no.\n\n{error_message}\n\nQuestion: {question}\n\nContext: {context}\n\nAnswer:"
        prompt = PromptTemplate.from_template(template)
        chain = prompt | llm
        error_message = ""
        # BUG FIX: `content` was referenced after the loop but only assigned
        # inside the isinstance branches, so an unexpected chain result type
        # raised NameError. Initialize it up front.
        content = None
        for _ in range(retries):
            result = chain.invoke({"question": question, "context": context, "error_message": error_message})
            if isinstance(result, BaseMessage):
                content = result.content
            elif isinstance(result, str):
                content = result
            if isinstance(content, str) and content.lower().strip() in ("yes", "no"):
                break
        # NOTE(review): error_message is never updated between retries — confirm
        # whether failed answers were meant to be fed back into the prompt.
        condition = str(content).lower().strip() == "yes"
        self.status = f"Should Run Next: {condition}"
        if condition is False:
            self.stop()
        return context

View file

@ -7,7 +7,7 @@ from langflow.schema import Record
class ChatInput(ChatComponent):
display_name = "Chat Input"
description = "Get chat inputs from the Interaction Panel."
description = "Get chat inputs from the Playground."
icon = "ChatInput"
def build_config(self):

View file

@ -0,0 +1,48 @@
from pathlib import Path
from typing import Any, Dict
from langflow.base.data.utils import TEXT_FILE_TYPES, parse_text_file_to_record
from langflow.interface.custom.custom_component import CustomComponent
from langflow.schema import Record
class FileInput(CustomComponent):
    """Load a supported text file from disk into a Record."""

    display_name = "File Input"
    description = "A generic file input."
    icon = "file-text"

    def build_config(self) -> Dict[str, Any]:
        return {
            "path": {
                "display_name": "Path",
                "field_type": "file",
                "file_types": TEXT_FILE_TYPES,
                "info": f"Supported file types: {', '.join(TEXT_FILE_TYPES)}",
            },
            "silent_errors": {
                "display_name": "Silent Errors",
                "advanced": True,
                "info": "If true, errors will not raise an exception.",
            },
        }

    def load_file(self, path: str, silent_errors: bool = False) -> Record:
        """Resolve *path*, validate the extension, and parse the file into a Record.

        Raises:
            ValueError: for .doc files or any extension outside TEXT_FILE_TYPES.
        """
        resolved_path = self.resolve_path(path)
        path_obj = Path(resolved_path)
        extension = path_obj.suffix[1:].lower()
        if extension == "doc":
            raise ValueError("doc files are not supported. Please save as .docx")
        if extension not in TEXT_FILE_TYPES:
            raise ValueError(f"Unsupported file type: {extension}")
        record = parse_text_file_to_record(resolved_path, silent_errors)
        self.status = record if record else "No data"
        return record or Record()

    def build(
        self,
        path: str,
        silent_errors: bool = False,
    ) -> Record:
        # BUG FIX: load_file already sets self.status (including the "No data"
        # marker for empty parses); the previous unconditional overwrite here
        # clobbered that message with an empty Record.
        return self.load_file(path, silent_errors)

View file

@ -0,0 +1,17 @@
from langflow.base.io.text import TextComponent
from langflow.field_typing.constants import Data, NestedDict
class JsonInput(TextComponent):
    """Accept a nested-dictionary (JSON) value and pass it through unchanged."""

    display_name = "JSON Input"
    description = "JSON Input."

    def build_config(self):
        json_field = {"display_name": "JSON", "field_type": "NestedDict"}
        return {"input_value": json_field}

    def build(self, input_value: NestedDict) -> NestedDict:
        return input_value

View file

@ -0,0 +1,19 @@
from langflow.base.io.text import TextComponent
from langflow.field_typing.constants import Data
class KeyPairInput(TextComponent):
    """Accept a list of dictionaries and pass it through unchanged."""

    display_name = "Dictionary Input"
    description = "Dictionary Input."

    def build_config(self):
        dict_field = {
            "display_name": "Dictionaries",
            "field_type": "dict",
            "list": True,
        }
        return {"input_value": dict_field}

    def build(self, input_value: dict) -> dict:
        return input_value

View file

@ -0,0 +1,13 @@
# from langflow.field_typing import Data
from langflow.schema import Record
from langflow.interface.custom.custom_component import CustomComponent
class StringListInput(CustomComponent):
    """Wrap a list of strings in a Record."""

    display_name = "String List Input"

    def build_config(self):
        list_field = {"display_name": "String List Input", "field_type": "str", "list": True}
        return {"input_value": list_field}

    def build(self, input_value: list) -> Record:
        # NOTE(review): Record(data=...) receives a list here — confirm Record
        # accepts non-dict data.
        return Record(data=input_value)

View file

@ -6,7 +6,7 @@ from langflow.field_typing import Text
class TextInput(TextComponent):
display_name = "Text Input"
description = "Get text inputs from the Interaction Panel."
description = "Get text inputs from the Playground."
icon = "type"
def build_config(self):

View file

@ -0,0 +1,137 @@
from typing import Optional, cast
from langchain_community.chat_message_histories.zep import SearchScope, SearchType, ZepChatMessageHistory
from langflow.base.memory.memory import BaseMemoryComponent
from langflow.field_typing import Text
from langflow.schema.schema import Record
class ZepMessageReaderComponent(BaseMemoryComponent):
    # Reads chat history from a Zep instance, optionally via a search query.
    display_name = "Zep Message Reader"
    description = "Retrieves stored chat messages from Zep."

    def build_config(self):
        # Frontend field configuration; search_scope/search_type options are
        # lowercased before being passed to Zep in get_messages.
        return {
            "session_id": {
                "display_name": "Session ID",
                "info": "Session ID of the chat history.",
                "input_types": ["Text"],
            },
            "url": {
                "display_name": "Zep URL",
                "info": "URL of the Zep instance.",
                "input_types": ["Text"],
            },
            "api_key": {
                "display_name": "Zep API Key",
                "info": "API Key for the Zep instance.",
                "password": True,
            },
            "query": {
                "display_name": "Query",
                "info": "Query to search for in the chat history.",
            },
            "metadata": {
                "display_name": "Metadata",
                "info": "Optional metadata to attach to the message.",
                "advanced": True,
            },
            "search_scope": {
                "options": ["Messages", "Summary"],
                "display_name": "Search Scope",
                "info": "Scope of the search.",
                "advanced": True,
            },
            "search_type": {
                "options": ["Similarity", "MMR"],
                "display_name": "Search Type",
                "info": "Type of search.",
                "advanced": True,
            },
            "limit": {
                "display_name": "Limit",
                "info": "Limit of search results.",
                "advanced": True,
            },
        }

    def get_messages(self, **kwargs) -> list[Record]:
        """
        Retrieves messages from the ZepChatMessageHistory memory.

        If a query is provided, the search method is used to search for messages in the memory, otherwise all messages are returned.

        Args:
            memory (ZepChatMessageHistory): The ZepChatMessageHistory instance to retrieve messages from.
            query (str, optional): The query string to search for messages. Defaults to None.
            metadata (dict, optional): Additional metadata to filter the search results. Defaults to None.
            search_scope (str, optional): The scope of the search. Can be 'messages' or 'summary'. Defaults to 'messages'.
            search_type (str, optional): The type of search. Can be 'similarity' or 'mmr'. Defaults to 'similarity'.
            limit (int, optional): The maximum number of search results to return. Defaults to None.

        Returns:
            list[Record]: A list of Record objects representing the search results.
        """
        memory: ZepChatMessageHistory = cast(ZepChatMessageHistory, kwargs.get("memory"))
        if not memory:
            raise ValueError("ZepChatMessageHistory instance is required.")
        query = kwargs.get("query")
        # .lower() maps the UI options ("Messages"/"Similarity", etc.) onto the
        # lowercase values Zep expects; presumably SearchScope/SearchType are
        # str-based enums so .lower() works on the defaults too — TODO confirm.
        search_scope = kwargs.get("search_scope", SearchScope.messages).lower()
        search_type = kwargs.get("search_type", SearchType.similarity).lower()
        limit = kwargs.get("limit")
        if query:
            memory_search_results = memory.search(
                query,
                search_scope=search_scope,
                search_type=search_type,
                limit=limit,
            )
            # Get the messages from the search results if the search scope is messages
            result_dicts = []
            for result in memory_search_results:
                result_dict = {}
                if search_scope == SearchScope.messages:
                    result_dict["text"] = result.message
                else:
                    result_dict["text"] = result.summary
                result_dict["metadata"] = result.metadata
                result_dict["score"] = result.score
                result_dicts.append(result_dict)
            results = [Record(data=result_dict) for result_dict in result_dicts]
        else:
            # No query: return the full message history converted to Records.
            messages = memory.messages
            results = [Record.from_lc_message(message) for message in messages]
        return results

    def build(
        self,
        session_id: Text,
        url: Optional[Text] = None,
        api_key: Optional[Text] = None,
        query: Optional[Text] = None,
        search_scope: SearchScope = SearchScope.messages,
        search_type: SearchType = SearchType.similarity,
        limit: Optional[int] = None,
    ) -> list[Record]:
        """Connect to Zep for *session_id* and return matching message Records.

        Raises:
            ImportError: if the optional zep-python dependency is missing.
        """
        try:
            from zep_python import ZepClient
            from zep_python.langchain import ZepChatMessageHistory
        except ImportError:
            raise ImportError(
                "Could not import zep-python package. " "Please install it with `pip install zep-python`."
            )
        # An empty URL from the UI means "use the client's default endpoint".
        if url == "":
            url = None
        zep_client = ZepClient(api_url=url, api_key=api_key)
        memory = ZepChatMessageHistory(session_id=session_id, zep_client=zep_client)
        records = self.get_messages(
            memory=memory,
            query=query,
            search_scope=search_scope,
            search_type=search_type,
            limit=limit,
        )
        self.status = records
        return records

View file

@ -0,0 +1,96 @@
from typing import Optional, TYPE_CHECKING
from langflow.base.memory.memory import BaseMemoryComponent
from langflow.field_typing import Text
from langflow.schema.schema import Record
if TYPE_CHECKING:
from zep_python.langchain import ZepChatMessageHistory
class ZepMessageWriterComponent(BaseMemoryComponent):
    # Writes a single chat message (wrapped in a Record) to a Zep instance.
    display_name = "Zep Message Writer"
    description = "Writes a message to Zep."

    def build_config(self):
        # Frontend field configuration for this component.
        return {
            "session_id": {
                "display_name": "Session ID",
                "info": "Session ID of the chat history.",
                "input_types": ["Text"],
            },
            "url": {
                "display_name": "Zep URL",
                "info": "URL of the Zep instance.",
                "input_types": ["Text"],
            },
            "api_key": {
                "display_name": "Zep API Key",
                "info": "API Key for the Zep instance.",
                "password": True,
            },
            "limit": {
                "display_name": "Limit",
                "info": "Limit of search results.",
                "advanced": True,
            },
            "input_value": {
                "display_name": "Input Record",
                "info": "Record to write to Zep.",
            },
        }

    def add_message(
        self, sender: Text, sender_name: Text, text: Text, session_id: Text, metadata: dict | None = None, **kwargs
    ):
        """
        Adds a message to the ZepChatMessageHistory memory.

        Args:
            sender (Text): The type of the message sender. Valid values are "Machine" or "User".
            sender_name (Text): The name of the message sender.
            text (Text): The content of the message.
            session_id (Text): The session ID associated with the message.
            metadata (dict | None, optional): Additional metadata for the message. Defaults to None.
            **kwargs: Additional keyword arguments.

        Raises:
            ValueError: If the ZepChatMessageHistory instance is not provided.
        """
        memory: ZepChatMessageHistory | None = kwargs.pop("memory", None)
        if memory is None:
            raise ValueError("ZepChatMessageHistory instance is required.")
        if metadata is None:
            metadata = {}
        # Fold the sender name and any leftover kwargs into the stored metadata.
        metadata["sender_name"] = sender_name
        metadata.update(kwargs)
        if sender == "Machine":
            memory.add_ai_message(text, metadata=metadata)
        elif sender == "User":
            memory.add_user_message(text, metadata=metadata)
        else:
            raise ValueError(f"Invalid sender type: {sender}")

    def build(
        self,
        input_value: Record,
        session_id: Text,
        url: Optional[Text] = None,
        api_key: Optional[Text] = None,
    ) -> Record:
        """Write *input_value* to Zep for *session_id* and return it unchanged.

        Raises:
            ImportError: if the optional zep-python dependency is missing.
        """
        try:
            from zep_python import ZepClient
            from zep_python.langchain import ZepChatMessageHistory
        except ImportError:
            raise ImportError(
                "Could not import zep-python package. " "Please install it with `pip install zep-python`."
            )
        # An empty URL from the UI means "use the client's default endpoint".
        if url == "":
            url = None
        zep_client = ZepClient(api_url=url, api_key=api_key)
        memory = ZepChatMessageHistory(session_id=session_id, zep_client=zep_client)
        # Assumes input_value.data carries sender/sender_name/text/session_id
        # keys matching add_message's signature — TODO confirm upstream Record shape.
        self.add_message(**input_value.data, memory=memory)
        self.status = f"Added message to Zep memory for session {session_id}"
        return input_value

View file

@ -1,6 +1,5 @@
from typing import Optional
from langchain.llms.base import BaseLanguageModel
from langchain_openai import AzureChatOpenAI
from pydantic.v1 import SecretStr

View file

@ -4,7 +4,7 @@ from langchain_community.chat_models.litellm import ChatLiteLLM, ChatLiteLLMExce
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import BaseLanguageModel, Text
from langflow.field_typing import Text
class ChatLiteLLMModelComponent(LCModelComponent):

View file

@ -0,0 +1,17 @@
from typing import Optional
from langflow.base.io.text import TextComponent
from langflow.field_typing import Text, Data
class CSVOutput(TextComponent):
    """Pair a CSV blob with its separator so the frontend can render it."""

    display_name = "CSV Output"
    # Fixed grammar: was "Used view csv files".
    description = "Used to view csv files"

    field_config = {
        "input_value": {
            "display_name": "csv",
            "info": "A csv blob",
            "input_types": ["Data"],
        },
        "separator": {
            "display_name": "separator",
            "info": "The separator used in the csv file",
            "input_types": ["Text"],
            "field_type": "Text",
            "default_value": ";",
            "options": [";", ",", "|"],
        },
    }

    def build(self, input_value: Data, separator: Text = ";") -> Data:
        """Return the CSV data and separator; default mirrors field_config's default_value."""
        return {"data": input_value, "separator": separator}

View file

@ -7,7 +7,7 @@ from langflow.schema import Record
class ChatOutput(ChatComponent):
display_name = "Chat Output"
description = "Display a chat message in the Interaction Panel."
description = "Display a chat message in the Playground."
icon = "ChatOutput"
def build(

View file

@ -0,0 +1,15 @@
from typing import Optional
from langflow.base.io.text import TextComponent
from langflow.field_typing import Text
class ImageOutput(TextComponent):
    """Pass an image URL through unchanged so the frontend can render it."""

    display_name = "Image Output"
    # Fixed grammar: was "Used view image files".
    description = "Used to view image files"

    field_config = {
        "input_value": {
            "display_name": "image",
            # Fixed article: was "A image url".
            "info": "An image url",
            "input_types": ["Text"],
        },
    }

    def build(self, input_value: Text) -> Text:
        return input_value

View file

@ -0,0 +1,17 @@
from langflow.base.io.text import TextComponent
from langflow.field_typing.constants import Data, NestedDict
class JsonOutput(TextComponent):
    """Pass a nested-dictionary (JSON) value through unchanged for display."""

    display_name = "JSON Output"
    description = "JSON Output."

    def build_config(self):
        json_field = {"display_name": "JSON", "field_type": "NestedDict"}
        return {"input_value": json_field}

    def build(self, input_value: NestedDict) -> NestedDict:
        return input_value

View file

@ -0,0 +1,19 @@
from langflow.base.io.text import TextComponent
from langflow.field_typing.constants import Data
class KeyPairOutput(TextComponent):
    """Pass a list of dictionaries through unchanged for display."""

    display_name = "Dictionary Output"
    description = "Dictionary Output."

    def build_config(self):
        dict_field = {
            "display_name": "Dictionaries",
            "field_type": "dict",
            "list": True,
        }
        return {"input_value": dict_field}

    def build(self, input_value: dict) -> dict:
        return input_value

View file

@ -0,0 +1,16 @@
from typing import Optional
from langflow.base.io.text import TextComponent
from langflow.field_typing import Text
class PDFOutput(TextComponent):
    """Pass a PDF URL through unchanged so the frontend can render it."""

    display_name = "PDF Output"
    # Fixed grammar: was "Used view pdf files".
    description = "Used to view pdf files"

    field_config = {
        "input_value": {
            "display_name": "pdf",
            "info": "A pdf url",
            "input_types": ["Text"],
        },
    }

    def build(self, input_value: Text) -> Text:
        return input_value

View file

@ -0,0 +1,13 @@
# from langflow.field_typing import Data
from langflow.schema import Record
from langflow.interface.custom.custom_component import CustomComponent
class StringListOutput(CustomComponent):
    """Wrap a list of strings in a Record."""

    display_name = "String List Output"

    def build_config(self):
        list_field = {"display_name": "String List Output", "field_type": "str", "list": True}
        return {"input_value": list_field}

    def build(self, input_value: list) -> Record:
        # NOTE(review): Record(data=...) receives a list here — confirm Record
        # accepts non-dict data.
        return Record(data=input_value)

View file

@ -6,7 +6,7 @@ from langflow.field_typing import Text
class TextOutput(TextComponent):
display_name = "Text Output"
description = "Display a text output in the Interaction Panel."
description = "Display a text output in the Playground."
icon = "type"
def build_config(self):

View file

@ -61,10 +61,10 @@ class WeaviateSearchVectorStore(WeaviateVectorStoreComponent, LCVectorStoreCompo
input_value: Text,
search_type: str,
url: str,
index_name: str,
number_of_results: int = 4,
search_by_text: bool = False,
api_key: Optional[str] = None,
index_name: Optional[str] = None,
text_key: str = "text",
embedding: Optional[Embeddings] = None,
attributes: Optional[list] = None,

View file

@ -4,6 +4,7 @@ import weaviate # type: ignore
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore, Weaviate
from langchain_core.documents import Document
from langflow.interface.custom.custom_component import CustomComponent
from langflow.schema.schema import Record
@ -50,9 +51,9 @@ class WeaviateVectorStoreComponent(CustomComponent):
def build(
self,
url: str,
index_name: str,
search_by_text: bool = False,
api_key: Optional[str] = None,
index_name: Optional[str] = None,
text_key: str = "text",
embedding: Optional[Embeddings] = None,
inputs: Optional[Record] = None,
@ -78,11 +79,13 @@ class WeaviateVectorStoreComponent(CustomComponent):
return pascal_case_word
index_name = _to_pascal_case(index_name) if index_name else None
documents = []
if not index_name:
raise ValueError("Index name is required")
documents: list[Document] = []
for _input in inputs or []:
if isinstance(_input, Record):
documents.append(_input.to_lc_document())
else:
elif isinstance(_input, Document):
documents.append(_input)
if documents and embedding is not None:

View file

@ -232,3 +232,4 @@ output_parsers:
custom_components:
CustomComponent:
documentation: "https://docs.langflow.org/guidelines/custom-component"
# documentation: "https://docs.langflow.org/administration/custom-component"

View file

@ -3,7 +3,7 @@ import uuid
from collections import defaultdict, deque
from functools import partial
from itertools import chain
from typing import TYPE_CHECKING, Callable, Coroutine, Dict, Generator, List, Optional, Type, Union
from typing import TYPE_CHECKING, Callable, Coroutine, Dict, Generator, List, Optional, Tuple, Type, Union
from loguru import logger
@ -14,7 +14,7 @@ from langflow.graph.graph.state_manager import GraphStateManager
from langflow.graph.graph.utils import process_flow
from langflow.graph.schema import InterfaceComponentTypes, RunOutputs
from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.types import ChatVertex, FileToolVertex, LLMVertex, RoutingVertex, StateVertex, ToolkitVertex
from langflow.graph.vertex.types import ChatVertex, FileToolVertex, LLMVertex, StateVertex, ToolkitVertex
from langflow.interface.tools.constants import FILE_TOOLS
from langflow.schema import Record
from langflow.schema.schema import INPUT_FIELD_NAME, InputType
@ -75,7 +75,7 @@ class Graph:
self.vertices: List[Vertex] = []
self.run_manager = RunnableVerticesManager()
self._build_graph()
self.build_graph_maps()
self.build_graph_maps(self.edges)
self.define_vertices_lists()
self.state_manager = GraphStateManager()
@ -130,6 +130,18 @@ class Graph:
):
vertices_ids.append(vertex_id)
successors = self.get_all_successors(vertex, flat=True)
# Update run_manager.run_predecessors because we are activating vertices
# The run_prdecessors is the predecessor map of the vertices
# we remove the vertex_id from the predecessor map whenever we run a vertex
# So we need to get all edges of the vertex and successors
# and run self.build_adjacency_maps(edges) to get the new predecessor map
# that is not complete but we can use to update the run_predecessors
edges_set = set()
for vertex in [vertex] + successors:
edges_set.update(vertex.edges)
edges = list(edges_set)
new_predecessor_map, _ = self.build_adjacency_maps(edges)
self.run_manager.run_predecessors.update(new_predecessor_map)
self.vertices_to_run.update(list(map(lambda x: x.id, successors)))
self.activated_vertices = vertices_ids
self.vertices_to_run.update(vertices_ids)
@ -401,14 +413,20 @@ class Graph:
"inactivated_vertices": self.inactivated_vertices,
}
def build_graph_maps(self):
def build_graph_maps(self, edges: Optional[List[ContractEdge]] = None, vertices: Optional[List[Vertex]] = None):
"""
Builds the adjacency maps for the graph.
"""
self.predecessor_map, self.successor_map = self.build_adjacency_maps()
if edges is None:
edges = self.edges
self.in_degree_map = self.build_in_degree()
self.parent_child_map = self.build_parent_child_map()
if vertices is None:
vertices = self.vertices
self.predecessor_map, self.successor_map = self.build_adjacency_maps(edges)
self.in_degree_map = self.build_in_degree(edges)
self.parent_child_map = self.build_parent_child_map(vertices)
def reset_inactivated_vertices(self):
"""
@ -433,9 +451,9 @@ class Graph:
for child_id in self.parent_child_map[vertex_id]:
self.mark_branch(child_id, state)
def build_parent_child_map(self):
def build_parent_child_map(self, vertices: List[Vertex]):
parent_child_map = defaultdict(list)
for vertex in self.vertices:
for vertex in vertices:
parent_child_map[vertex.id] = [child.id for child in self.get_successors(vertex)]
return parent_child_map
@ -559,6 +577,7 @@ class Graph:
self.update_vertex_from_another(self_vertex, other_vertex)
self.build_graph_maps()
self.define_vertices_lists()
self.increment_update_count()
return self
@ -944,8 +963,6 @@ class Graph:
node_name = node_id.split("-")[0]
if node_name in ["ChatOutput", "ChatInput"]:
return ChatVertex
elif node_name in ["ShouldRunNext"]:
return RoutingVertex
elif node_name in ["SharedState", "Notify", "Listen"]:
return StateVertex
elif node_base_type in lazy_load_vertex_dict.VERTEX_TYPE_MAP:
@ -1277,17 +1294,17 @@ class Graph:
def remove_from_predecessors(self, vertex_id: str):
self.run_manager.remove_from_predecessors(vertex_id)
def build_in_degree(self):
in_degree = defaultdict(int)
for edge in self.edges:
def build_in_degree(self, edges: List[ContractEdge]) -> Dict[str, int]:
in_degree: Dict[str, int] = defaultdict(int)
for edge in edges:
in_degree[edge.target_id] += 1
return in_degree
def build_adjacency_maps(self):
def build_adjacency_maps(self, edges: List[ContractEdge]) -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]:
"""Returns the adjacency maps for the graph."""
predecessor_map = defaultdict(list)
successor_map = defaultdict(list)
for edge in self.edges:
for edge in edges:
predecessor_map[edge.target_id].append(edge.source_id)
successor_map[edge.source_id].append(edge.target_id)
return predecessor_map, successor_map

View file

@ -15,7 +15,6 @@ from langflow.interface.wrappers.base import wrapper_creator
from langflow.utils.lazy_load import LazyLoadDictBase
CHAT_COMPONENTS = ["ChatInput", "ChatOutput", "TextInput", "SessionID"]
ROUTING_COMPONENTS = ["ShouldRunNext"]
class VertexTypesDict(LazyLoadDictBase):
@ -51,7 +50,6 @@ class VertexTypesDict(LazyLoadDictBase):
**{t: types.CustomComponentVertex for t in custom_component_creator.to_list()},
**{t: types.RetrieverVertex for t in retriever_creator.to_list()},
**{t: types.ChatVertex for t in CHAT_COMPONENTS},
**{t: types.RoutingVertex for t in ROUTING_COMPONENTS},
}
def get_custom_component_vertex_type(self):

Some files were not shown because too many files have changed in this diff Show more