Merge branch 'zustand/io/migration' into better_io
commit 390ccb892a
343 changed files with 8981 additions and 6790 deletions

@@ -56,6 +56,13 @@ LANGFLOW_REMOVE_API_KEYS=
# LANGFLOW_REDIS_CACHE_EXPIRE (default: 3600)
LANGFLOW_CACHE_TYPE=

# Set AUTO_LOGIN to false if you want to disable auto login
# and use the login form to login. LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD
# must be set if AUTO_LOGIN is set to false
# Values: true, false
LANGFLOW_AUTO_LOGIN=

# Superuser username
# Example: LANGFLOW_SUPERUSER=admin
LANGFLOW_SUPERUSER=

@@ -1,6 +0,0 @@
#!/bin/sh

added_files=$(git diff --name-only --cached --diff-filter=d)

make format
git add ${added_files}
.github/workflows/lint.yml (vendored, 8 changes)
@@ -3,7 +3,15 @@ name: lint
on:
  push:
    branches: [main]
    paths:
      - "poetry.lock"
      - "pyproject.toml"
      - "src/backend/**"
  pull_request:
    paths:
      - "poetry.lock"
      - "pyproject.toml"
      - "src/backend/**"

env:
  POETRY_VERSION: "1.7.0"
.github/workflows/test.yml (vendored, 8 changes)
@@ -3,8 +3,16 @@ name: test
on:
  push:
    branches: [main]
    paths:
      - "poetry.lock"
      - "pyproject.toml"
      - "src/backend/**"
  pull_request:
    branches: [dev]
    paths:
      - "poetry.lock"
      - "pyproject.toml"
      - "src/backend/**"

env:
  POETRY_VERSION: "1.5.0"
.vscode/launch.json (vendored, 3 changes)
@@ -17,6 +17,9 @@
      ],
      "jinja": true,
      "justMyCode": true,
      "env": {
        "LANGFLOW_LOG_LEVEL": "debug"
      },
      "envFile": "${workspaceFolder}/.env"
    },
    {
Makefile (4 changes)
@@ -3,10 +3,6 @@
all: help

init:
	@echo 'Installing pre-commit hooks'
	git config core.hooksPath .githooks
	@echo 'Making pre-commit hook executable'
	chmod +x .githooks/pre-commit
	@echo 'Installing backend dependencies'
	make install_backend
	@echo 'Installing frontend dependencies'
README.md (80 changes)
@@ -1,46 +1,27 @@
<!-- Title -->
<!-- markdownlint-disable MD030 -->

# ⛓️ Langflow

~ An effortless way to experiment and prototype [LangChain](https://github.com/hwchase17/langchain) pipelines ~
<h3>Discover a simpler & smarter way to build around Foundation Models</h3>

<p>
<img alt="GitHub Contributors" src="https://img.shields.io/github/contributors/logspace-ai/langflow" />
<img alt="GitHub Last Commit" src="https://img.shields.io/github/last-commit/logspace-ai/langflow" />
<img alt="" src="https://img.shields.io/github/repo-size/logspace-ai/langflow" />
<img alt="GitHub Issues" src="https://img.shields.io/github/issues/logspace-ai/langflow" />
<img alt="GitHub Pull Requests" src="https://img.shields.io/github/issues-pr/logspace-ai/langflow" />
<img alt="Github License" src="https://img.shields.io/github/license/logspace-ai/langflow" />
</p>
[](https://github.com/logspace-ai/langflow/releases)
[](https://github.com/logspace-ai/langflow/contributors)
[](https://github.com/logspace-ai/langflow/last-commit)
[](https://github.com/logspace-ai/langflow/issues)
[](https://github.com/logspace-ai/langflow/repo-size)
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/logspace-ai/langflow)
[](https://opensource.org/licenses/MIT)
[](https://star-history.com/#logspace-ai/langflow)
[](https://github.com/logspace-ai/langflow/fork)
[](https://twitter.com/langflow_ai)
[](https://discord.com/invite/EqksyE2EX9)
[](https://huggingface.co/spaces/Logspace/Langflow)
[](https://codespaces.new/logspace-ai/langflow)

<p>
<a href="https://discord.gg/EqksyE2EX9"><img alt="Discord Server" src="https://dcbadge.vercel.app/api/server/EqksyE2EX9?compact=true&style=flat"/></a>
<a href="https://huggingface.co/spaces/Logspace/Langflow"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
</p>
The easiest way to create and customize your flow

<a href="https://github.com/logspace-ai/langflow">
<img width="100%" src="https://github.com/logspace-ai/langflow/blob/dev/img/langflow-demo.gif?raw=true"></a>

<p>
</p>

# Table of Contents

- [⛓️ Langflow](#️-langflow)
- [Table of Contents](#table-of-contents)
- [📦 Installation](#-installation)
  - [Locally](#locally)
  - [HuggingFace Spaces](#huggingface-spaces)
- [🖥️ Command Line Interface (CLI)](#️-command-line-interface-cli)
  - [Usage](#usage)
    - [Environment Variables](#environment-variables)
- [Deployment](#deployment)
  - [Deploy Langflow on Google Cloud Platform](#deploy-langflow-on-google-cloud-platform)
  - [Deploy on Railway](#deploy-on-railway)
  - [Deploy on Render](#deploy-on-render)
- [🎨 Creating Flows](#-creating-flows)
- [👋 Contributing](#-contributing)
- [📄 License](#-license)
<img width="100%" src="https://github.com/logspace-ai/langflow/blob/dev/docs/static/img/new_langflow_demo.gif"></a>

# 📦 Installation
@@ -65,7 +46,7 @@ This will install the following dependencies:
- [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)
- [sentence-transformers](https://github.com/UKPLab/sentence-transformers)

You can still use models from projects like LocalAI
You can still use models from projects like LocalAI, Ollama, LM Studio, Jan and others.

Next, run:

@@ -117,7 +98,7 @@ Each option is detailed below:
- `--backend-only`: This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable.
- `--store`: This parameter, with a default value of `True`, enables the store features; use `--no-store` to deactivate it. It can be configured using the `LANGFLOW_STORE` environment variable.

These parameters are important for users who need to customize the behavior of Langflow, especially in development or specialized deployment scenarios. You may want to update the documentation to include these parameters for completeness and clarity.
These parameters are important for users who need to customize the behavior of Langflow, especially in development or specialized deployment scenarios.

### Environment Variables
@@ -147,19 +128,19 @@ Alternatively, click the **"Open in Cloud Shell"** button below to launch Google

# 🎨 Creating Flows

Creating flows with Langflow is easy. Simply drag sidebar components onto the canvas and connect them together to create your pipeline. Langflow provides a range of [LangChain components](https://python.langchain.com/docs/integrations/components) to choose from, including LLMs, prompt serializers, agents, and chains.
Creating flows with Langflow is easy. Simply drag components from the sidebar onto the canvas and connect them to start building your application.

Explore by editing prompt parameters, linking chains and agents, tracking an agent's thought process, and exporting your flow.
Explore by editing prompt parameters, grouping components into a single high-level component, and building your own Custom Components.

Once you're done, you can export your flow as a JSON file to use with LangChain.
To do so, click the "Export" button in the top right corner of the canvas, then
in Python, you can load the flow with:
Once you're done, you can export your flow as a JSON file.

Load the flow with:

```python
from langflow import load_flow_from_json

flow = load_flow_from_json("path/to/flow.json")
# Now you can use it like any chain
# Now you can use it
flow("Hey, have you heard of Langflow?")
```
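As a quick aside (not part of this diff), node parameters of an exported flow can typically be overridden at load time. A minimal sketch is below; the `tweaks` argument and the `"OpenAI-XXXXX"` node ID are assumptions used purely for illustration, so check the exported JSON for the real identifiers.

```python
from langflow import load_flow_from_json

# Hypothetical node ID taken from the exported JSON; adjust to your own flow.
TWEAKS = {"OpenAI-XXXXX": {"temperature": 0.2}}

# `tweaks` overrides node parameters at load time (assumed signature).
flow = load_flow_from_json("path/to/flow.json", tweaks=TWEAKS)
print(flow("Hey, have you heard of Langflow?"))
```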
@@ -167,15 +148,16 @@ flow("Hey, have you heard of Langflow?")

We welcome contributions from developers of all levels to our open-source project on GitHub. If you'd like to contribute, please check our [contributing guidelines](./CONTRIBUTING.md) and help make Langflow more accessible.

Join our [Discord](https://discord.com/invite/EqksyE2EX9) server to ask questions, make suggestions, and showcase your projects! 🦾

---

Join our [Discord](https://discord.com/invite/EqksyE2EX9) server to ask questions, make suggestions and showcase your projects! 🦾

<p>
</p>

[](https://star-history.com/#logspace-ai/langflow&Date)

# 🌟 Contributors

[](https://github.com/logspace-ai/langflow/graphs/contributors)

# 📄 License

Langflow is released under the MIT License. See the LICENSE file for details.
@@ -81,7 +81,18 @@ The CustomComponent class serves as the foundation for creating custom component
| _`required: bool`_ | Makes the field required. |
| _`info: str`_ | Adds a tooltip to the field. |
| _`file_types: List[str]`_ | This is a requirement if the _`field_type`_ is _file_. Defines which file types will be accepted. For example, _json_, _yaml_ or _yml_. |
| _`range_spec: langflow.field_typing.RangeSpec`_ | This is a requirement if the _`field_type`_ is _`float`_. Defines the range of values accepted and the step size. If none is defined, the default is _`[-1, 1, 0.1]`_. |
| _`title_case: bool`_ | Formats the name of the field when _`display_name`_ is not defined. Set it to False to keep the name as you set it in the _`build`_ method. |

<Admonition type="info" label="Tip">

Keys _`options`_ and _`value`_ can receive a method or function that returns a list of strings or a string, respectively. This is useful when you want to dynamically generate the options or the default value of a field. A refresh button will appear next to the field in the component, allowing the user to update the options or the default value.

</Admonition>

- The CustomComponent class also provides helpful methods for specific tasks (e.g., to load and use other flows from the Langflow platform):

| Method Name | Description |

@@ -96,6 +107,7 @@ The CustomComponent class serves as the foundation for creating custom component
| -------------- | ----------------------------------------------------------------------------- |
| _`status`_ | Displays the value it receives in the _`build`_ method. Useful for debugging. |
| _`field_order`_ | Defines the order the fields will be displayed in the canvas. |
| _`icon`_ | Defines the emoji (for example, _`:rocket:`_) that will be displayed in the canvas. |

<Admonition type="info" label="Tip">
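To make these keys concrete, here is a minimal, hypothetical component sketch. It assumes the `CustomComponent` base class and the `langflow.field_typing.RangeSpec` import referenced above; the field names and `RangeSpec` constructor arguments are invented for illustration only.

```python
from langflow import CustomComponent
from langflow.field_typing import RangeSpec


class TemperatureGreeter(CustomComponent):
    display_name = "Temperature Greeter"
    icon = ":rocket:"
    field_order = ["name", "temperature", "greeting"]

    def get_names(self) -> list:
        # Callable used for `options`: a refresh button appears next to the field.
        return ["Alice", "Bob", "Carol"]

    def build_config(self) -> dict:
        return {
            "name": {
                "display_name": "Name",
                "options": self.get_names,  # dynamically generated options
                "required": True,
            },
            "temperature": {
                "field_type": "float",
                "range_spec": RangeSpec(min=0.0, max=1.0, step=0.05),  # assumed fields
                "info": "Sampling temperature, shown as a slider.",
            },
            "greeting": {
                "title_case": False,  # keep the name exactly as written here
                "value": "hello",
            },
        }

    def build(self, name: str, temperature: float = 0.5, greeting: str = "hello") -> str:
        result = f"{greeting}, {name}! (temperature={temperature})"
        self.status = result  # surfaced in the canvas, useful for debugging
        return result
```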
@@ -98,9 +98,9 @@ Used to load [OpenAI’s](https://openai.com/) embedding models.

Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embeddings API](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings).

:::info
<Admonition type="info">
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
:::
</Admonition>

- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
- **location:** The default location to use when making API calls – defaults to `us-central1`.
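For reference, a hedged sketch of the underlying LangChain wrapper these two parameters map to; the `langchain_community` import path is an assumption based on the LangChain 0.1.x line pinned in this commit's `pyproject.toml`.

```python
from langchain_community.embeddings import VertexAIEmbeddings

# Uses application-default credentials unless an explicit `credentials` object is passed.
embeddings = VertexAIEmbeddings(location="us-central1")

vector = embeddings.embed_query("Hello, Vertex AI!")
print(len(vector))  # length of the embedding vector
```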
@@ -1,9 +1,8 @@

import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";

# I/O

## ChatInput
### ChatInput

This component is designed to get user input from the chat.

@@ -15,9 +14,7 @@ This component is designed to get user input from the chat.

- **Message:** specifies the message text. It is a multiline text input.

- **Session ID:** specifies the session ID of the chat history.

- **As Record:** if true, the message will be returned as a _`Record`_. Defaults to _`false`_.
- **Session ID:** specifies the session ID of the chat history. If provided, the message will be saved in the Message History.

<Admonition type="note" title="Note">
  <p>

@@ -25,7 +22,7 @@ This component is designed to get user input from the chat.
  </p>
</Admonition>

## ChatOutput
### ChatOutput

This component is designed to send a message to the chat.

@@ -35,65 +32,21 @@ This component is designed to send a message to the chat.

- **Sender Name:** specifies the name of the sender. Defaults to _`"AI"`_.

- **Session ID:** specifies the session ID of the chat history.
- **Session ID:** specifies the session ID of the chat history. If provided, the message will be saved in the Message History.

- **Message:** specifies the message text.

- **As Record:** if true, the message will be returned as a _`Record`_. Defaults to _`false`_.

<Admonition type="note" title="Note">
  <p>
    If _`As Record`_ is _`true`_ and the _`Message`_ is a _`Record`_, the data of the _`Record`_ will be updated with the _`Sender`_, _`Sender Name`_, and _`Session ID`_.
  </p>
</Admonition>

## StoreMessages

This component is designed to store messages in a structured format. It can store messages provided directly as records or construct records from provided texts and metadata.
### TextInput

**Params**
This component is designed for simple text input, allowing users to pass textual data to subsequent components in the workflow. It's particularly useful for scenarios where a brief user input is required to initiate or influence the flow.

- **Records:** (Optional) Specifies the list of records to store. Each `Record` should contain the keys `'sender'`, `'sender_name'`, and `'session_id'`. If not provided, `texts` must be used along with `session_id`, `sender`, and `sender_name` to construct the records.

- **Texts:** (Optional) Specifies the list of text messages to store. If `records` is not provided, `texts` must be provided along with `session_id`, `sender`, and `sender_name`.

- **Session ID:** (Optional) Specifies the session ID associated with the messages. Required if `records` is not provided and `texts` are used.

- **Sender:** (Optional) Specifies the identifier of the sender. Required if `records` is not provided and `texts` are used.

- **Sender Name:** (Optional) Specifies the name of the sender. Required if `records` is not provided and `texts` are used.

<Admonition type="note" title="Note">
  <p>
    The component prioritizes the use of `Records` for storing messages. If `Records` are not provided, it requires `Texts`, `Session ID`, `Sender`, and `Sender Name` to construct and store records. Ensure that either `Records` or the combination of `Texts`, `Session ID`, `Sender`, and `Sender Name` is provided.
  </p>
</Admonition>

## MessageHistory

This component is designed to retrieve stored messages based on various filters such as sender type, sender name, and session ID. It allows for a flexible retrieval of chat history, providing insights into past interactions.

**Params**

- **Sender Type:** (Optional) Specifies the type of the sender. Options are _`"Machine"`_ or _`"User"`_. Filters the messages by the type of sender.

- **Sender Name:** (Optional) Specifies the name of the sender. Filters the messages by the name of the sender.

- **Session ID:** (Optional) Specifies the session ID of the chat history. Filters the messages belonging to a specific session.

- **Number of Messages:** Specifies the number of messages to retrieve. Defaults to _`5`_. Determines how many recent messages from the chat history to fetch.

<Admonition type="note" title="Note">
  <p>
    The component retrieves messages based on the provided criteria. If no specific criteria are provided, it will return the most recent messages up to the specified limit. This component can be used to review past interactions and analyze the flow of conversations.
  </p>
</Admonition>

## TextInput

This component is designed for simple text input, allowing users to pass textual data to subsequent components in the workflow. It's particularly useful for scenarios where a brief user input is required to initiate or influence the process flow.

**Params**
@@ -101,6 +54,20 @@ This component is designed for simple text input, allowing users to pass textual

<Admonition type="note" title="Note">
  <p>
    The `TextInput` component serves as a straightforward means for capturing user input. It ensures that textual data can be seamlessly integrated into Langflow's component-based processing pipeline, fostering interactivity within automated workflows.
    The `TextInput` component serves as a straightforward means for setting Text input values in the chat window. It ensures that textual data can be seamlessly passed to subsequent components in the flow.
  </p>
</Admonition>

### TextOutput

This component is designed to display text data to the user. It's particularly useful for scenarios where you don't want to send the text data to the chat, but still want to display it.

**Params**

- **Value:** Specifies the text data to be displayed. If no value is provided, it defaults to an empty string.

<Admonition type="note" title="Note">
  <p>
    The `TextOutput` component serves as a straightforward means for displaying text data. It ensures that textual data can be seamlessly observed in the chat window throughout your flow.
  </p>
</Admonition>
@@ -40,9 +40,10 @@ Wrapper around Anthropic's large language model used for chat-based interactions

The `CTransformers` component provides access to the Transformer models implemented in C/C++ using the [GGML](https://github.com/ggerganov/ggml) library.

:::info
<Admonition type="info">

Make sure to have the `ctransformers` python package installed. Learn more about installation, supported models, and usage [here](https://github.com/marella/ctransformers).
:::
</Admonition>

**config:** Configuration for the Transformer models. Check out [config](https://github.com/marella/ctransformers#config). Defaults to:
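The component's actual default config is defined in its code; purely as an illustration, a config dict passed to the underlying LangChain wrapper might look like the sketch below. The values are invented, and the `langchain_community` import path is an assumption for the LangChain 0.1.x line used here.

```python
from langchain_community.llms import CTransformers

# Illustrative values only; see the linked ctransformers config reference for all keys.
config = {
    "max_new_tokens": 256,
    "temperature": 0.8,
    "top_k": 40,
    "top_p": 0.95,
    "repetition_penalty": 1.1,
    "context_length": 2048,
}

llm = CTransformers(
    model="marella/gpt-2-ggml",  # any GGML model repo or local path
    config=config,
)
print(llm.invoke("Tell me a short joke."))
```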
@@ -115,9 +116,9 @@ Wrapper around [Cohere's](https://cohere.com) large language models.

Wrapper around [HuggingFace](https://www.huggingface.co/models) models.

:::info
<Admonition type="info">
The HuggingFace Hub is an online platform that hosts over 120k models, 20k datasets, and 50k demo apps, all of which are open-source and publicly available. Discover more at [HuggingFace](http://www.huggingface.co).
:::
</Admonition>

- **huggingfacehub_api_token:** Token needed to authenticate the API.
- **model_kwargs:** Keyword arguments to pass to the model.
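A hedged sketch of how those two parameters appear on the underlying LangChain wrapper; the repo ID and keyword values are illustrative, and the `langchain_community` import path is an assumption.

```python
import os
from langchain_community.llms import HuggingFaceHub

llm = HuggingFaceHub(
    repo_id="google/flan-t5-large",  # any hosted model repo
    huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
    model_kwargs={"temperature": 0.5, "max_length": 64},
)
print(llm.invoke("Translate to German: How are you?"))
```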
@@ -130,9 +131,9 @@ The HuggingFace Hub is an online platform that hosts over 120k models, 20k datas

The `LlamaCpp` component provides access to the `llama.cpp` models.

:::info
<Admonition type="info">
Make sure to have the `llama.cpp` python package installed. Learn more about installation, supported models, and usage [here](https://github.com/ggerganov/llama.cpp).
:::
</Admonition>

- **echo:** Whether to echo the prompt – defaults to `False`.
- **f16_kv:** Use half-precision for key/value cache – defaults to `True`.
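For illustration, the two options above map onto the underlying LangChain `LlamaCpp` wrapper roughly as follows; the model path is hypothetical and the import path assumes `langchain_community`.

```python
from langchain_community.llms import LlamaCpp

llm = LlamaCpp(
    model_path="models/llama-2-7b.Q4_K_M.gguf",  # hypothetical local model file
    echo=False,   # do not echo the prompt back
    f16_kv=True,  # half-precision key/value cache
    n_ctx=2048,
    temperature=0.7,
)
print(llm.invoke("Name three uses for a paperclip."))
```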
@@ -181,9 +182,9 @@ Wrapper around [OpenAI's](https://openai.com) large language models.

Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) large language models.

:::info
<Admonition type="info">
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
:::
</Admonition>

- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
- **location:** The default location to use when making API calls – defaults to `us-central1`.

@@ -203,9 +204,9 @@ Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP).

Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) large language models.

:::info
<Admonition type="info">
Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP). It provides access, management, and development of applications and services through global data centers. To use Vertex AI PaLM, you need to have the [google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/) Python package installed and credentials configured for your environment.
:::
</Admonition>

- **credentials:** The default custom credentials (google.auth.credentials.Credentials) to use.
- **location:** The default location to use when making API calls – defaults to `us-central1`.
@@ -12,6 +12,26 @@ Memory is a concept in chat-based applications that allows the system to remembe

---

### MessageHistory

This component is designed to retrieve stored messages based on various filters such as sender type, sender name, session ID, and a specific file path where messages are stored. It allows for a flexible retrieval of chat history, providing insights into past interactions.

**Params**

- **Sender Type:** (Optional) Specifies the type of the sender. Options are _`"Machine"`_, _`"User"`_, or _`"Machine and User"`_. Filters the messages by the type of the sender.

- **Sender Name:** (Optional) Specifies the name of the sender. Filters the messages by the name of the sender.

- **Session ID:** (Optional) Specifies the session ID of the chat history. Filters the messages belonging to a specific session.

- **Number of Messages:** Specifies the number of messages to retrieve. Defaults to _`5`_. Determines how many recent messages from the chat history to fetch.

<Admonition type="note" title="Note">
  <p>
    The component retrieves messages based on the provided criteria, including the specific file path for stored messages. If no specific criteria are provided, it will return the most recent messages up to the specified limit. This component can be used to review past interactions and analyze the flow of conversations.
  </p>
</Admonition>

### ConversationBufferMemory

The `ConversationBufferMemory` component is a type of memory system that plainly stores the last few inputs and outputs of a conversation.
@@ -27,7 +47,7 @@ The `ConversationBufferMemory` component is a type of memory system that plainly

### ConversationBufferWindowMemory

`ConversationBufferWindowMemory` is a variation of the `ConversationBufferMemory` that maintains a list of the recent interactions in a conversation. It only keeps the last K interactions in memory, which can be useful for maintaining a sliding window of the most recent interactions without letting the buffer get too large.

**Params**
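As a quick refresher on the underlying LangChain behavior (a sketch, not Langflow component code), the window size `k` caps how many recent exchanges are kept:

```python
from langchain.memory import ConversationBufferWindowMemory

# Keep only the last 2 exchanges in the buffer.
memory = ConversationBufferWindowMemory(k=2)
memory.save_context({"input": "Hi"}, {"output": "Hello!"})
memory.save_context({"input": "How are you?"}, {"output": "Great, thanks."})
memory.save_context({"input": "What did I say first?"}, {"output": "You said 'Hi'."})

# Only the two most recent exchanges remain in the history.
print(memory.load_memory_variables({})["history"])
```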
@@ -72,7 +92,7 @@ The `ConversationEntityMemory` component incorporates intricate memory structure

### ConversationSummaryMemory

The `ConversationSummaryMemory` is a memory component that creates a summary of the conversation over time. It condenses information from the conversation and stores the current summary in memory. It is particularly useful for longer conversations where keeping the entire message history in the prompt would take up too many tokens.

**Params**
@@ -9,6 +9,21 @@ import Admonition from '@theme/Admonition';
</Admonition>


### SearchApi

Real-time search engine results API. Returns structured JSON data that includes answer box, knowledge graph, organic results, and more.

**Parameters**

- **Api Key:** A unique identifier for the SearchApi, necessary for authenticating requests to real-time search engines. This key can be retrieved from the [SearchApi dashboard](https://www.searchapi.io/).
- **Engine:** Specifies the search engine. For instance: google, google_scholar, bing, youtube, and youtube_transcripts. A full list of supported engines is available in the [documentation](https://www.searchapi.io/docs/google).
- **Parameters:** Allows the selection of any parameters recognized by SearchApi, with some being required and others optional.

**Output**

- **Document:** The JSON response from the request as a Document.


### BingSearchRun

Bing Search is a web search engine owned and operated by Microsoft. It provides search results for various types of content, including web pages, images, videos, and news articles. It uses a combination of algorithms and human editors to deliver search results to users.
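To make the parameters above concrete, here is a hedged sketch of the LangChain wrapper this tool builds on; the import path and the extra query parameters are assumptions for the LangChain 0.1.x line pinned in this commit.

```python
import os
from langchain_community.utilities import SearchApiAPIWrapper

search = SearchApiAPIWrapper(
    searchapi_api_key=os.environ["SEARCHAPI_API_KEY"],
    engine="google",  # e.g. google, google_scholar, bing, youtube_transcripts
)

# Extra engine-specific parameters can be passed on the call itself.
print(search.run("What is Langflow?", gl="us", hl="en"))
```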
@@ -60,4 +75,4 @@ Tool for getting metadata about a SQL database. The input to this tool is a comm

**Params**

- **Db:** SQLDatabase to query.
@@ -12,7 +12,7 @@

## 🐦 Stay tuned for **Langflow** on Twitter

Follow [@logspace_ai](https://twitter.com/langflow_ai) on **Twitter** to get the latest news about **Langflow**.
Follow [@langflow_ai](https://twitter.com/langflow_ai) on **Twitter** to get the latest news about **Langflow**.

---
## ⭐️ Star **Langflow** on GitHub
@@ -4,9 +4,8 @@

This guide will help you set up a Langflow development VM in a Google Cloud Platform project using Google Cloud Shell.

:::note
When Cloud Shell opens, be sure to select **Trust repo**. Some `gcloud` commands might not run in an ephemeral Cloud Shell environment.
:::
> Note: When Cloud Shell opens, be sure to select **Trust repo**. Some `gcloud` commands might not run in an ephemeral Cloud Shell environment.


## Standard VM
@@ -14,6 +14,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
  alt="Docusaurus themed image"
  sources={{
    light: "img/buffer-memory.png",
    dark: "img/buffer-memory.png",
  }}
/>

@@ -20,6 +20,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
  alt="Docusaurus themed image"
  sources={{
    light: "img/basic-chat.png",
    dark: "img/basic-chat.png",
  }}
/>

@@ -32,6 +32,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
  alt="Docusaurus themed image"
  sources={{
    light: "img/csv-loader.png",
    dark: "img/csv-loader.png",
  }}
/>
@@ -39,12 +40,12 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";

<Admonition type="note" title="LangChain Components 🦜🔗">

- [`CSVLoader`](https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/csv)
- [`CSVLoader`](https://python.langchain.com/docs/integrations/document_loaders/csv)
- [`CharacterTextSplitter`](https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/character_text_splitter)
- [`OpenAIEmbedding`](https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/openai)
- [`Chroma`](https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/chroma)
- [`OpenAIEmbedding`](https://python.langchain.com/docs/integrations/text_embedding/openai)
- [`Chroma`](https://python.langchain.com/docs/integrations/vectorstores/chroma)
- [`VectorStoreInfo`](https://python.langchain.com/docs/modules/data_connection/vectorstores/)
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`VectorStoreAgent`](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore)
- [`VectorStoreAgent`](https://js.langchain.com/docs/modules/agents/tools/how_to/agents_with_vectorstores)

</Admonition>
@@ -14,6 +14,7 @@ The CustomComponent class allows us to create components that interact with Lang
  alt="Document Processor Component"
  sources={{
    light: "img/flow_runner.png",
    dark: "img/flow_runner.png",
  }}
  style={{
    width: "30%",

@@ -339,6 +340,7 @@ Done! This is what our script and custom component looks like:
  alt="Document Processor Code"
  sources={{
    light: "img/flow_runner_code.png",
    dark: "img/flow_runner_code.png",
  }}
  style={{
    maxWidth: "100%",

@@ -353,6 +355,7 @@ Done! This is what our script and custom component looks like:
  alt="Document Processor Component"
  sources={{
    light: "img/flow_runner.png",
    dark: "img/flow_runner.png",
  }}
  style={{
    width: "40%",
@@ -12,6 +12,7 @@ Langflow Examples is a repository on [GitHub](https://github.com/logspace-ai/lan
  alt="Docusaurus themed image"
  sources={{
    light: "img/community-examples.png",
    dark: "img/community-examples.png",
  }}
  style={{ width: "100%" }}
/>

@@ -32,6 +32,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
  alt="Docusaurus themed image"
  sources={{
    light: "img/midjourney-prompt-chain.png",
    dark: "img/midjourney-prompt-chain.png",
  }}
/>

@@ -40,6 +41,6 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
<Admonition type="note" title="LangChain Components 🦜🔗">

- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`ConversationSummaryMemory`](https://python.langchain.com/docs/modules/memory/how_to/summary)
- [`ConversationSummaryMemory`](https://python.langchain.com/docs/modules/memory/types/summary)

</Admonition>
@@ -24,7 +24,7 @@ https://pt.wikipedia.org/wiki/Harry_Potter

<Admonition type="info">
  Learn more about Multiple Vector Stores
  [here](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore?highlight=Multiple%20Vector%20Stores#multiple-vectorstores).
  [here](https://python.langchain.com/docs/modules/data_connection/vectorstores/).
</Admonition>

## ⛓️ Langflow Example

@@ -37,6 +37,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
  alt="Docusaurus themed image"
  sources={{
    light: "img/multiple-vectorstores.png",
    dark: "img/multiple-vectorstores.png",
  }}
/>

@@ -44,14 +45,14 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";

<Admonition type="note" title="LangChain Components 🦜🔗">

- [`WebBaseLoader`](https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/web_base)
- [`TextLoader`](https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/unstructured_file)
- [`WebBaseLoader`](https://python.langchain.com/docs/integrations/document_loaders/web_base)
- [`TextLoader`](https://python.langchain.com/docs/modules/data_connection/document_loaders/)
- [`CharacterTextSplitter`](https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/character_text_splitter)
- [`OpenAIEmbedding`](https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/openai)
- [`Chroma`](https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/chroma)
- [`OpenAIEmbedding`](https://python.langchain.com/docs/integrations/text_embedding/openai)
- [`Chroma`](https://python.langchain.com/docs/integrations/vectorstores/chroma)
- [`VectorStoreInfo`](https://python.langchain.com/docs/modules/data_connection/vectorstores/)
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`VectorStoreRouterToolkit`](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore)
- [`VectorStoreRouterAgent`](https://python.langchain.com/docs/modules/agents/toolkits/vectorstore)
- [`VectorStoreRouterToolkit`](https://js.langchain.com/docs/modules/agents/tools/how_to/agents_with_vectorstores)
- [`VectorStoreRouterAgent`](https://js.langchain.com/docs/modules/agents/tools/how_to/agents_with_vectorstores)

</Admonition>
@@ -28,7 +28,7 @@ The `AgentInitializer` component is a quick way to construct an agent from the m
<Admonition type="info">
  The `PythonFunction` is a custom component that uses the LangChain 🦜🔗 tool
  decorator. Learn more about it
  [here](https://python.langchain.com/docs/modules/agents/tools/how_to/custom_tools).
  [here](https://python.langchain.com/docs/modules/agents/tools/custom_tools).
</Admonition>

## ⛓️ Langflow Example

@@ -41,6 +41,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
  alt="Docusaurus themed image"
  sources={{
    light: "img/python-function.png",
    dark: "img/python-function.png",
  }}
/>

@@ -48,7 +49,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";

<Admonition type="note" title="LangChain Components 🦜🔗">

- [`PythonFunctionTool`](https://python.langchain.com/docs/modules/agents/tools/how_to/custom_tools)
- [`PythonFunctionTool`](https://python.langchain.com/docs/modules/agents/tools/custom_tools)
- [`ChatOpenAI`](https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai)
- [`AgentInitializer`](https://python.langchain.com/docs/modules/agents/)
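Since the `PythonFunction` component builds on the LangChain tool decorator mentioned above, here is a minimal standalone sketch of that decorator; the function itself is invented for illustration.

```python
from langchain.tools import tool


@tool
def word_count(text: str) -> int:
    """Count the number of words in the given text."""
    return len(text.split())


# Single-input tools can be invoked directly for a quick check.
print(word_count.run("Langflow makes building flows easy"))
```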
docs/docs/examples/searchapi-tool.mdx (new file, 52 changes)
@@ -0,0 +1,52 @@
import Admonition from "@theme/Admonition";

# SearchApi Tool

The [SearchApi](https://www.searchapi.io/) allows developers to retrieve results from search engines such as Google, Google Scholar, YouTube, YouTube transcripts, and more, and can be used in Langflow through the `SearchApi` tool.

<Admonition type="info">
  To use the SearchApi, you must first obtain an API key by registering at [SearchApi's website](https://www.searchapi.io/).
</Admonition>

In the given example, we specify `engine` as `youtube_transcripts` and provide a `video_id`.

<Admonition type="info">
  All engines and parameters can be found in [SearchApi documentation](https://www.searchapi.io/docs/google).
</Admonition>

The `RetrievalQA` chain processes a `Document` along with a user's question to return an answer.

<Admonition type="tip">
  In this example, we used [`ChatOpenAI`](https://platform.openai.com/) as the
  LLM, but feel free to experiment with other Language Models!
</Admonition>

The `RetrievalQA` takes `CombineDocsChain` and `SearchApi` tool as inputs, using the tool as a `Document` to answer questions.

<Admonition type="info">
  Learn more about the SearchApi
  [here](https://python.langchain.com/docs/integrations/tools/searchapi).
</Admonition>

## ⛓️ Langflow Example

import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";

<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/searchapi-tool.png",
  }}
/>

#### <a target="\_blank" href="json_files/SearchApi_Tool.json" download>Download Flow</a>

<Admonition type="note" title="LangChain Components 🦜🔗">

- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`SearchApiAPIWrapper`](https://python.langchain.com/docs/integrations/providers/searchapi#wrappers)
- [`ZeroShotAgent`](https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent)

</Admonition>
@@ -22,7 +22,7 @@ The `ZeroShotAgent` takes the `LLMChain` and the `Search` tool as inputs, using

<Admonition type="info">
  Learn more about the Serp API
  [here](https://python.langchain.com/docs/modules/agents/tools/integrations/serpapi).
  [here](https://python.langchain.com/docs/integrations/providers/serpapi).
</Admonition>

## ⛓️ Langflow Example

@@ -35,6 +35,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
  alt="Docusaurus themed image"
  sources={{
    light: "img/serp-api-tool.png",
    dark: "img/serp-api-tool.png",
  }}
/>

@@ -45,7 +46,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
- [`ZeroShotPrompt`](https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/)
- [`OpenAI`](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai)
- [`LLMChain`](https://python.langchain.com/docs/modules/chains/foundational/llm_chain)
- [`Search`](https://python.langchain.com/docs/modules/agents/tools/integrations/serpapi)
- [`Search`](https://python.langchain.com/docs/integrations/providers/serpapi)
- [`ZeroShotAgent`](https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent)

</Admonition>
@@ -13,6 +13,7 @@ Creating flows with Langflow is easy. Drag sidebar components onto the canvas an
  alt="Docusaurus themed image"
  sources={{
    light: "img/langflow_canvas.png",
    dark: "img/langflow_canvas.png"
  }}
/>

@@ -12,6 +12,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
  alt="Docusaurus themed image"
  sources={{
    light: "img/hugging-face.png",
    dark: "img/hugging-face.png",
  }}
  style={{ width: "100%" }}
/>

@@ -17,6 +17,7 @@ Langflow offers an API Key functionality that allows users to access their indiv
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/api-key.png"),
    dark: useBaseUrl("img/api-key.png"),
  }}
  style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
/>
@@ -13,6 +13,7 @@ Langflow’s chat interface provides a user-friendly experience and functionalit
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/chat_interface.png"),
    dark: useBaseUrl("img/chat_interface.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

@@ -25,6 +26,7 @@ Notice that editing variables in the chat interface take place temporarily and w
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/chat_interface2.png"),
    dark: useBaseUrl("img/chat_interface2.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

@@ -36,6 +38,7 @@ To view the complete prompt in its original, structured format, click the "Displ
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/chat_interface3.png"),
    dark: useBaseUrl("img/chat_interface3.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

@@ -47,6 +50,7 @@ In the chat interface, you can redefine which variable should be interpreted as
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/chat_interface4.png"),
    dark: useBaseUrl("img/chat_interface4.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
@@ -38,6 +38,7 @@ import Admonition from "@theme/Admonition";
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/widget-sidebar.png"),
    dark: useBaseUrl("img/widget-sidebar.png"),
  }}
  style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
/>

@@ -53,6 +54,7 @@ import Admonition from "@theme/Admonition";
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/widget-code.png"),
    dark: useBaseUrl("img/widget-code.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

@@ -30,6 +30,7 @@ Components are the building blocks of the flows. They are made of inputs, output
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/single-compenent.png"),
    dark: useBaseUrl("img/single-compenent.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

@@ -63,6 +63,7 @@ class DocumentProcessor(CustomComponent):
  alt="Document Processor Component"
  sources={{
    light: "img/document_processor.png",
    dark: "img/document_processor.png",
  }}
  style={{
    margin: "0 auto",

@@ -330,6 +331,7 @@ All done! This is what our script and brand-new custom component look like:
  alt="Document Processor Code"
  sources={{
    light: "img/document_processor_code.png",
    dark: "img/document_processor_code.png",
  }}
  style={{
    maxWidth: "100%",

@@ -344,6 +346,7 @@ All done! This is what our script and brand-new custom component look like:
  alt="Document Processor Component"
  sources={{
    light: "img/document_processor.png",
    dark: "img/document_processor.png",
  }}
  style={{
    width: "40%",
@@ -18,6 +18,7 @@ import Admonition from "@theme/Admonition";
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/features.png"),
    dark: useBaseUrl("img/features.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

@@ -86,6 +86,7 @@ With _`LANGFLOW_AUTO_LOGIN`_ set to _`False`_, Langflow requires users to sign u
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/sign-up.png"),
    dark: useBaseUrl("img/sign-up.png"),
  }}
  style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
/>

@@ -102,6 +103,7 @@ Users can change their profile settings by clicking on the profile icon in the t
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/my-account.png"),
    dark: useBaseUrl("img/my-account.png"),
  }}
  style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
/>

@@ -112,6 +114,7 @@ By clicking on **Profile Settings**, the user is taken to the profile settings p
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/profile-settings.png"),
    dark: useBaseUrl("img/profile-settings.png"),
  }}
  style={{ maxWidth: "600px", margin: "0 auto" }}
/>

@@ -122,6 +125,7 @@ By clicking on **Admin Page**, the superuser is taken to the admin page, where t
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/admin-page.png"),
    dark: useBaseUrl("img/admin-page.png"),
  }}
  style={{ maxWidth: "600px", margin: "0 auto" }}
@@ -13,6 +13,7 @@ The prompt template allows users to create prompts and define variables that pro
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/prompt_customization.png"),
    dark: useBaseUrl("img/prompt_customization.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

@@ -25,6 +26,7 @@ Variables can be used to define instructions, questions, context, inputs, or exa
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/prompt_customization2.png"),
    dark: useBaseUrl("img/prompt_customization2.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

@@ -37,6 +39,7 @@ Once inserted, these variables are immediately recognized as new fields in the p
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/prompt_customization3.png"),
    dark: useBaseUrl("img/prompt_customization3.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

@@ -49,6 +52,7 @@ You can also use documents or output parsers as prompt variables. By plugging th
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/prompt_customization4.png"),
    dark: useBaseUrl("img/prompt_customization4.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

@@ -63,6 +67,7 @@ If working with an interactive (chat-like) flow, remember to keep one of the inp
  alt="Docusaurus themed image"
  sources={{
    light: useBaseUrl("img/prompt_customization5.png"),
    dark: useBaseUrl("img/prompt_customization5.png"),
  }}
  style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
@@ -1,6 +1,6 @@
# 👋 Welcome to Langflow

Langflow is an easy way to prototype [LangChain](https://github.com/hwchase17/langchain) flows. The drag-and-drop feature allows quick and effortless experimentation, while the built-in chat interface facilitates real-time interaction. It provides options to edit prompt parameters, create chains and agents, track thought processes, and export flows.
Langflow is an easy way to create flows. The drag-and-drop feature allows quick and effortless experimentation, while the built-in chat interface facilitates real-time interaction. It provides options to edit prompt parameters, create chains and agents, track thought processes, and export flows.

import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";

@@ -11,7 +11,8 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
<ZoomableImage
  alt="Docusaurus themed image"
  sources={{
    light: "img/new_langflow.gif",
    light: "img/new_langflow_demo.gif",
    dark: "img/new_langflow_demo.gif",
  }}
  style={{ width: "100%" }}
/>
@@ -82,6 +82,7 @@ module.exports = {
        "examples/buffer-memory",
        "examples/midjourney-prompt-chain",
        "examples/csv-loader",
        "examples/searchapi-tool",
        "examples/serp-api-tool",
        "examples/multiple-vectorstores",
        "examples/python-function",
Binary and asset changes:
- (image, modified) Before: 20 MiB, After: 20 MiB
- docs/static/img/searchapi-tool.png (vendored, new file, binary file not shown) After: 654 KiB
- docs/static/json_files/SearchApi_Tool.json (vendored, new file, 1 change) File diff suppressed because one or more lines are too long
- docs/static/videos/langflow_api.mp4 (vendored, binary file not shown)
- docs/static/videos/langflow_build.mp4 (vendored, binary file not shown)
- docs/static/videos/langflow_collection.mp4 (vendored, binary file not shown)
- docs/static/videos/langflow_collection_example.mp4 (vendored, binary file not shown)
- docs/static/videos/langflow_fork.mp4 (vendored, binary file not shown)
- docs/static/videos/langflow_parameters.mp4 (vendored, binary file not shown)
- docs/static/videos/langflow_widget.mp4 (vendored, binary file not shown)
- (image, binary file not shown) Before: 2 MiB
- (image, binary file not shown) Before: 550 KiB

poetry.lock (generated, 1656 changes): File diff suppressed because it is too large. Load diff.
@@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.6.7a5"
version = "0.7.0a0"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [

@@ -39,14 +39,12 @@ gunicorn = "^21.2.0"
langchain = "~0.1.0"
openai = "^1.12.0"
pandas = "2.2.0"
chromadb = "^0.4.0"
chromadb = "^0.4.23"
huggingface-hub = { version = "^0.20.0", extras = ["inference"] }
rich = "^13.7.0"
llama-cpp-python = { version = "~0.2.0", optional = true }
networkx = "^3.1"
unstructured = "^0.12.0"
pypdf = "^4.0.0"
lxml = "^4.9.2"
pysrt = "^1.1.2"
fake-useragent = "^1.4.0"
docstring-parser = "^0.15"

@@ -63,7 +61,7 @@ python-multipart = "^0.0.7"
sqlmodel = "^0.0.14"
faiss-cpu = "^1.7.4"
anthropic = "^0.15.0"
orjson = "3.9.3"
orjson = "3.9.15"
multiprocess = "^0.70.14"
cachetools = "^5.3.1"
types-cachetools = "^5.3.0.5"

@@ -105,8 +103,9 @@ langchain-google-genai = "^0.0.6"
elasticsearch = "^8.12.0"
pytube = "^15.0.0"
python-socketio = "^5.11.0"
llama-index = "0.9.48"
llama-index = "^0.10.13"
langchain-openai = "^0.0.6"
unstructured = { extras = ["md"], version = "^0.12.4" }

[tool.poetry.group.dev.dependencies]
pytest-asyncio = "^0.23.1"
@@ -109,7 +109,11 @@ def version_callback(value: bool):
@app.callback()
def main_entry_point(
    version: bool = typer.Option(
        None, "--version", callback=version_callback, is_eager=True, help="Show the version and exit."
        None,
        "--version",
        callback=version_callback,
        is_eager=True,
        help="Show the version and exit.",
    ),
):
    """
@@ -63,7 +63,7 @@ version_path_separator = os # Use os.pathsep. Default configuration used for ne
# This is the path to the db in the root of the project.
# When the user runs Langflow the database url will
# be set dynamically.
sqlalchemy.url = sqlite:///../../../langflow.db
sqlalchemy.url = sqlite:///./langflow.db


[post_write_hooks]

@@ -98,7 +98,7 @@ handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
level = DEBUG
handlers =
qualname = alembic
@@ -1,10 +1,11 @@
import os
from logging.config import fileConfig

from sqlalchemy import engine_from_config
from sqlalchemy import pool

from alembic import context
from loguru import logger
from sqlalchemy import engine_from_config, pool

from langflow.services.database.models import *  # noqa
from langflow.services.database.service import SQLModel

# this is the Alembic Config object, which provides

@@ -40,7 +41,8 @@ def run_migrations_offline() -> None:
    script output.

    """
    url = config.get_main_option("sqlalchemy.url")
    url = os.getenv("LANGFLOW_DATABASE_URL")
    url = url or config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,

@@ -60,12 +62,17 @@ def run_migrations_online() -> None:
    and associate a connection with the context.

    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )
    from langflow.services.deps import get_db_service

    try:
        connectable = get_db_service().engine
    except Exception as e:
        logger.error(f"Error getting database engine: {e}")
        connectable = engine_from_config(
            config.get_section(config.config_ini_section, {}),
            prefix="sqlalchemy.",
            poolclass=pool.NullPool,
        )
    with connectable.connect() as connection:
        context.configure(
            connection=connection, target_metadata=target_metadata, render_as_batch=True
@ -10,6 +10,7 @@ from typing import Sequence, Union
|
|||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
import sqlmodel
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
${imports if imports else ""}
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
|
|
@ -20,8 +21,12 @@ depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
|
|||
|
||||
|
||||
def upgrade() -> None:
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
${upgrades if upgrades else "pass"}
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
${downgrades if downgrades else "pass"}
|
||||
|
|
|
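With the template change, every freshly generated revision already has `conn` and `inspector` in scope, so autogenerated bodies can be made safe to re-run. A generated upgrade() might then look roughly like this; the table and column names are placeholders:

def upgrade() -> None:
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)  # type: ignore
    # Skip the DDL if an earlier, partially applied run already created the column.
    existing = [column["name"] for column in inspector.get_columns("example_table")]
    if "example_column" not in existing:
        with op.batch_alter_table("example_table", schema=None) as batch_op:
            batch_op.add_column(sa.Column("example_column", sa.String(), nullable=True))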
|||
|
|
@ -5,28 +5,43 @@ Revises: 1ef9c4f3765d
|
|||
Create Date: 2023-12-13 18:55:52.587360
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = '006b3990db50'
|
||||
down_revision: Union[str, None] = '1ef9c4f3765d'
|
||||
revision: str = "006b3990db50"
|
||||
down_revision: Union[str, None] = "1ef9c4f3765d"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
api_key_constraints = inspector.get_unique_constraints("apikey")
|
||||
flow_constraints = inspector.get_unique_constraints("flow")
|
||||
user_constraints = inspector.get_unique_constraints("user")
|
||||
try:
|
||||
with op.batch_alter_table('apikey', schema=None) as batch_op:
|
||||
batch_op.create_unique_constraint('uq_apikey_id', ['id'])
|
||||
if not any(
|
||||
constraint["name"] == "uq_apikey_id" for constraint in api_key_constraints
|
||||
):
|
||||
with op.batch_alter_table("apikey", schema=None) as batch_op:
|
||||
|
||||
with op.batch_alter_table('flow', schema=None) as batch_op:
|
||||
batch_op.create_unique_constraint('uq_flow_id', ['id'])
|
||||
|
||||
with op.batch_alter_table('user', schema=None) as batch_op:
|
||||
batch_op.create_unique_constraint('uq_user_id', ['id'])
|
||||
batch_op.create_unique_constraint("uq_apikey_id", ["id"])
|
||||
if not any(
|
||||
constraint["name"] == "uq_flow_id" for constraint in flow_constraints
|
||||
):
|
||||
with op.batch_alter_table("flow", schema=None) as batch_op:
|
||||
batch_op.create_unique_constraint("uq_flow_id", ["id"])
|
||||
if not any(
|
||||
constraint["name"] == "uq_user_id" for constraint in user_constraints
|
||||
):
|
||||
with op.batch_alter_table("user", schema=None) as batch_op:
|
||||
batch_op.create_unique_constraint("uq_user_id", ["id"])
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
|
|
@ -36,15 +51,24 @@ def upgrade() -> None:
|
|||
|
||||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
api_key_constraints = inspector.get_unique_constraints("apikey")
|
||||
flow_constraints = inspector.get_unique_constraints("flow")
|
||||
user_constraints = inspector.get_unique_constraints("user")
|
||||
try:
|
||||
with op.batch_alter_table('user', schema=None) as batch_op:
|
||||
batch_op.drop_constraint('uq_user_id', type_='unique')
|
||||
if any(
|
||||
constraint["name"] == "uq_apikey_id" for constraint in api_key_constraints
|
||||
):
|
||||
with op.batch_alter_table("user", schema=None) as batch_op:
|
||||
batch_op.drop_constraint("uq_user_id", type_="unique")
|
||||
if any(constraint["name"] == "uq_flow_id" for constraint in flow_constraints):
|
||||
with op.batch_alter_table("flow", schema=None) as batch_op:
|
||||
batch_op.drop_constraint("uq_flow_id", type_="unique")
|
||||
if any(constraint["name"] == "uq_user_id" for constraint in user_constraints):
|
||||
|
||||
with op.batch_alter_table('flow', schema=None) as batch_op:
|
||||
batch_op.drop_constraint('uq_flow_id', type_='unique')
|
||||
|
||||
with op.batch_alter_table('apikey', schema=None) as batch_op:
|
||||
batch_op.drop_constraint('uq_apikey_id', type_='unique')
|
||||
with op.batch_alter_table("apikey", schema=None) as batch_op:
|
||||
batch_op.drop_constraint("uq_apikey_id", type_="unique")
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
|
|
|
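Since this revision repeats the same check-and-create block for apikey, flow and user, the logic could equally be written as a loop. A hedged sketch, behaviourally equivalent to the hunk above apart from the broad try/except:

def upgrade() -> None:
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)  # type: ignore
    for table in ("apikey", "flow", "user"):
        existing = {c["name"] for c in inspector.get_unique_constraints(table)}
        if f"uq_{table}_id" not in existing:
            with op.batch_alter_table(table, schema=None) as batch_op:
                batch_op.create_unique_constraint(f"uq_{table}_id", ["id"])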
|||
|
|
@ -5,67 +5,25 @@ Revises: 006b3990db50
|
|||
Create Date: 2024-01-17 10:32:56.686287
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = '0b8757876a7c'
|
||||
down_revision: Union[str, None] = '006b3990db50'
|
||||
revision: str = "0b8757876a7c"
|
||||
down_revision: Union[str, None] = "006b3990db50"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
try:
|
||||
with op.batch_alter_table('apikey', schema=None) as batch_op:
|
||||
batch_op.create_index(batch_op.f('ix_apikey_api_key'), ['api_key'], unique=True)
|
||||
batch_op.create_index(batch_op.f('ix_apikey_name'), ['name'], unique=False)
|
||||
batch_op.create_index(batch_op.f('ix_apikey_user_id'), ['user_id'], unique=False)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
try:
|
||||
with op.batch_alter_table('flow', schema=None) as batch_op:
|
||||
batch_op.create_index(batch_op.f('ix_flow_description'), ['description'], unique=False)
|
||||
batch_op.create_index(batch_op.f('ix_flow_name'), ['name'], unique=False)
|
||||
batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
pass
|
||||
|
||||
try:
|
||||
with op.batch_alter_table('user', schema=None) as batch_op:
|
||||
batch_op.create_index(batch_op.f('ix_user_username'), ['username'], unique=True)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
try:
|
||||
with op.batch_alter_table('user', schema=None) as batch_op:
|
||||
batch_op.drop_index(batch_op.f('ix_user_username'))
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
try:
|
||||
with op.batch_alter_table('flow', schema=None) as batch_op:
|
||||
batch_op.drop_index(batch_op.f('ix_flow_user_id'))
|
||||
batch_op.drop_index(batch_op.f('ix_flow_name'))
|
||||
batch_op.drop_index(batch_op.f('ix_flow_description'))
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
try:
|
||||
with op.batch_alter_table('apikey', schema=None) as batch_op:
|
||||
batch_op.drop_index(batch_op.f('ix_apikey_user_id'))
|
||||
batch_op.drop_index(batch_op.f('ix_apikey_name'))
|
||||
batch_op.drop_index(batch_op.f('ix_apikey_api_key'))
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ Revises: fd531f8868b1
|
|||
Create Date: 2023-12-04 15:00:27.968998
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
|
|
@ -13,8 +14,8 @@ import sqlmodel
|
|||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = '1ef9c4f3765d'
|
||||
down_revision: Union[str, None] = 'fd531f8868b1'
|
||||
revision: str = "1ef9c4f3765d"
|
||||
down_revision: Union[str, None] = "fd531f8868b1"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
|
@ -22,10 +23,10 @@ depends_on: Union[str, Sequence[str], None] = None
|
|||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
try:
|
||||
with op.batch_alter_table('apikey', schema=None) as batch_op:
|
||||
batch_op.alter_column('name',
|
||||
existing_type=sqlmodel.sql.sqltypes.AutoString(),
|
||||
nullable=True)
|
||||
with op.batch_alter_table("apikey", schema=None) as batch_op:
|
||||
batch_op.alter_column(
|
||||
"name", existing_type=sqlmodel.sql.sqltypes.AutoString(), nullable=True
|
||||
)
|
||||
except Exception as e:
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
|
|
@ -34,10 +35,8 @@ def upgrade() -> None:
|
|||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
try:
|
||||
with op.batch_alter_table('apikey', schema=None) as batch_op:
|
||||
batch_op.alter_column('name',
|
||||
existing_type=sa.VARCHAR(),
|
||||
nullable=False)
|
||||
with op.batch_alter_table("apikey", schema=None) as batch_op:
|
||||
batch_op.alter_column("name", existing_type=sa.VARCHAR(), nullable=False)
|
||||
except Exception as e:
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ Revises:
|
|||
Create Date: 2023-08-27 19:49:02.681355
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
|
|
@ -33,7 +34,9 @@ def upgrade() -> None:
|
|||
if "ix_flowstyle_flow_id" in [
|
||||
index["name"] for index in inspector.get_indexes("flowstyle")
|
||||
]:
|
||||
op.drop_index("ix_flowstyle_flow_id", table_name="flowstyle")
|
||||
op.drop_index(
|
||||
"ix_flowstyle_flow_id", table_name="flowstyle", if_exists=True
|
||||
)
|
||||
|
||||
existing_indices_flow = []
|
||||
existing_fks_flow = []
|
||||
|
|
@ -80,8 +83,7 @@ def upgrade() -> None:
|
|||
sa.Column("api_key", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
||||
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
|
||||
sa.ForeignKeyConstraint(
|
||||
["user_id"],
|
||||
["user.id"],
|
||||
["user_id"], ["user.id"], name="fk_apikey_user_id_user"
|
||||
),
|
||||
sa.PrimaryKeyConstraint("id", name="pk_apikey"),
|
||||
sa.UniqueConstraint("id", name="uq_apikey_id"),
|
||||
|
|
@ -103,8 +105,7 @@ def upgrade() -> None:
|
|||
sa.Column("id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
|
||||
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
|
||||
sa.ForeignKeyConstraint(
|
||||
["user_id"],
|
||||
["user.id"],
|
||||
["user_id"], ["user.id"], name="fk_flow_user_id_user"
|
||||
),
|
||||
sa.PrimaryKeyConstraint("id", name="pk_flow"),
|
||||
sa.UniqueConstraint("id", name="uq_flow_id"),
|
||||
|
|
@ -151,21 +152,21 @@ def downgrade() -> None:
|
|||
existing_tables = inspector.get_table_names()
|
||||
if "flow" in existing_tables:
|
||||
with op.batch_alter_table("flow", schema=None) as batch_op:
|
||||
batch_op.drop_index(batch_op.f("ix_flow_user_id"))
|
||||
batch_op.drop_index(batch_op.f("ix_flow_name"))
|
||||
batch_op.drop_index(batch_op.f("ix_flow_description"))
|
||||
batch_op.drop_index(batch_op.f("ix_flow_user_id"), if_exists=True)
|
||||
batch_op.drop_index(batch_op.f("ix_flow_name"), if_exists=True)
|
||||
batch_op.drop_index(batch_op.f("ix_flow_description"), if_exists=True)
|
||||
|
||||
op.drop_table("flow")
|
||||
if "apikey" in existing_tables:
|
||||
with op.batch_alter_table("apikey", schema=None) as batch_op:
|
||||
batch_op.drop_index(batch_op.f("ix_apikey_user_id"))
|
||||
batch_op.drop_index(batch_op.f("ix_apikey_name"))
|
||||
batch_op.drop_index(batch_op.f("ix_apikey_api_key"))
|
||||
batch_op.drop_index(batch_op.f("ix_apikey_user_id"), if_exists=True)
|
||||
batch_op.drop_index(batch_op.f("ix_apikey_name"), if_exists=True)
|
||||
batch_op.drop_index(batch_op.f("ix_apikey_api_key"), if_exists=True)
|
||||
|
||||
op.drop_table("apikey")
|
||||
if "user" in existing_tables:
|
||||
with op.batch_alter_table("user", schema=None) as batch_op:
|
||||
batch_op.drop_index(batch_op.f("ix_user_username"))
|
||||
batch_op.drop_index(batch_op.f("ix_user_username"), if_exists=True)
|
||||
|
||||
op.drop_table("user")
|
||||
|
||||
|
|
|
|||
|
|
@ -5,34 +5,44 @@ Revises: 7d2162acc8b2
|
|||
Create Date: 2023-11-24 10:45:38.465302
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
import sqlmodel
|
||||
from alembic import op
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = '2ac71eb9c3ae'
|
||||
down_revision: Union[str, None] = '7d2162acc8b2'
|
||||
revision: str = "2ac71eb9c3ae"
|
||||
down_revision: Union[str, None] = "7d2162acc8b2"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
tables = inspector.get_table_names()
|
||||
try:
|
||||
op.create_table('credential',
|
||||
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
||||
sa.Column('value', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
||||
sa.Column('provider', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
||||
sa.Column('user_id', sqlmodel.sql.sqltypes.GUID(), nullable=False),
|
||||
sa.Column('id', sqlmodel.sql.sqltypes.GUID(), nullable=False),
|
||||
sa.Column('created_at', sa.DateTime(), nullable=False),
|
||||
sa.Column('updated_at', sa.DateTime(), nullable=True),
|
||||
sa.PrimaryKeyConstraint('id'),
|
||||
)
|
||||
if "credential" not in tables:
|
||||
op.create_table(
|
||||
"credential",
|
||||
sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
||||
sa.Column("value", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
||||
sa.Column(
|
||||
"provider", sqlmodel.sql.sqltypes.AutoString(), nullable=True
|
||||
),
|
||||
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
|
||||
sa.Column("id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
|
||||
sa.Column("created_at", sa.DateTime(), nullable=False),
|
||||
sa.Column("updated_at", sa.DateTime(), nullable=True),
|
||||
sa.PrimaryKeyConstraint("id"),
|
||||
)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
|
@ -40,7 +50,7 @@ def upgrade() -> None:
|
|||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
try:
|
||||
op.drop_table('credential')
|
||||
op.drop_table("credential")
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ Revises: 260dbcc8b680
|
|||
Create Date: 2023-09-08 07:36:13.387318
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
|
|
@ -21,29 +22,36 @@ depends_on: Union[str, Sequence[str], None] = None
|
|||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
if "user" in inspector.get_table_names() and "profile_image" not in [
|
||||
column["name"] for column in inspector.get_columns("user")
|
||||
]:
|
||||
with op.batch_alter_table("user", schema=None) as batch_op:
|
||||
batch_op.add_column(
|
||||
sa.Column(
|
||||
"profile_image", sqlmodel.sql.sqltypes.AutoString(), nullable=True
|
||||
try:
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
if "user" in inspector.get_table_names() and "profile_image" not in [
|
||||
column["name"] for column in inspector.get_columns("user")
|
||||
]:
|
||||
with op.batch_alter_table("user", schema=None) as batch_op:
|
||||
batch_op.add_column(
|
||||
sa.Column(
|
||||
"profile_image",
|
||||
sqlmodel.sql.sqltypes.AutoString(),
|
||||
nullable=True,
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
print(e)
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
if "user" in inspector.get_table_names() and "profile_image" in [
|
||||
column["name"] for column in inspector.get_columns("user")
|
||||
]:
|
||||
with op.batch_alter_table("user", schema=None) as batch_op:
|
||||
batch_op.drop_column("profile_image")
|
||||
try:
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
if "user" in inspector.get_table_names() and "profile_image" in [
|
||||
column["name"] for column in inspector.get_columns("user")
|
||||
]:
|
||||
with op.batch_alter_table("user", schema=None) as batch_op:
|
||||
batch_op.drop_column("profile_image")
|
||||
except Exception as e:
|
||||
print(e)
|
||||
|
||||
# ### end Alembic commands ###
|
||||
|
|
|
|||
|
|
@ -5,12 +5,13 @@ Revises: eb5866d51fd2
|
|||
Create Date: 2023-10-18 23:08:57.744906
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
import sqlmodel
|
||||
from alembic import op
|
||||
from loguru import logger
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "7843803a87b5"
|
||||
|
|
@ -21,19 +22,26 @@ depends_on: Union[str, Sequence[str], None] = None
|
|||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
flow_columns = [column["name"] for column in inspector.get_columns("flow")]
|
||||
user_columns = [column["name"] for column in inspector.get_columns("user")]
|
||||
try:
|
||||
with op.batch_alter_table("flow", schema=None) as batch_op:
|
||||
batch_op.add_column(sa.Column("is_component", sa.Boolean(), nullable=True))
|
||||
|
||||
with op.batch_alter_table("user", schema=None) as batch_op:
|
||||
batch_op.add_column(
|
||||
sa.Column(
|
||||
"store_api_key", sqlmodel.AutoString(), nullable=True
|
||||
if "is_component" not in flow_columns:
|
||||
with op.batch_alter_table("flow", schema=None) as batch_op:
|
||||
batch_op.add_column(
|
||||
sa.Column("is_component", sa.Boolean(), nullable=True)
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
|
||||
pass
|
||||
try:
|
||||
if "store_api_key" not in user_columns:
|
||||
with op.batch_alter_table("user", schema=None) as batch_op:
|
||||
batch_op.add_column(
|
||||
sa.Column("store_api_key", sqlmodel.AutoString(), nullable=True)
|
||||
)
|
||||
except Exception as e:
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -5,88 +5,74 @@ Revises: f5ee9749d1a6
|
|||
Create Date: 2023-11-21 20:56:53.998781
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
import sqlmodel
|
||||
from alembic import op
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = '7d2162acc8b2'
|
||||
down_revision: Union[str, None] = 'f5ee9749d1a6'
|
||||
revision: str = "7d2162acc8b2"
|
||||
down_revision: Union[str, None] = "f5ee9749d1a6"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
api_key_columns = [column["name"] for column in inspector.get_columns("apikey")]
|
||||
flow_columns = [column["name"] for column in inspector.get_columns("flow")]
|
||||
|
||||
try:
|
||||
with op.batch_alter_table('component', schema=None) as batch_op:
|
||||
batch_op.drop_index('ix_component_frontend_node_id')
|
||||
batch_op.drop_index('ix_component_name')
|
||||
op.drop_table('component')
|
||||
op.drop_table('flowstyle')
|
||||
if "name" in api_key_columns:
|
||||
with op.batch_alter_table("apikey", schema=None) as batch_op:
|
||||
batch_op.alter_column(
|
||||
"name", existing_type=sa.VARCHAR(), nullable=False
|
||||
)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
with op.batch_alter_table('apikey', schema=None) as batch_op:
|
||||
batch_op.alter_column('name',
|
||||
existing_type=sa.VARCHAR(),
|
||||
nullable=False)
|
||||
|
||||
with op.batch_alter_table('flow', schema=None) as batch_op:
|
||||
batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
|
||||
batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
|
||||
pass
|
||||
try:
|
||||
with op.batch_alter_table("flow", schema=None) as batch_op:
|
||||
if "updated_at" not in flow_columns:
|
||||
batch_op.add_column(
|
||||
sa.Column("updated_at", sa.DateTime(), nullable=True)
|
||||
)
|
||||
if "folder" not in flow_columns:
|
||||
batch_op.add_column(
|
||||
sa.Column(
|
||||
"folder", sqlmodel.sql.sqltypes.AutoString(), nullable=True
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
|
||||
pass
|
||||
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
try:
|
||||
with op.batch_alter_table('flow', schema=None) as batch_op:
|
||||
batch_op.drop_column('folder')
|
||||
batch_op.drop_column('updated_at')
|
||||
with op.batch_alter_table("flow", schema=None) as batch_op:
|
||||
batch_op.drop_column("folder")
|
||||
batch_op.drop_column("updated_at")
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
|
||||
try:
|
||||
|
||||
with op.batch_alter_table('apikey', schema=None) as batch_op:
|
||||
batch_op.alter_column('name',
|
||||
existing_type=sa.VARCHAR(),
|
||||
nullable=True)
|
||||
with op.batch_alter_table("apikey", schema=None) as batch_op:
|
||||
batch_op.alter_column("name", existing_type=sa.VARCHAR(), nullable=True)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
try:
|
||||
op.create_table('flowstyle',
|
||||
sa.Column('color', sa.VARCHAR(), nullable=False),
|
||||
sa.Column('emoji', sa.VARCHAR(), nullable=False),
|
||||
sa.Column('flow_id', sa.CHAR(length=32), nullable=True),
|
||||
sa.Column('id', sa.CHAR(length=32), nullable=False),
|
||||
sa.ForeignKeyConstraint(['flow_id'], ['flow.id'], ),
|
||||
sa.PrimaryKeyConstraint('id'),
|
||||
sa.UniqueConstraint('id')
|
||||
)
|
||||
op.create_table('component',
|
||||
sa.Column('id', sa.CHAR(length=32), nullable=False),
|
||||
sa.Column('frontend_node_id', sa.CHAR(length=32), nullable=False),
|
||||
sa.Column('name', sa.VARCHAR(), nullable=False),
|
||||
sa.Column('description', sa.VARCHAR(), nullable=True),
|
||||
sa.Column('python_code', sa.VARCHAR(), nullable=True),
|
||||
sa.Column('return_type', sa.VARCHAR(), nullable=True),
|
||||
sa.Column('is_disabled', sa.BOOLEAN(), nullable=False),
|
||||
sa.Column('is_read_only', sa.BOOLEAN(), nullable=False),
|
||||
sa.Column('create_at', sa.DATETIME(), nullable=False),
|
||||
sa.Column('update_at', sa.DATETIME(), nullable=False),
|
||||
sa.PrimaryKeyConstraint('id')
|
||||
)
|
||||
|
||||
with op.batch_alter_table('component', schema=None) as batch_op:
|
||||
batch_op.create_index('ix_component_name', ['name'], unique=False)
|
||||
batch_op.create_index('ix_component_frontend_node_id', ['frontend_node_id'], unique=False)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
|
|
|
|||
|
|
@ -5,55 +5,105 @@ Revises: 0b8757876a7c
|
|||
Create Date: 2024-01-26 13:31:14.797548
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
import sqlmodel
|
||||
from alembic import op
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = 'b2fa308044b5'
|
||||
down_revision: Union[str, None] = '0b8757876a7c'
|
||||
revision: str = "b2fa308044b5"
|
||||
down_revision: Union[str, None] = "0b8757876a7c"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
tables = inspector.get_table_names()
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
try:
|
||||
op.drop_table('flowstyle')
|
||||
with op.batch_alter_table('flow', schema=None) as batch_op:
|
||||
batch_op.add_column(sa.Column('is_component', sa.Boolean(), nullable=True))
|
||||
batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
|
||||
batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
|
||||
batch_op.add_column(sa.Column('user_id', sqlmodel.sql.sqltypes.GUID(), nullable=True))
|
||||
batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
|
||||
batch_op.create_foreign_key('fk_flow_user_id_user', 'user', ['user_id'], ['id'])
|
||||
if "flowstyle" in tables:
|
||||
op.drop_table("flowstyle")
|
||||
with op.batch_alter_table("flow", schema=None) as batch_op:
|
||||
flow_columns = [column["name"] for column in inspector.get_columns("flow")]
|
||||
if "is_component" not in flow_columns:
|
||||
batch_op.add_column(
|
||||
sa.Column("is_component", sa.Boolean(), nullable=True)
|
||||
)
|
||||
if "updated_at" not in flow_columns:
|
||||
batch_op.add_column(
|
||||
sa.Column("updated_at", sa.DateTime(), nullable=True)
|
||||
)
|
||||
if "folder" not in flow_columns:
|
||||
batch_op.add_column(
|
||||
sa.Column(
|
||||
"folder", sqlmodel.sql.sqltypes.AutoString(), nullable=True
|
||||
)
|
||||
)
|
||||
if "user_id" not in flow_columns:
|
||||
batch_op.add_column(
|
||||
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=True)
|
||||
)
|
||||
indices = inspector.get_indexes("flow")
|
||||
indices_names = [index["name"] for index in indices]
|
||||
if "ix_flow_user_id" not in indices_names:
|
||||
batch_op.create_index(
|
||||
batch_op.f("ix_flow_user_id"), ["user_id"], unique=False
|
||||
)
|
||||
if "fk_flow_user_id_user" not in indices_names:
|
||||
batch_op.create_foreign_key(
|
||||
"fk_flow_user_id_user", "user", ["user_id"], ["id"]
|
||||
)
|
||||
|
||||
except Exception:
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
try:
|
||||
with op.batch_alter_table('flow', schema=None) as batch_op:
|
||||
batch_op.drop_constraint('fk_flow_user_id_user', type_='foreignkey')
|
||||
batch_op.drop_index(batch_op.f('ix_flow_user_id'))
|
||||
batch_op.drop_column('user_id')
|
||||
batch_op.drop_column('folder')
|
||||
batch_op.drop_column('updated_at')
|
||||
batch_op.drop_column('is_component')
|
||||
# Re-create the dropped table 'flowstyle' if it was previously dropped in upgrade
|
||||
if "flowstyle" not in inspector.get_table_names():
|
||||
op.create_table(
|
||||
"flowstyle",
|
||||
sa.Column("color", sa.String(), nullable=False),
|
||||
sa.Column("emoji", sa.String(), nullable=False),
|
||||
sa.Column("flow_id", sqlmodel.sql.sqltypes.GUID(), nullable=True),
|
||||
sa.Column("id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
|
||||
sa.ForeignKeyConstraint(["flow_id"], ["flow.id"]),
|
||||
sa.PrimaryKeyConstraint("id"),
|
||||
sa.UniqueConstraint("id"),
|
||||
)
|
||||
|
||||
op.create_table('flowstyle',
|
||||
sa.Column('color', sa.VARCHAR(), nullable=False),
|
||||
sa.Column('emoji', sa.VARCHAR(), nullable=False),
|
||||
sa.Column('flow_id', sa.CHAR(length=32), nullable=True),
|
||||
sa.Column('id', sa.CHAR(length=32), nullable=False),
|
||||
sa.ForeignKeyConstraint(['flow_id'], ['flow.id'], ),
|
||||
sa.PrimaryKeyConstraint('id'),
|
||||
sa.UniqueConstraint('id')
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
with op.batch_alter_table("flow", schema=None) as batch_op:
|
||||
# Check and remove newly added columns and constraints in upgrade
|
||||
flow_columns = [column["name"] for column in inspector.get_columns("flow")]
|
||||
if "user_id" in flow_columns:
|
||||
batch_op.drop_column("user_id")
|
||||
if "folder" in flow_columns:
|
||||
batch_op.drop_column("folder")
|
||||
if "updated_at" in flow_columns:
|
||||
batch_op.drop_column("updated_at")
|
||||
if "is_component" in flow_columns:
|
||||
batch_op.drop_column("is_component")
|
||||
|
||||
indices = inspector.get_indexes("flow")
|
||||
indices_names = [index["name"] for index in indices]
|
||||
if "ix_flow_user_id" in indices_names:
|
||||
batch_op.drop_index("ix_flow_user_id")
|
||||
# Assuming fk_flow_user_id_user is a foreign key constraint's name, not an index
|
||||
constraints = inspector.get_foreign_keys("flow")
|
||||
constraint_names = [constraint["name"] for constraint in constraints]
|
||||
if "fk_flow_user_id_user" in constraint_names:
|
||||
batch_op.drop_constraint("fk_flow_user_id_user", type_="foreignkey")
|
||||
|
||||
except Exception as e:
|
||||
# It's generally a good idea to log the exception or handle it in a way other than a bare pass
|
||||
print(f"Error during downgrade: {e}")
|
||||
|
|
|
|||
|
|
@ -5,46 +5,68 @@ Revises: b2fa308044b5
|
|||
Create Date: 2024-01-26 13:34:14.496769
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
import sqlmodel
|
||||
from alembic import op
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = 'bc2f01c40e4a'
|
||||
down_revision: Union[str, None] = 'b2fa308044b5'
|
||||
revision: str = "bc2f01c40e4a"
|
||||
down_revision: Union[str, None] = "b2fa308044b5"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
try:
|
||||
with op.batch_alter_table('flow', schema=None) as batch_op:
|
||||
batch_op.add_column(sa.Column('is_component', sa.Boolean(), nullable=True))
|
||||
batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
|
||||
batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
|
||||
batch_op.add_column(sa.Column('user_id', sqlmodel.sql.sqltypes.GUID(), nullable=True))
|
||||
batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
|
||||
batch_op.create_foreign_key('flow_user_id_fkey'
|
||||
, 'user', ['user_id'], ['id'])
|
||||
except Exception:
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
flow_columns = {column["name"] for column in inspector.get_columns("flow")}
|
||||
flow_indexes = {index["name"] for index in inspector.get_indexes("flow")}
|
||||
flow_fks = {fk["name"] for fk in inspector.get_foreign_keys("flow")}
|
||||
|
||||
with op.batch_alter_table("flow", schema=None) as batch_op:
|
||||
if "is_component" not in flow_columns:
|
||||
batch_op.add_column(sa.Column("is_component", sa.Boolean(), nullable=True))
|
||||
if "updated_at" not in flow_columns:
|
||||
batch_op.add_column(sa.Column("updated_at", sa.DateTime(), nullable=True))
|
||||
if "folder" not in flow_columns:
|
||||
batch_op.add_column(
|
||||
sa.Column("folder", sqlmodel.sql.sqltypes.AutoString(), nullable=True)
|
||||
)
|
||||
if "user_id" not in flow_columns:
|
||||
batch_op.add_column(
|
||||
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=True)
|
||||
)
|
||||
if "ix_flow_user_id" not in flow_indexes:
|
||||
batch_op.create_index(
|
||||
batch_op.f("ix_flow_user_id"), ["user_id"], unique=False
|
||||
)
|
||||
if "flow_user_id_fkey" not in flow_fks:
|
||||
batch_op.create_foreign_key(
|
||||
"flow_user_id_fkey", "user", ["user_id"], ["id"]
|
||||
)
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
try:
|
||||
with op.batch_alter_table('flow', schema=None) as batch_op:
|
||||
batch_op.drop_constraint('flow_user_id_fkey', type_='foreignkey')
|
||||
batch_op.drop_index(batch_op.f('ix_flow_user_id'))
|
||||
batch_op.drop_column('user_id')
|
||||
batch_op.drop_column('folder')
|
||||
batch_op.drop_column('updated_at')
|
||||
batch_op.drop_column('is_component')
|
||||
except Exception:
|
||||
pass
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
flow_columns = {column["name"] for column in inspector.get_columns("flow")}
|
||||
flow_indexes = {index["name"] for index in inspector.get_indexes("flow")}
|
||||
flow_fks = {fk["name"] for fk in inspector.get_foreign_keys("flow")}
|
||||
|
||||
# ### end Alembic commands ###
|
||||
with op.batch_alter_table("flow", schema=None) as batch_op:
|
||||
if "flow_user_id_fkey" in flow_fks:
|
||||
batch_op.drop_constraint("flow_user_id_fkey", type_="foreignkey")
|
||||
if "ix_flow_user_id" in flow_indexes:
|
||||
batch_op.drop_index(batch_op.f("ix_flow_user_id"))
|
||||
if "user_id" in flow_columns:
|
||||
batch_op.drop_column("user_id")
|
||||
if "folder" in flow_columns:
|
||||
batch_op.drop_column("folder")
|
||||
if "updated_at" in flow_columns:
|
||||
batch_op.drop_column("updated_at")
|
||||
if "is_component" in flow_columns:
|
||||
batch_op.drop_column("is_component")
|
||||
|
|
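When debugging these conditional migrations locally, the same Inspector API can be pointed at the on-disk database to see which columns, indexes and foreign keys actually exist. A small verification snippet; the SQLite path is an example:

from sqlalchemy import create_engine, inspect

engine = create_engine("sqlite:///./langflow.db")  # example path
inspector = inspect(engine)
print([column["name"] for column in inspector.get_columns("flow")])
print([index["name"] for index in inspector.get_indexes("flow")])
print([fk["name"] for fk in inspector.get_foreign_keys("flow")])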
|
|||
|
|
@ -5,11 +5,10 @@ Revises: 67cc006d50bf
|
|||
Create Date: 2023-10-04 10:18:25.640458
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
from sqlalchemy import exc
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "eb5866d51fd2"
|
||||
|
|
@ -21,70 +20,12 @@ depends_on: Union[str, Sequence[str], None] = None
|
|||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
connection = op.get_bind()
|
||||
try:
|
||||
op.drop_table("flowstyle")
|
||||
with op.batch_alter_table("component", schema=None) as batch_op:
|
||||
batch_op.drop_index("ix_component_frontend_node_id")
|
||||
batch_op.drop_index("ix_component_name")
|
||||
except exc.SQLAlchemyError:
|
||||
# connection.execute(text("ROLLBACK"))
|
||||
pass
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
|
||||
try:
|
||||
op.drop_table("component")
|
||||
except exc.SQLAlchemyError:
|
||||
# connection.execute(text("ROLLBACK"))
|
||||
pass
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
try:
|
||||
op.create_table(
|
||||
"component",
|
||||
sa.Column("id", sa.CHAR(length=32), nullable=False),
|
||||
sa.Column("frontend_node_id", sa.CHAR(length=32), nullable=False),
|
||||
sa.Column("name", sa.VARCHAR(), nullable=False),
|
||||
sa.Column("description", sa.VARCHAR(), nullable=True),
|
||||
sa.Column("python_code", sa.VARCHAR(), nullable=True),
|
||||
sa.Column("return_type", sa.VARCHAR(), nullable=True),
|
||||
sa.Column("is_disabled", sa.BOOLEAN(), nullable=False),
|
||||
sa.Column("is_read_only", sa.BOOLEAN(), nullable=False),
|
||||
sa.Column("create_at", sa.DATETIME(), nullable=False),
|
||||
sa.Column("update_at", sa.DATETIME(), nullable=False),
|
||||
sa.PrimaryKeyConstraint("id", name="pk_component"),
|
||||
)
|
||||
with op.batch_alter_table("component", schema=None) as batch_op:
|
||||
batch_op.create_index("ix_component_name", ["name"], unique=False)
|
||||
batch_op.create_index(
|
||||
"ix_component_frontend_node_id", ["frontend_node_id"], unique=False
|
||||
)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
|
||||
try:
|
||||
op.create_table(
|
||||
"flowstyle",
|
||||
sa.Column("color", sa.VARCHAR(), nullable=False),
|
||||
sa.Column("emoji", sa.VARCHAR(), nullable=False),
|
||||
sa.Column("flow_id", sa.CHAR(length=32), nullable=True),
|
||||
sa.Column("id", sa.CHAR(length=32), nullable=False),
|
||||
sa.ForeignKeyConstraint(
|
||||
["flow_id"],
|
||||
["flow.id"],
|
||||
),
|
||||
sa.PrimaryKeyConstraint("id", name="pk_flowstyle"),
|
||||
sa.UniqueConstraint("id", name="uq_flowstyle_id"),
|
||||
)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
pass
|
||||
# ### end Alembic commands ###
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ Revises: 7843803a87b5
|
|||
Create Date: 2023-10-18 23:12:27.297016
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
|
|
|
|||
|
|
@ -5,22 +5,35 @@ Revises: 2ac71eb9c3ae
|
|||
Create Date: 2023-11-24 15:07:37.566516
|
||||
|
||||
"""
|
||||
from typing import Sequence, Union
|
||||
|
||||
from typing import Optional, Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = 'fd531f8868b1'
|
||||
down_revision: Union[str, None] = '2ac71eb9c3ae'
|
||||
revision: str = "fd531f8868b1"
|
||||
down_revision: Union[str, None] = "2ac71eb9c3ae"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
tables = inspector.get_table_names()
|
||||
foreign_keys_names = []
|
||||
if "credential" in tables:
|
||||
foreign_keys = inspector.get_foreign_keys("credential")
|
||||
foreign_keys_names = [fk["name"] for fk in foreign_keys]
|
||||
|
||||
try:
|
||||
with op.batch_alter_table('credential', schema=None) as batch_op:
|
||||
batch_op.create_foreign_key("fk_credential_user_id", 'user', ['user_id'], ['id'])
|
||||
if "credential" in tables and "fk_credential_user_id" not in foreign_keys_names:
|
||||
with op.batch_alter_table("credential", schema=None) as batch_op:
|
||||
batch_op.create_foreign_key(
|
||||
"fk_credential_user_id", "user", ["user_id"], ["id"]
|
||||
)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
|
|
@ -30,9 +43,17 @@ def upgrade() -> None:
|
|||
|
||||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
conn = op.get_bind()
|
||||
inspector = Inspector.from_engine(conn) # type: ignore
|
||||
tables = inspector.get_table_names()
|
||||
foreign_keys_names: list[Optional[str]] = []
|
||||
if "credential" in tables:
|
||||
foreign_keys = inspector.get_foreign_keys("credential")
|
||||
foreign_keys_names = [fk["name"] for fk in foreign_keys]
|
||||
try:
|
||||
with op.batch_alter_table('credential', schema=None) as batch_op:
|
||||
batch_op.drop_constraint("fk_credential_user_id", type_='foreignkey')
|
||||
if "credential" in tables and "fk_credential_user_id" in foreign_keys_names:
|
||||
with op.batch_alter_table("credential", schema=None) as batch_op:
|
||||
batch_op.drop_constraint("fk_credential_user_id", type_="foreignkey")
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -3,9 +3,7 @@ from pathlib import Path
|
|||
from typing import TYPE_CHECKING, List, Optional
|
||||
|
||||
from fastapi import HTTPException
|
||||
from langchain_core.documents import Document
|
||||
from platformdirs import user_cache_dir
|
||||
from pydantic import BaseModel
|
||||
from sqlmodel import Session
|
||||
|
||||
from langflow.graph.graph.base import Graph
|
||||
|
|
@ -199,20 +197,6 @@ def format_elapsed_time(elapsed_time: float) -> str:
|
|||
return f"{minutes} {minutes_unit}, {seconds} {seconds_unit}"
|
||||
|
||||
|
||||
def serialize_field(value):
|
||||
"""Unified serialization function for handling both BaseModel and Document types,
|
||||
including handling lists of these types."""
|
||||
if isinstance(value, (list, tuple)):
|
||||
return [serialize_field(v) for v in value]
|
||||
elif isinstance(value, Document):
|
||||
return value.to_json()
|
||||
elif isinstance(value, BaseModel):
|
||||
return value.model_dump()
|
||||
elif isinstance(value, str):
|
||||
return {"result": value}
|
||||
return value
|
||||
|
||||
|
||||
def build_and_cache_graph(
|
||||
flow_id: str,
|
||||
session: Session,
|
||||
|
|
@ -220,13 +204,37 @@ def build_and_cache_graph(
|
|||
graph: Optional[Graph] = None,
|
||||
):
|
||||
"""Build and cache the graph."""
|
||||
flow: Flow = session.get(Flow, flow_id)
|
||||
flow: Optional[Flow] = session.get(Flow, flow_id)
|
||||
if not flow or not flow.data:
|
||||
raise ValueError("Invalid flow ID")
|
||||
other_graph = Graph.from_payload(flow.data)
|
||||
other_graph = Graph.from_payload(flow.data, flow_id)
|
||||
if graph is None:
|
||||
graph = other_graph
|
||||
else:
|
||||
graph = graph.update(other_graph)
|
||||
chat_service.set_cache(flow_id, graph)
|
||||
return graph
|
||||
|
||||
|
||||
def format_syntax_error_message(exc: SyntaxError) -> str:
|
||||
"""Format a SyntaxError message for returning to the frontend."""
|
||||
if exc.text is None:
|
||||
return f"Syntax error in code. Error on line {exc.lineno}"
|
||||
return f"Syntax error in code. Error on line {exc.lineno}: {exc.text.strip()}"
|
||||
|
||||
|
||||
def get_causing_exception(exc: BaseException) -> BaseException:
|
||||
"""Get the causing exception from an exception."""
|
||||
if hasattr(exc, "__cause__") and exc.__cause__:
|
||||
return get_causing_exception(exc.__cause__)
|
||||
return exc
|
||||
|
||||
|
||||
def format_exception_message(exc: Exception) -> str:
|
||||
"""Format an exception message for returning to the frontend."""
|
||||
# We need to check if the __cause__ is a SyntaxError
|
||||
# If it is, we need to return the message of the SyntaxError
|
||||
causing_exception = get_causing_exception(exc)
|
||||
if isinstance(causing_exception, SyntaxError):
|
||||
return format_syntax_error_message(causing_exception)
|
||||
return str(exc)
|
||||
|
|
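A quick illustration of the helpers above, using a deliberately broken snippet; the exact wording of the printed message is approximate:

from langflow.api.utils import format_syntax_error_message

try:
    compile("def broken(:\n    pass", "<flow>", "exec")
except SyntaxError as exc:
    print(format_syntax_error_message(exc))
    # roughly: Syntax error in code. Error on line 1: def broken(: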
|
|||
|
|
@ -68,8 +68,6 @@ INVALID_CHARACTERS = {
|
|||
")",
|
||||
"[",
|
||||
"]",
|
||||
"{",
|
||||
"}",
|
||||
}
|
||||
|
||||
INVALID_NAMES = {
|
||||
|
|
@ -88,73 +86,110 @@ def validate_prompt(template: str):
|
|||
# Check if there are invalid characters in the input_variables
|
||||
input_variables = check_input_variables(input_variables)
|
||||
if any(var in INVALID_NAMES for var in input_variables):
|
||||
raise ValueError(f"Invalid input variables. None of the variables can be named {', '.join(input_variables)}. ")
|
||||
raise ValueError(
|
||||
f"Invalid input variables. None of the variables can be named {', '.join(input_variables)}. "
|
||||
)
|
||||
|
||||
try:
|
||||
PromptTemplate(template=template, input_variables=input_variables)
|
||||
except Exception as exc:
|
||||
raise ValueError(str(exc)) from exc
|
||||
raise ValueError(f"Invalid prompt: {exc}") from exc
|
||||
|
||||
return input_variables
|
||||
|
||||
|
||||
def check_input_variables(input_variables: list):
|
||||
def is_json_like(var):
|
||||
if var.startswith("{{") and var.endswith("}}"):
|
||||
# If it is a double brace variable
|
||||
# we don't want to validate any of its content
|
||||
return True
|
||||
# the above doesn't work in all cases because the JSON string can be multiline
|
||||
# or indented which can add \n or spaces at the start or end of the string
|
||||
# test_case_3 new_var == '\n{{\n "test": "hello",\n "text": "world"\n}}\n'
|
||||
# what we can do is to remove the \n and spaces from the start and end of the string
|
||||
# and then check if the string starts with {{ and ends with }}
|
||||
var = var.strip()
|
||||
var = var.replace("\n", "")
|
||||
var = var.replace(" ", "")
|
||||
# Now it should be a valid json string
|
||||
return var.startswith("{{") and var.endswith("}}")
|
||||
|
||||
|
||||
def fix_variable(var, invalid_chars, wrong_variables):
|
||||
if not var:
|
||||
return var, invalid_chars, wrong_variables
|
||||
new_var = var
|
||||
|
||||
# Handle variables starting with a number
|
||||
if var[0].isdigit():
|
||||
invalid_chars.append(var[0])
|
||||
new_var, invalid_chars, wrong_variables = fix_variable(
|
||||
var[1:], invalid_chars, wrong_variables
|
||||
)
|
||||
|
||||
# Temporarily replace {{ and }} to avoid treating them as invalid
|
||||
new_var = new_var.replace("{{", "ᴛᴇᴍᴘᴏᴘᴇɴ").replace("}}", "ᴛᴇᴍᴘᴄʟᴏsᴇ")
|
||||
|
||||
# Remove invalid characters
|
||||
for char in new_var:
|
||||
if char in INVALID_CHARACTERS:
|
||||
invalid_chars.append(char)
|
||||
new_var = new_var.replace(char, "")
|
||||
if var not in wrong_variables: # Avoid duplicating entries
|
||||
wrong_variables.append(var)
|
||||
|
||||
# Restore {{ and }}
|
||||
new_var = new_var.replace("ᴛᴇᴍᴘᴏᴘᴇɴ", "{{").replace("ᴛᴇᴍᴘᴄʟᴏsᴇ", "}}")
|
||||
|
||||
return new_var, invalid_chars, wrong_variables
|
||||
|
||||
|
||||
def check_variable(var, invalid_chars, wrong_variables, empty_variables):
|
||||
if any(char in invalid_chars for char in var):
|
||||
wrong_variables.append(var)
|
||||
elif var == "":
|
||||
empty_variables.append(var)
|
||||
return wrong_variables, empty_variables
|
||||
|
||||
|
||||
def check_for_errors(
|
||||
input_variables, fixed_variables, wrong_variables, empty_variables
|
||||
):
|
||||
if any(var for var in input_variables if var not in fixed_variables):
|
||||
error_message = (
|
||||
f"Error: Input variables contain invalid characters or formats. \n"
|
||||
f"Invalid variables: {', '.join(wrong_variables)}.\n"
|
||||
f"Empty variables: {', '.join(empty_variables)}. \n"
|
||||
f"Fixed variables: {', '.join(fixed_variables)}."
|
||||
)
|
||||
raise ValueError(error_message)
|
||||
|
||||
|
||||
def check_input_variables(input_variables):
|
||||
invalid_chars = []
|
||||
fixed_variables = []
|
||||
wrong_variables = []
|
||||
empty_variables = []
|
||||
for variable in input_variables:
|
||||
new_var = variable
|
||||
variables_to_check = []
|
||||
|
||||
# if variable is empty, then we should add that to the wrong variables
|
||||
if not variable:
|
||||
empty_variables.append(variable)
|
||||
for var in input_variables:
|
||||
# First, let's check if the variable is a JSON string
|
||||
# because if it is, it won't be considered a variable
|
||||
# and we don't need to validate it
|
||||
if is_json_like(var):
|
||||
continue
|
||||
|
||||
# if variable starts with a number we should add that to the invalid chars
|
||||
# and wrong variables
|
||||
if variable[0].isdigit():
|
||||
invalid_chars.append(variable[0])
|
||||
new_var = new_var.replace(variable[0], "")
|
||||
wrong_variables.append(variable)
|
||||
else:
|
||||
for char in INVALID_CHARACTERS:
|
||||
if char in variable:
|
||||
invalid_chars.append(char)
|
||||
new_var = new_var.replace(char, "")
|
||||
wrong_variables.append(variable)
|
||||
fixed_variables.append(new_var)
|
||||
# If any of the input_variables is not in the fixed_variables, then it means that
|
||||
# there are invalid characters in the input_variables
|
||||
|
||||
if any(var not in fixed_variables for var in input_variables):
|
||||
error_message = build_error_message(
|
||||
input_variables,
|
||||
invalid_chars,
|
||||
wrong_variables,
|
||||
fixed_variables,
|
||||
empty_variables,
|
||||
new_var, wrong_variables, empty_variables = fix_variable(
|
||||
var, invalid_chars, wrong_variables
|
||||
)
|
||||
raise ValueError(error_message)
|
||||
return input_variables
|
||||
wrong_variables, empty_variables = check_variable(
|
||||
var, INVALID_CHARACTERS, wrong_variables, empty_variables
|
||||
)
|
||||
fixed_variables.append(new_var)
|
||||
variables_to_check.append(var)
|
||||
|
||||
check_for_errors(
|
||||
variables_to_check, fixed_variables, wrong_variables, empty_variables
|
||||
)
|
||||
|
||||
def build_error_message(input_variables, invalid_chars, wrong_variables, fixed_variables, empty_variables):
|
||||
input_variables_str = ", ".join([f"'{var}'" for var in input_variables])
|
||||
error_string = f"Invalid input variables: {input_variables_str}. "
|
||||
|
||||
if wrong_variables and invalid_chars:
|
||||
# fix the wrong variables replacing invalid chars and find them in the fixed variables
|
||||
error_string_vars = "You can fix them by replacing the invalid characters: "
|
||||
wvars = wrong_variables.copy()
|
||||
for i, wrong_var in enumerate(wvars):
|
||||
for char in invalid_chars:
|
||||
wrong_var = wrong_var.replace(char, "")
|
||||
if wrong_var in fixed_variables:
|
||||
error_string_vars += f"'{wrong_variables[i]}' -> '{wrong_var}'"
|
||||
error_string += error_string_vars
|
||||
elif empty_variables:
|
||||
error_string += f" There are {len(empty_variables)} empty variable{'s' if len(empty_variables) > 1 else ''}."
|
||||
elif len(set(fixed_variables)) != len(fixed_variables):
|
||||
error_string += "There are duplicate variables."
|
||||
return error_string
|
||||
return fixed_variables
|
||||
|
|
|
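To make the new validation flow concrete: double-brace, JSON-like entries are skipped entirely, ordinary names pass through, and a name containing one of the INVALID_CHARACTERS raises. A small usage sketch, with check_input_variables imported from the module patched above:

print(check_input_variables(['question', '{{ "format": "json" }}']))
# expected: ['question']  (the JSON-like entry is ignored, nothing is flagged)

check_input_variables(["answer)"])
# raises ValueError, because ")" is in INVALID_CHARACTERS and the fixed
# name ("answer") no longer matches the variable that was passed in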
|||
|
|
@ -1,128 +1,31 @@
|
|||
import asyncio
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional
|
||||
from uuid import UUID
|
||||
|
||||
from langchain.schema import AgentAction, AgentFinish
|
||||
from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
|
||||
from langflow.api.v1.schemas import ChatResponse, PromptResponse
|
||||
from langflow.services.deps import get_chat_service
|
||||
from langflow.utils.util import remove_ansi_escape_codes
|
||||
from langchain_core.callbacks.base import AsyncCallbackHandler
|
||||
from loguru import logger
|
||||
|
||||
from langflow.api.v1.schemas import ChatResponse, PromptResponse
|
||||
from langflow.services.deps import get_chat_service, get_socket_service
|
||||
from langflow.utils.util import remove_ansi_escape_codes
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from langflow.services.socket.service import SocketIOService
|
||||
|
||||
|
||||
class AsyncStreamingLLMCallbackHandler(AsyncCallbackHandler):
|
||||
"""Callback handler for streaming LLM responses."""
|
||||
|
||||
def __init__(self, client_id: str = None):
|
||||
self.chat_service = get_chat_service()
|
||||
self.client_id = client_id
|
||||
self.websocket = self.chat_service.active_connections[self.client_id]
|
||||
|
||||
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
|
||||
resp = ChatResponse(message=token, type="stream", intermediate_steps="")
|
||||
await self.websocket.send_json(resp.model_dump())
|
||||
|
||||
async def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> Any:
|
||||
"""Run when tool starts running."""
|
||||
resp = ChatResponse(
|
||||
message="",
|
||||
type="stream",
|
||||
intermediate_steps=f"Tool input: {input_str}",
|
||||
)
|
||||
await self.websocket.send_json(resp.model_dump())
|
||||
|
||||
async def on_tool_end(self, output: str, **kwargs: Any) -> Any:
|
||||
"""Run when tool ends running."""
|
||||
observation_prefix = kwargs.get("observation_prefix", "Tool output: ")
|
||||
split_output = output.split()
|
||||
first_word = split_output[0]
|
||||
rest_of_output = split_output[1:]
|
||||
# Create a formatted message.
|
||||
intermediate_steps = f"{observation_prefix}{first_word}"
|
||||
|
||||
# Create a ChatResponse instance.
|
||||
resp = ChatResponse(
|
||||
message="",
|
||||
type="stream",
|
||||
intermediate_steps=intermediate_steps,
|
||||
)
|
||||
rest_of_resps = [
|
||||
ChatResponse(
|
||||
message="",
|
||||
type="stream",
|
||||
intermediate_steps=f"{word}",
|
||||
)
|
||||
for word in rest_of_output
|
||||
]
|
||||
resps = [resp] + rest_of_resps
|
||||
# Try to send the response, handle potential errors.
|
||||
|
||||
try:
|
||||
# This is to emulate the stream of tokens
|
||||
for resp in resps:
|
||||
await self.websocket.send_json(resp.model_dump())
|
||||
except Exception as exc:
|
||||
logger.error(f"Error sending response: {exc}")
|
||||
|
||||
async def on_tool_error(
|
||||
self,
|
||||
error: BaseException,
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when tool errors."""
|
||||
|
||||
async def on_text(self, text: str, **kwargs: Any) -> Any:
|
||||
"""Run on arbitrary text."""
|
||||
# This runs when first sending the prompt
|
||||
# to the LLM, adding it will send the final prompt
|
||||
# to the frontend
|
||||
if "Prompt after formatting" in text:
|
||||
text = text.replace("Prompt after formatting:\n", "")
|
||||
text = remove_ansi_escape_codes(text)
|
||||
resp = PromptResponse(
|
||||
prompt=text,
|
||||
)
|
||||
await self.websocket.send_json(resp.model_dump())
|
||||
self.chat_service.chat_history.add_message(self.client_id, resp)
|
||||
|
||||
async def on_agent_action(self, action: AgentAction, **kwargs: Any):
|
||||
log = f"Thought: {action.log}"
|
||||
# if there are line breaks, split them and send them
|
||||
# as separate messages
|
||||
if "\n" in log:
|
||||
logs = log.split("\n")
|
||||
for log in logs:
|
||||
resp = ChatResponse(message="", type="stream", intermediate_steps=log)
|
||||
await self.websocket.send_json(resp.model_dump())
|
||||
else:
|
||||
resp = ChatResponse(message="", type="stream", intermediate_steps=log)
|
||||
await self.websocket.send_json(resp.model_dump())
|
||||
|
||||
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
|
||||
"""Run on agent end."""
|
||||
resp = ChatResponse(
|
||||
message="",
|
||||
type="stream",
|
||||
intermediate_steps=finish.log,
|
||||
)
|
||||
await self.websocket.send_json(resp.model_dump())
|
||||
|
||||
|
||||
# https://github.com/hwchase17/chat-langchain/blob/master/callback.py
|
||||
class AsyncStreamingLLMCallbackHandleSIO(AsyncCallbackHandler):
|
||||
"""Callback handler for streaming LLM responses."""
|
||||
|
||||
@property
|
||||
def ignore_chain(self) -> bool:
|
||||
"""Whether to ignore chain callbacks."""
|
||||
return False
|
||||
|
||||
def __init__(self, session_id: str):
|
||||
self.chat_service = get_chat_service()
|
||||
self.client_id = session_id
|
||||
self.socketio_service: "SocketIOService" = self.chat_service.socketio_service
|
||||
self.socketio_service: "SocketIOService" = get_socket_service()
|
||||
self.sid = session_id
|
||||
# self.socketio_service = self.chat_service.active_connections[self.client_id]
|
||||
|
||||
|
|
@ -130,7 +33,9 @@ class AsyncStreamingLLMCallbackHandleSIO(AsyncCallbackHandler):
|
|||
resp = ChatResponse(message=token, type="stream", intermediate_steps="")
|
||||
await self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())
|
||||
|
||||
async def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> Any:
|
||||
async def on_tool_start(
|
||||
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
|
||||
) -> Any:
|
||||
"""Run when tool starts running."""
|
||||
resp = ChatResponse(
|
||||
message="",
|
||||
|
|
@ -168,7 +73,9 @@ class AsyncStreamingLLMCallbackHandleSIO(AsyncCallbackHandler):
|
|||
try:
|
||||
# This is to emulate the stream of tokens
|
||||
for resp in resps:
|
||||
await self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())
|
||||
await self.socketio_service.emit_token(
|
||||
to=self.sid, data=resp.model_dump()
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.error(f"Error sending response: {exc}")
|
||||
|
||||
|
|
@ -194,8 +101,9 @@ class AsyncStreamingLLMCallbackHandleSIO(AsyncCallbackHandler):
|
|||
resp = PromptResponse(
|
||||
prompt=text,
|
||||
)
|
||||
await self.socketio_service.emit_message(to=self.sid, data=resp.model_dump())
|
||||
self.chat_service.chat_history.add_message(self.client_id, resp)
|
||||
await self.socketio_service.emit_message(
|
||||
to=self.sid, data=resp.model_dump()
|
||||
)
|
||||
|
||||
async def on_agent_action(self, action: AgentAction, **kwargs: Any):
|
||||
log = f"Thought: {action.log}"
|
||||
|
|
@ -205,7 +113,9 @@ class AsyncStreamingLLMCallbackHandleSIO(AsyncCallbackHandler):
|
|||
logs = log.split("\n")
|
||||
for log in logs:
|
||||
resp = ChatResponse(message="", type="stream", intermediate_steps=log)
|
||||
await self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())
|
||||
await self.socketio_service.emit_token(
|
||||
to=self.sid, data=resp.model_dump()
|
||||
)
|
||||
else:
|
||||
resp = ChatResponse(message="", type="stream", intermediate_steps=log)
|
||||
await self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())
|
||||
|
|
@ -218,19 +128,3 @@ class AsyncStreamingLLMCallbackHandleSIO(AsyncCallbackHandler):
|
|||
intermediate_steps=finish.log,
|
||||
)
|
||||
await self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())
|
||||
|
||||
|
||||
class StreamingLLMCallbackHandler(BaseCallbackHandler):
|
||||
"""Callback handler for streaming LLM responses."""
|
||||
|
||||
def __init__(self, client_id: str):
|
||||
self.chat_service = get_chat_service()
|
||||
self.client_id = client_id
|
||||
self.socketio_service = self.chat_service.active_connections[self.client_id]
|
||||
|
||||
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
|
||||
resp = ChatResponse(message=token, type="stream", intermediate_steps="")
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
coroutine = self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())
|
||||
asyncio.run_coroutine_threadsafe(coroutine, loop)
|
||||
|
|
|
|||
|
|
@@ -1,72 +1,35 @@
|
|||
import time
|
||||
from typing import Optional
|
||||
import uuid
|
||||
from typing import TYPE_CHECKING, Annotated, Optional
|
||||
|
||||
from fastapi import (
|
||||
APIRouter,
|
||||
BackgroundTasks,
|
||||
Depends,
|
||||
HTTPException,
|
||||
WebSocket,
|
||||
WebSocketException,
|
||||
status,
|
||||
)
|
||||
from fastapi import APIRouter, BackgroundTasks, Body, Depends, HTTPException
|
||||
from fastapi.responses import StreamingResponse
|
||||
from loguru import logger
|
||||
from sqlmodel import Session
|
||||
|
||||
from langflow.api.utils import build_and_cache_graph, format_elapsed_time
|
||||
from langflow.api.utils import (
|
||||
build_and_cache_graph,
|
||||
format_elapsed_time,
|
||||
format_exception_message,
|
||||
)
|
||||
from langflow.api.v1.schemas import (
|
||||
ResultData,
|
||||
InputValueRequest,
|
||||
ResultDataResponse,
|
||||
StreamData,
|
||||
VertexBuildResponse,
|
||||
VerticesOrderResponse,
|
||||
)
|
||||
from langflow.graph.graph.base import Graph
|
||||
from langflow.graph.vertex.types import RoutingVertex
|
||||
from langflow.services.auth.utils import (
|
||||
get_current_active_user,
|
||||
get_current_user_for_websocket,
|
||||
)
|
||||
from langflow.services.auth.utils import get_current_active_user
|
||||
from langflow.services.chat.service import ChatService
|
||||
from langflow.services.deps import get_chat_service, get_session
|
||||
from langflow.services.deps import get_chat_service, get_session, get_session_service
|
||||
from langflow.services.monitor.utils import log_vertex_build
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from langflow.graph.vertex.types import ChatVertex
|
||||
from langflow.services.session.service import SessionService
|
||||
|
||||
router = APIRouter(tags=["Chat"])
|
||||
|
||||
|
||||
@router.websocket("/chat/{client_id}")
|
||||
async def chat(
|
||||
client_id: str,
|
||||
websocket: WebSocket,
|
||||
db: Session = Depends(get_session),
|
||||
chat_service: "ChatService" = Depends(get_chat_service),
|
||||
):
|
||||
"""Websocket endpoint for chat."""
|
||||
try:
|
||||
user = await get_current_user_for_websocket(websocket, db)
|
||||
await websocket.accept()
|
||||
if not user:
|
||||
await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
|
||||
elif not user.is_active:
|
||||
await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
|
||||
|
||||
if client_id in chat_service.cache_service:
|
||||
await chat_service.handle_websocket(client_id, websocket)
|
||||
else:
|
||||
# We accept the connection but close it immediately
|
||||
# if the flow is not built yet
|
||||
message = "Please, build the flow before sending messages"
|
||||
await websocket.close(code=status.WS_1011_INTERNAL_ERROR, reason=message)
|
||||
except WebSocketException as exc:
|
||||
logger.error(f"Websocket exrror: {exc}")
|
||||
await websocket.close(code=status.WS_1011_INTERNAL_ERROR, reason=str(exc))
|
||||
except Exception as exc:
|
||||
logger.error(f"Error in chat websocket: {exc}")
|
||||
message = exc.detail if isinstance(exc, HTTPException) else str(exc)
|
||||
if "Could not validate credentials" in str(exc):
|
||||
await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
|
||||
else:
|
||||
await websocket.close(code=status.WS_1011_INTERNAL_ERROR, reason=message)
|
||||
|
||||
|
||||
async def try_running_celery_task(vertex, user_id):
|
||||
# Try running the task in celery
|
||||
# and set the task_id to the local vertex
|
||||
|
|
@@ -95,7 +58,7 @@ async def get_vertices(
|
|||
# First, we need to check if the flow_id is in the cache
|
||||
graph = None
|
||||
if cache := chat_service.get_cache(flow_id):
|
||||
graph: Graph = cache.get("result")
|
||||
graph = cache.get("result")
|
||||
graph = build_and_cache_graph(flow_id, session, chat_service, graph)
|
||||
if stop_component_id:
|
||||
try:
|
||||
|
|
@@ -109,7 +72,9 @@ async def get_vertices(
|
|||
# Now vertices is a list of lists
|
||||
# We need to get the id of each vertex
|
||||
# and return the same structure but only with the ids
|
||||
return VerticesOrderResponse(ids=vertices)
|
||||
run_id = uuid.uuid4()
|
||||
graph.set_run_id(run_id)
|
||||
return VerticesOrderResponse(ids=vertices, run_id=run_id)
|
||||
|
||||
except Exception as exc:
|
||||
logger.error(f"Error checking build status: {exc}")
|
||||
|
|
@@ -122,87 +87,169 @@ async def build_vertex(
|
|||
flow_id: str,
|
||||
vertex_id: str,
|
||||
background_tasks: BackgroundTasks,
|
||||
inputs: Annotated[Optional[InputValueRequest], Body(embed=True)] = None,
|
||||
chat_service: "ChatService" = Depends(get_chat_service),
|
||||
current_user=Depends(get_current_active_user),
|
||||
):
|
||||
"""Build a vertex instead of the entire graph."""
|
||||
{"inputs": {"input_value": "some value"}}
|
||||
start_time = time.perf_counter()
|
||||
next_vertices_ids = []
|
||||
try:
|
||||
start_time = time.perf_counter()
|
||||
cache = chat_service.get_cache(flow_id)
|
||||
if not cache:
|
||||
# If there's no cache
|
||||
logger.warning(f"No cache found for {flow_id}. Building graph starting at {vertex_id}")
|
||||
graph = build_and_cache_graph(flow_id=flow_id, session=next(get_session()), chat_service=chat_service)
|
||||
logger.warning(
|
||||
f"No cache found for {flow_id}. Building graph starting at {vertex_id}"
|
||||
)
|
||||
graph = build_and_cache_graph(
|
||||
flow_id=flow_id, session=next(get_session()), chat_service=chat_service
|
||||
)
|
||||
else:
|
||||
graph = cache.get("result")
|
||||
result_dict = {}
|
||||
result_data_response = ResultDataResponse(results={})
|
||||
duration = ""
|
||||
|
||||
vertex = graph.get_vertex(vertex_id)
|
||||
try:
|
||||
if not vertex.pinned or not vertex._built:
|
||||
await vertex.build(user_id=current_user.id)
|
||||
params = vertex._built_object_repr()
|
||||
valid = True
|
||||
result_dict = vertex.get_built_result()
|
||||
# We need to set the artifacts to pass information
|
||||
# to the frontend
|
||||
vertex.set_artifacts()
|
||||
artifacts = vertex.artifacts
|
||||
result_dict = ResultData(
|
||||
results=result_dict,
|
||||
artifacts=artifacts,
|
||||
)
|
||||
vertex.set_result(result_dict)
|
||||
elif vertex.result is not None:
|
||||
inputs_dict = inputs.model_dump() if inputs else {}
|
||||
await vertex.build(user_id=current_user.id, inputs=inputs_dict)
|
||||
|
||||
if vertex.result is not None:
|
||||
params = vertex._built_object_repr()
|
||||
valid = True
|
||||
result_dict = vertex.result
|
||||
artifacts = vertex.artifacts
|
||||
else:
|
||||
raise ValueError(f"No result found for vertex {vertex_id}")
|
||||
if isinstance(vertex, RoutingVertex):
|
||||
if vertex._built_object is True:
|
||||
next_vertex_id = next(graph.next_vertex_to_build())
|
||||
else:
|
||||
next_vertex_id = None
|
||||
|
||||
chat_service.set_cache(flow_id, graph)
|
||||
result_data_response = ResultDataResponse(**result_dict.model_dump())
|
||||
|
||||
except Exception as exc:
|
||||
params = str(exc)
|
||||
logger.exception(f"Error building vertex: {exc}")
|
||||
params = format_exception_message(exc)
|
||||
valid = False
|
||||
result_dict = ResultData(results={})
|
||||
result_data_response = ResultDataResponse(results={})
|
||||
artifacts = {}
|
||||
# If there's an error building the vertex
|
||||
# we need to clear the cache
|
||||
chat_service.clear_cache(flow_id)
|
||||
|
||||
# Log the vertex build
|
||||
background_tasks.add_task(
|
||||
log_vertex_build,
|
||||
flow_id=flow_id,
|
||||
vertex_id=vertex_id,
|
||||
valid=valid,
|
||||
params=params,
|
||||
data=result_dict,
|
||||
artifacts=artifacts,
|
||||
)
|
||||
if not vertex.will_stream:
|
||||
background_tasks.add_task(
|
||||
log_vertex_build,
|
||||
flow_id=flow_id,
|
||||
vertex_id=vertex_id,
|
||||
valid=valid,
|
||||
params=params,
|
||||
data=result_data_response,
|
||||
artifacts=artifacts,
|
||||
)
|
||||
|
||||
timedelta = time.perf_counter() - start_time
|
||||
duration = format_elapsed_time(timedelta)
|
||||
result_dict.duration = duration
|
||||
result_dict.timedelta = timedelta
|
||||
result_data_response.duration = duration
|
||||
result_data_response.timedelta = timedelta
|
||||
vertex.add_build_time(timedelta)
|
||||
inactive_vertices = None
|
||||
if graph.inactive_vertices:
|
||||
inactive_vertices = list(graph.inactive_vertices)
|
||||
graph.reset_inactive_vertices()
|
||||
chat_service.set_cache(flow_id, graph)
|
||||
|
||||
return VertexBuildResponse(
|
||||
next_vertex_id=next_vertex_id,
|
||||
build_response = VertexBuildResponse(
|
||||
next_vertices_ids=next_vertices_ids,
|
||||
inactive_vertices=inactive_vertices,
|
||||
valid=valid,
|
||||
params=params,
|
||||
id=vertex.id,
|
||||
data=result_dict,
|
||||
data=result_data_response,
|
||||
)
|
||||
return build_response
|
||||
except Exception as exc:
|
||||
logger.error(f"Error building vertex: {exc}")
|
||||
logger.exception(exc)
|
||||
raise HTTPException(status_code=500, detail=str(exc)) from exc
|
||||
|
||||
|
||||
# Now onto an endpoint that is an SSE endpoint
|
||||
# it will receive a component_id and a flow_id
|
||||
#
|
||||
@router.get("/build/{flow_id}/{vertex_id}/stream", response_class=StreamingResponse)
|
||||
async def build_vertex_stream(
|
||||
flow_id: str,
|
||||
vertex_id: str,
|
||||
session_id: Optional[str] = None,
|
||||
chat_service: "ChatService" = Depends(get_chat_service),
|
||||
session_service: "SessionService" = Depends(get_session_service),
|
||||
):
|
||||
"""Build a vertex instead of the entire graph."""
|
||||
try:
|
||||
|
||||
async def stream_vertex():
|
||||
try:
|
||||
if not session_id:
|
||||
cache = chat_service.get_cache(flow_id)
|
||||
if not cache:
|
||||
# If there's no cache
|
||||
raise ValueError(f"No cache found for {flow_id}.")
|
||||
else:
|
||||
graph = cache.get("result")
|
||||
else:
|
||||
session_data = await session_service.load_session(
|
||||
session_id, flow_id=flow_id
|
||||
)
|
||||
graph, artifacts = session_data if session_data else (None, None)
|
||||
if not graph:
|
||||
raise ValueError(f"No graph found for {flow_id}.")
|
||||
|
||||
vertex: "ChatVertex" = graph.get_vertex(vertex_id)
|
||||
if not hasattr(vertex, "stream"):
|
||||
raise ValueError(f"Vertex {vertex_id} does not support streaming")
|
||||
if isinstance(vertex._built_result, str) and vertex._built_result:
|
||||
stream_data = StreamData(
|
||||
event="message",
|
||||
data={"message": f"Streaming vertex {vertex_id}"},
|
||||
)
|
||||
yield str(stream_data)
|
||||
stream_data = StreamData(
|
||||
event="message",
|
||||
data={"chunk": vertex._built_result},
|
||||
)
|
||||
yield str(stream_data)
|
||||
|
||||
elif not vertex.pinned or not vertex._built:
|
||||
logger.debug(f"Streaming vertex {vertex_id}")
|
||||
stream_data = StreamData(
|
||||
event="message",
|
||||
data={"message": f"Streaming vertex {vertex_id}"},
|
||||
)
|
||||
yield str(stream_data)
|
||||
async for chunk in vertex.stream():
|
||||
stream_data = StreamData(
|
||||
event="message",
|
||||
data={"chunk": chunk},
|
||||
)
|
||||
yield str(stream_data)
|
||||
elif vertex.result is not None:
|
||||
stream_data = StreamData(
|
||||
event="message",
|
||||
data={"chunk": vertex._built_result},
|
||||
)
|
||||
yield str(stream_data)
|
||||
else:
|
||||
raise ValueError(f"No result found for vertex {vertex_id}")
|
||||
|
||||
except Exception as exc:
|
||||
logger.error(f"Error building vertex: {exc}")
|
||||
yield str(StreamData(event="error", data={"error": str(exc)}))
|
||||
finally:
|
||||
logger.debug("Closing stream")
|
||||
yield str(StreamData(event="close", data={"message": "Stream closed"}))
|
||||
|
||||
return StreamingResponse(stream_vertex(), media_type="text/event-stream")
|
||||
except Exception as exc:
|
||||
raise HTTPException(status_code=500, detail="Error building vertex") from exc
|
||||
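Not part of the diff: a minimal client-side sketch of consuming the streaming endpoint added above. The host, the /api/v1 prefix, and the flow/vertex ids are placeholder assumptions, and it requires the httpx package.

import httpx

def consume_vertex_stream(base_url: str, flow_id: str, vertex_id: str) -> None:
    # Frames follow the format produced by StreamData.__str__:
    #   "event: <event>\ndata: <json>\n\n"
    url = f"{base_url}/api/v1/build/{flow_id}/{vertex_id}/stream"
    with httpx.stream("GET", url, timeout=None) as response:
        for line in response.iter_lines():
            if line.startswith("event:"):
                print("event:", line.removeprefix("event:").strip())
            elif line.startswith("data:"):
                print("data:", line.removeprefix("data:").strip())

consume_vertex_stream("http://localhost:7860", "<flow-id>", "<vertex-id>")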
|
|
|
|||
|
|
@@ -3,112 +3,39 @@ from typing import Annotated, Any, List, Optional, Union
|
|||
|
||||
import sqlalchemy as sa
|
||||
from fastapi import APIRouter, Body, Depends, HTTPException, UploadFile, status
|
||||
from loguru import logger
|
||||
from sqlmodel import Session, select
|
||||
|
||||
from langflow.api.utils import update_frontend_node_with_template_values
|
||||
from langflow.api.v1.schemas import (
|
||||
CustomComponentCode,
|
||||
PreloadResponse,
|
||||
InputValueRequest,
|
||||
ProcessResponse,
|
||||
TaskResponse,
|
||||
RunResponse,
|
||||
TaskStatusResponse,
|
||||
UploadFileResponse,
|
||||
)
|
||||
from langflow.interface.custom.custom_component import CustomComponent
|
||||
from langflow.interface.custom.directory_reader import DirectoryReader
|
||||
from langflow.interface.custom.utils import build_custom_component_template
|
||||
from langflow.processing.process import build_graph_and_generate_result, process_graph_cached, process_tweaks
|
||||
from langflow.processing.process import process_tweaks, run_graph
|
||||
from langflow.services.auth.utils import api_key_security, get_current_active_user
|
||||
from langflow.services.cache.utils import save_uploaded_file
|
||||
from langflow.services.database.models.flow import Flow
|
||||
from langflow.services.database.models.user.model import User
|
||||
from langflow.services.deps import get_session, get_session_service, get_settings_service, get_task_service
|
||||
from langflow.services.deps import (
|
||||
get_session,
|
||||
get_session_service,
|
||||
get_settings_service,
|
||||
get_task_service,
|
||||
)
|
||||
from langflow.services.session.service import SessionService
|
||||
from loguru import logger
|
||||
from sqlmodel import select
|
||||
|
||||
try:
|
||||
from langflow.worker import process_graph_cached_task
|
||||
except ImportError:
|
||||
|
||||
def process_graph_cached_task(*args, **kwargs):
|
||||
raise NotImplementedError("Celery is not installed")
|
||||
|
||||
|
||||
from langflow.services.task.service import TaskService
|
||||
from sqlmodel import Session
|
||||
|
||||
# build router
|
||||
router = APIRouter(tags=["Base"])
|
||||
|
||||
|
||||
async def process_graph_data(
|
||||
graph_data: dict,
|
||||
inputs: Optional[Union[List[dict], dict]] = None,
|
||||
tweaks: Optional[dict] = None,
|
||||
clear_cache: bool = False,
|
||||
session_id: Optional[str] = None,
|
||||
task_service: "TaskService" = Depends(get_task_service),
|
||||
sync: bool = True,
|
||||
):
|
||||
task_result: Any = None
|
||||
task_status = None
|
||||
if tweaks:
|
||||
try:
|
||||
graph_data = process_tweaks(graph_data, tweaks)
|
||||
except Exception as exc:
|
||||
logger.error(f"Error processing tweaks: {exc}")
|
||||
if sync:
|
||||
result = await process_graph_cached(
|
||||
graph_data,
|
||||
inputs,
|
||||
clear_cache,
|
||||
session_id,
|
||||
)
|
||||
task_id = str(id(result))
|
||||
if isinstance(result, dict) and "result" in result:
|
||||
task_result = result["result"]
|
||||
session_id = result["session_id"]
|
||||
elif hasattr(result, "result") and hasattr(result, "session_id"):
|
||||
task_result = result.result
|
||||
|
||||
session_id = result.session_id
|
||||
else:
|
||||
task_result = result
|
||||
else:
|
||||
logger.warning(
|
||||
"This is an experimental feature and may not work as expected."
|
||||
"Please report any issues to our GitHub repository."
|
||||
)
|
||||
if session_id is None:
|
||||
# Generate a session ID
|
||||
session_id = get_session_service().generate_key(session_id=session_id, data_graph=graph_data)
|
||||
task_id, task = await task_service.launch_task(
|
||||
process_graph_cached_task if task_service.use_celery else process_graph_cached,
|
||||
graph_data,
|
||||
inputs,
|
||||
clear_cache,
|
||||
session_id,
|
||||
)
|
||||
task_status = task.status
|
||||
if task.status == "FAILURE":
|
||||
logger.error(f"Task {task_id} failed: {task.traceback}")
|
||||
task_result = str(task._exception)
|
||||
else:
|
||||
task_result = task.result
|
||||
|
||||
if task_id:
|
||||
task_response = TaskResponse(id=task_id, href=f"api/v1/task/{task_id}")
|
||||
else:
|
||||
task_response = None
|
||||
|
||||
return ProcessResponse(
|
||||
result=task_result,
|
||||
status=task_status,
|
||||
task=task_response,
|
||||
session_id=session_id,
|
||||
backend=task_service.backend_name,
|
||||
)
|
||||
|
||||
|
||||
@router.get("/all", dependencies=[Depends(get_current_active_user)])
|
||||
def get_all(
|
||||
settings_service=Depends(get_settings_service),
|
||||
|
|
@@ -117,84 +44,91 @@ def get_all(
|
|||
|
||||
logger.debug("Building langchain types dict")
|
||||
try:
|
||||
return get_all_types_dict(settings_service)
|
||||
all_types_dict = get_all_types_dict(settings_service)
|
||||
return all_types_dict
|
||||
except Exception as exc:
|
||||
raise HTTPException(status_code=500, detail=str(exc)) from exc
|
||||
|
||||
|
||||
@router.post("/process/json", response_model=ProcessResponse)
|
||||
async def process_json(
|
||||
session: Annotated[Session, Depends(get_session)],
|
||||
data: dict,
|
||||
inputs: Optional[dict] = None,
|
||||
tweaks: Optional[dict] = None,
|
||||
clear_cache: Annotated[bool, Body(embed=True)] = False, # noqa: F821
|
||||
session_id: Annotated[Union[None, str], Body(embed=True)] = None, # noqa: F821
|
||||
task_service: "TaskService" = Depends(get_task_service),
|
||||
sync: Annotated[bool, Body(embed=True)] = True, # noqa: F821
|
||||
):
|
||||
try:
|
||||
return await process_graph_data(
|
||||
graph_data=data,
|
||||
inputs=inputs,
|
||||
tweaks=tweaks,
|
||||
clear_cache=clear_cache,
|
||||
session_id=session_id,
|
||||
task_service=task_service,
|
||||
sync=sync,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.exception(exc)
|
||||
raise HTTPException(status_code=500, detail=str(exc)) from exc
|
||||
|
||||
|
||||
# Endpoint to preload a graph
|
||||
@router.post("/process/preload/{flow_id}", response_model=PreloadResponse)
|
||||
async def preload_flow(
|
||||
@router.post(
|
||||
"/run/{flow_id}", response_model=RunResponse, response_model_exclude_none=True
|
||||
)
|
||||
async def run_flow_with_caching(
|
||||
session: Annotated[Session, Depends(get_session)],
|
||||
flow_id: str,
|
||||
session_id: Optional[str] = None,
|
||||
session_service: SessionService = Depends(get_session_service),
|
||||
inputs: Optional[InputValueRequest] = None,
|
||||
tweaks: Optional[dict] = None,
|
||||
stream: Annotated[bool, Body(embed=True)] = False, # noqa: F821
|
||||
session_id: Annotated[Union[None, str], Body(embed=True)] = None, # noqa: F821
|
||||
api_key_user: User = Depends(api_key_security),
|
||||
clear_session: Annotated[bool, Body(embed=True)] = False, # noqa: F821
|
||||
session_service: SessionService = Depends(get_session_service),
|
||||
):
|
||||
try:
|
||||
# Get the flow that matches the flow_id and belongs to the user
|
||||
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
|
||||
if clear_session:
|
||||
session_service.clear_session(session_id)
|
||||
# Check if the session exists
|
||||
session_data = await session_service.load_session(session_id)
|
||||
# Session data is a tuple of (graph, artifacts)
|
||||
# or (None, None) if the session is empty
|
||||
if isinstance(session_data, tuple):
|
||||
graph, artifacts = session_data
|
||||
is_clear = graph is None and artifacts is None
|
||||
else:
|
||||
is_clear = session_data is None
|
||||
return PreloadResponse(session_id=session_id, is_clear=is_clear)
|
||||
if inputs is not None:
|
||||
input_values_dict: dict[str, Union[str, list[str]]] = inputs.model_dump()
|
||||
else:
|
||||
if session_id is None:
|
||||
session_id = flow_id
|
||||
flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
|
||||
input_values_dict = {}
|
||||
|
||||
if session_id:
|
||||
session_data = await session_service.load_session(
|
||||
session_id, flow_id=flow_id
|
||||
)
|
||||
graph, artifacts = session_data if session_data else (None, None)
|
||||
task_result: Any = None
|
||||
if not graph:
|
||||
raise ValueError("Graph not found in the session")
|
||||
task_result, session_id = await run_graph(
|
||||
graph=graph,
|
||||
flow_id=flow_id,
|
||||
session_id=session_id,
|
||||
inputs=input_values_dict,
|
||||
artifacts=artifacts,
|
||||
session_service=session_service,
|
||||
stream=stream,
|
||||
)
|
||||
|
||||
else:
|
||||
# Get the flow that matches the flow_id and belongs to the user
|
||||
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
|
||||
flow = session.exec(
|
||||
select(Flow)
|
||||
.where(Flow.id == flow_id)
|
||||
.where(Flow.user_id == api_key_user.id)
|
||||
).first()
|
||||
if flow is None:
|
||||
raise ValueError(f"Flow {flow_id} not found")
|
||||
|
||||
if flow.data is None:
|
||||
raise ValueError(f"Flow {flow_id} has no data")
|
||||
graph_data = flow.data
|
||||
session_service.clear_session(session_id)
|
||||
# Load the graph using SessionService
|
||||
session_data = await session_service.load_session(session_id, graph_data)
|
||||
graph, artifacts = session_data if session_data else (None, None)
|
||||
if not graph:
|
||||
raise ValueError("Graph not found in the session")
|
||||
_ = await graph.build()
|
||||
session_service.update_session(session_id, (graph, artifacts))
|
||||
return PreloadResponse(session_id=session_id)
|
||||
except Exception as exc:
|
||||
logger.exception(exc)
|
||||
raise HTTPException(status_code=500, detail=str(exc)) from exc
|
||||
graph_data = process_tweaks(graph_data, tweaks or {})
|
||||
task_result, session_id = await run_graph(
|
||||
graph=graph_data,
|
||||
flow_id=flow_id,
|
||||
session_id=session_id,
|
||||
inputs=input_values_dict,
|
||||
artifacts={},
|
||||
session_service=session_service,
|
||||
stream=stream,
|
||||
)
|
||||
|
||||
return RunResponse(outputs=task_result, session_id=session_id)
|
||||
except sa.exc.StatementError as exc:
|
||||
# StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
|
||||
if "badly formed hexadecimal UUID string" in str(exc):
|
||||
# This means the Flow ID is not a valid UUID which means it can't find the flow
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)
|
||||
) from exc
|
||||
except ValueError as exc:
|
||||
if f"Flow {flow_id} not found" in str(exc):
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)
|
||||
) from exc
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(exc)
|
||||
) from exc
|
||||
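Not part of the diff: a rough sketch of calling the new /run/{flow_id} endpoint. The host, flow id, the x-api-key header, and the exact body keys are assumptions for illustration (they mirror the signature above), and it requires the requests package.

import requests

payload = {
    # InputValueRequest currently holds a single "input_value" field.
    "inputs": {"input_value": "Hello, what can you do?"},
    "stream": False,
    "clear_session": False,
}
response = requests.post(
    "http://localhost:7860/api/v1/run/<flow-id>",
    json=payload,
    headers={"x-api-key": "<langflow-api-key>"},
    timeout=60,
)
response.raise_for_status()
run = response.json()  # RunResponse: {"outputs": [...], "session_id": "..."}
print(run.get("session_id"), run.get("outputs"))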
|
||||
|
||||
@router.post(
|
||||
|
|
@@ -221,84 +155,16 @@ async def process(
|
|||
"""
|
||||
Endpoint to process an input with a given flow_id.
|
||||
"""
|
||||
|
||||
try:
|
||||
if session_id:
|
||||
session_data = await session_service.load_session(session_id)
|
||||
graph, artifacts = session_data if session_data else (None, None)
|
||||
task_result: Any = None
|
||||
task_status = None
|
||||
task_id = None
|
||||
if not graph:
|
||||
raise ValueError("Graph not found in the session")
|
||||
result = await build_graph_and_generate_result(
|
||||
graph=graph,
|
||||
inputs=inputs,
|
||||
artifacts=artifacts,
|
||||
session_id=session_id,
|
||||
session_service=session_service,
|
||||
)
|
||||
task_id = str(id(result))
|
||||
if isinstance(result, dict) and "result" in result:
|
||||
task_result = result["result"]
|
||||
session_id = result["session_id"]
|
||||
elif hasattr(result, "result") and hasattr(result, "session_id"):
|
||||
task_result = result.result
|
||||
|
||||
session_id = result.session_id
|
||||
else:
|
||||
task_result = result
|
||||
if task_id:
|
||||
task_response = TaskResponse(id=task_id, href=f"api/v1/task/{task_id}")
|
||||
else:
|
||||
task_response = None
|
||||
return ProcessResponse(
|
||||
result=task_result,
|
||||
status=task_status,
|
||||
task=task_response,
|
||||
session_id=session_id,
|
||||
backend=task_service.backend_name,
|
||||
)
|
||||
|
||||
else:
|
||||
if api_key_user is None:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Invalid API Key",
|
||||
)
|
||||
|
||||
# Get the flow that matches the flow_id and belongs to the user
|
||||
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
|
||||
flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
|
||||
if flow is None:
|
||||
raise ValueError(f"Flow {flow_id} not found")
|
||||
|
||||
if flow.data is None:
|
||||
raise ValueError(f"Flow {flow_id} has no data")
|
||||
graph_data = flow.data
|
||||
return await process_graph_data(
|
||||
graph_data=graph_data,
|
||||
inputs=inputs,
|
||||
tweaks=tweaks,
|
||||
clear_cache=clear_cache,
|
||||
session_id=session_id,
|
||||
task_service=task_service,
|
||||
sync=sync,
|
||||
)
|
||||
except sa.exc.StatementError as exc:
|
||||
# StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
|
||||
if "badly formed hexadecimal UUID string" in str(exc):
|
||||
# This means the Flow ID is not a valid UUID which means it can't find the flow
|
||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
|
||||
except ValueError as exc:
|
||||
if f"Flow {flow_id} not found" in str(exc):
|
||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
|
||||
else:
|
||||
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(exc)) from exc
|
||||
except Exception as e:
|
||||
# Log stack trace
|
||||
logger.exception(e)
|
||||
raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
# Raise a deprecation warning
|
||||
logger.warning(
|
||||
"The /process endpoint is deprecated and will be removed in a future version. "
|
||||
"Please use /run instead."
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail="The /process endpoint is deprecated and will be removed in a future version. "
|
||||
"Please use /run instead.",
|
||||
)
|
||||
|
||||
|
||||
@router.get("/task/{task_id}", response_model=TaskStatusResponse)
|
||||
|
|
@@ -364,12 +230,16 @@ async def custom_component(
|
|||
|
||||
built_frontend_node = build_custom_component_template(component, user_id=user.id)
|
||||
|
||||
built_frontend_node = update_frontend_node_with_template_values(built_frontend_node, raw_code.frontend_node)
|
||||
built_frontend_node = update_frontend_node_with_template_values(
|
||||
built_frontend_node, raw_code.frontend_node
|
||||
)
|
||||
return built_frontend_node
|
||||
|
||||
|
||||
@router.post("/custom_component/reload", status_code=HTTPStatus.OK)
|
||||
async def reload_custom_component(path: str, user: User = Depends(get_current_active_user)):
|
||||
async def reload_custom_component(
|
||||
path: str, user: User = Depends(get_current_active_user)
|
||||
):
|
||||
from langflow.interface.custom.utils import build_custom_component_template
|
||||
|
||||
try:
|
||||
|
|
@@ -391,6 +261,8 @@ async def custom_component_update(
|
|||
):
|
||||
component = CustomComponent(code=raw_code.code)
|
||||
|
||||
component_node = build_custom_component_template(component, user_id=user.id, update_field=raw_code.field)
|
||||
component_node = build_custom_component_template(
|
||||
component, user_id=user.id, update_field=raw_code.field
|
||||
)
|
||||
# Update the field
|
||||
return component_node
|
||||
|
|
|
|||
|
|
@@ -4,9 +4,8 @@ from pathlib import Path
|
|||
from typing import Any, Dict, List, Optional, Union
|
||||
from uuid import UUID
|
||||
|
||||
from pydantic import BaseModel, Field, field_serializer, field_validator
|
||||
from pydantic import BaseModel, Field, field_validator, model_serializer
|
||||
|
||||
from langflow.api.utils import serialize_field
|
||||
from langflow.services.database.models.api_key.model import ApiKeyRead
|
||||
from langflow.services.database.models.base import orjson_dumps
|
||||
from langflow.services.database.models.flow import FlowCreate, FlowRead
|
||||
|
|
@@ -22,26 +21,6 @@ class BuildStatus(Enum):
|
|||
IN_PROGRESS = "in_progress"
|
||||
|
||||
|
||||
class GraphData(BaseModel):
|
||||
"""Data inside the exported flow."""
|
||||
|
||||
nodes: List[Dict[str, Any]]
|
||||
edges: List[Dict[str, Any]]
|
||||
|
||||
|
||||
class ExportedFlow(BaseModel):
|
||||
"""Exported flow from Langflow."""
|
||||
|
||||
description: str
|
||||
name: str
|
||||
id: str
|
||||
data: GraphData
|
||||
|
||||
|
||||
class InputRequest(BaseModel):
|
||||
input: dict
|
||||
|
||||
|
||||
class TweaksRequest(BaseModel):
|
||||
tweaks: Optional[Dict[str, Dict[str, str]]] = Field(default_factory=dict)
|
||||
|
||||
|
|
@@ -67,6 +46,26 @@ class ProcessResponse(BaseModel):
|
|||
backend: Optional[str] = None
|
||||
|
||||
|
||||
class RunResponse(BaseModel):
|
||||
"""Run response schema."""
|
||||
|
||||
outputs: Optional[List[Any]] = None
|
||||
session_id: Optional[str] = None
|
||||
|
||||
@model_serializer(mode="wrap")
|
||||
def serialize(self, handler):
|
||||
# Serialize all the outputs if they are base models
|
||||
if self.outputs:
|
||||
serialized_outputs = []
|
||||
for output in self.outputs:
|
||||
if isinstance(output, BaseModel):
|
||||
serialized_outputs.append(output.model_dump(exclude_none=True))
|
||||
else:
|
||||
serialized_outputs.append(output)
|
||||
self.outputs = serialized_outputs
|
||||
return handler(self)
|
||||
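Not part of the diff: a self-contained sketch of what the wrap-mode serializer above does, using a hypothetical FakeOutput model. Nested models in outputs are dumped with exclude_none before the default handler serializes the response.

from typing import Any, List, Optional
from pydantic import BaseModel, model_serializer

class RunResponseSketch(BaseModel):
    # Trimmed copy of the RunResponse schema above, for illustration only.
    outputs: Optional[List[Any]] = None
    session_id: Optional[str] = None

    @model_serializer(mode="wrap")
    def serialize(self, handler):
        if self.outputs:
            self.outputs = [
                o.model_dump(exclude_none=True) if isinstance(o, BaseModel) else o
                for o in self.outputs
            ]
        return handler(self)

class FakeOutput(BaseModel):
    text: str
    extra: Optional[str] = None

print(RunResponseSketch(outputs=[FakeOutput(text="hi")], session_id="abc").model_dump())
# -> {'outputs': [{'text': 'hi'}], 'session_id': 'abc'}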
|
||||
|
||||
class PreloadResponse(BaseModel):
|
||||
"""Preload response schema."""
|
||||
|
||||
|
|
@@ -74,9 +73,6 @@ class PreloadResponse(BaseModel):
|
|||
is_clear: Optional[bool] = None
|
||||
|
||||
|
||||
# TaskStatusResponse(
|
||||
# status=task.status, result=task.result if task.ready() else None
|
||||
# )
|
||||
class TaskStatusResponse(BaseModel):
|
||||
"""Task status response schema."""
|
||||
|
||||
|
|
@@ -162,7 +158,9 @@ class StreamData(BaseModel):
|
|||
data: dict
|
||||
|
||||
def __str__(self) -> str:
|
||||
return f"event: {self.event}\ndata: {orjson_dumps(self.data, indent_2=False)}\n\n"
|
||||
return (
|
||||
f"event: {self.event}\ndata: {orjson_dumps(self.data, indent_2=False)}\n\n"
|
||||
)
|
||||
|
||||
|
||||
class CustomComponentCode(BaseModel):
|
||||
|
|
@@ -219,28 +217,24 @@ class ApiKeyCreateRequest(BaseModel):
|
|||
|
||||
class VerticesOrderResponse(BaseModel):
|
||||
ids: List[List[str]]
|
||||
run_id: UUID
|
||||
|
||||
|
||||
class ResultData(BaseModel):
|
||||
class ResultDataResponse(BaseModel):
|
||||
results: Optional[Any] = Field(default_factory=dict)
|
||||
artifacts: Optional[Any] = Field(default_factory=dict)
|
||||
timedelta: Optional[float] = None
|
||||
duration: Optional[str] = None
|
||||
|
||||
@field_serializer("results")
|
||||
def serialize_results(self, value):
|
||||
if isinstance(value, dict):
|
||||
return {key: serialize_field(val) for key, val in value.items()}
|
||||
return serialize_field(value)
|
||||
|
||||
|
||||
class VertexBuildResponse(BaseModel):
|
||||
id: Optional[str] = None
|
||||
next_vertex_id: Optional[str] = None
|
||||
next_vertices_ids: Optional[List[str]] = None
|
||||
inactive_vertices: Optional[List[str]] = None
|
||||
valid: bool
|
||||
params: Optional[str]
|
||||
"""JSON string of the params."""
|
||||
data: ResultData
|
||||
data: ResultDataResponse
|
||||
"""Mapping of vertex ids to result dict containing the param name and result value."""
|
||||
timestamp: Optional[datetime] = Field(default_factory=datetime.utcnow)
|
||||
"""Timestamp of the build."""
|
||||
|
|
@@ -248,3 +242,7 @@ class VertexBuildResponse(BaseModel):
|
|||
|
||||
class VerticesBuiltResponse(BaseModel):
|
||||
vertices: List[VertexBuildResponse]
|
||||
|
||||
|
||||
class InputValueRequest(BaseModel):
|
||||
input_value: str
|
||||
|
|
|
|||
|
|
@@ -16,6 +16,7 @@ from langflow.field_typing.range_spec import RangeSpec
|
|||
class ConversationalAgent(CustomComponent):
|
||||
display_name: str = "OpenAI Conversational Agent"
|
||||
description: str = "Conversational Agent that can use OpenAI's function calling API"
|
||||
icon = "OpenAI"
|
||||
|
||||
def build_config(self):
|
||||
openai_function_models = [
|
||||
|
|
|
|||
|
|
@@ -1,9 +1,9 @@
|
|||
from typing import Callable, Optional, Union
|
||||
from typing import Optional
|
||||
|
||||
from langchain.chains import ConversationChain
|
||||
|
||||
from langflow import CustomComponent
|
||||
from langflow.field_typing import BaseLanguageModel, BaseMemory, Chain, Text
|
||||
from langflow.field_typing import BaseLanguageModel, BaseMemory, Text
|
||||
|
||||
|
||||
class ConversationChainComponent(CustomComponent):
|
||||
|
|
@@ -23,10 +23,10 @@ class ConversationChainComponent(CustomComponent):
|
|||
|
||||
def build(
|
||||
self,
|
||||
inputs: str,
|
||||
input_value: Text,
|
||||
llm: BaseLanguageModel,
|
||||
memory: Optional[BaseMemory] = None,
|
||||
) -> Union[Chain, Callable, Text]:
|
||||
) -> Text:
|
||||
if memory is None:
|
||||
chain = ConversationChain(llm=llm)
|
||||
else:
|
||||
|
|
|
|||
|
|
@@ -1,8 +1,14 @@
|
|||
from typing import Callable, Optional, Union
|
||||
from typing import Optional
|
||||
|
||||
from langchain.chains import LLMChain
|
||||
|
||||
from langflow import CustomComponent
|
||||
from langflow.field_typing import BaseLanguageModel, BaseMemory, BasePromptTemplate, Chain, Text
|
||||
from langflow.field_typing import (
|
||||
BaseLanguageModel,
|
||||
BaseMemory,
|
||||
BasePromptTemplate,
|
||||
Text,
|
||||
)
|
||||
|
||||
|
||||
class LLMChainComponent(CustomComponent):
|
||||
|
|
@@ -22,5 +28,10 @@ class LLMChainComponent(CustomComponent):
|
|||
prompt: BasePromptTemplate,
|
||||
llm: BaseLanguageModel,
|
||||
memory: Optional[BaseMemory] = None,
|
||||
) -> Union[Chain, Callable, Text]:
|
||||
return LLMChain(prompt=prompt, llm=llm, memory=memory)
|
||||
) -> Text:
|
||||
runnable = LLMChain(prompt=prompt, llm=llm, memory=memory)
|
||||
result_dict = runnable.invoke({})
|
||||
output_key = runnable.output_key
|
||||
result = result_dict[output_key]
|
||||
self.status = result
|
||||
return result
|
||||
|
|
|
|||
|
|
@@ -1,8 +1,7 @@
|
|||
from typing import Callable, Union
|
||||
|
||||
from langchain.chains import LLMCheckerChain
|
||||
|
||||
from langflow import CustomComponent
|
||||
from langflow.field_typing import BaseLanguageModel, Chain
|
||||
from langflow.field_typing import BaseLanguageModel, Text
|
||||
|
||||
|
||||
class LLMCheckerChainComponent(CustomComponent):
|
||||
|
|
@@ -17,6 +16,12 @@ class LLMCheckerChainComponent(CustomComponent):
|
|||
|
||||
def build(
|
||||
self,
|
||||
input_value: Text,
|
||||
llm: BaseLanguageModel,
|
||||
) -> Union[Chain, Callable]:
|
||||
return LLMCheckerChain.from_llm(llm=llm)
|
||||
) -> Text:
|
||||
chain = LLMCheckerChain.from_llm(llm=llm)
|
||||
response = chain.invoke({chain.input_key: input_value})
|
||||
result = response.get(chain.output_key, "")
|
||||
result_str = Text(result)
|
||||
self.status = result_str
|
||||
return result_str
|
||||
|
|
|
|||
|
|
@@ -1,9 +1,9 @@
|
|||
from typing import Callable, Optional, Union
|
||||
from typing import Optional
|
||||
|
||||
from langchain.chains import LLMChain, LLMMathChain
|
||||
|
||||
from langflow import CustomComponent
|
||||
from langflow.field_typing import BaseLanguageModel, BaseMemory, Chain
|
||||
from langflow.field_typing import BaseLanguageModel, BaseMemory, Text
|
||||
|
||||
|
||||
class LLMMathChainComponent(CustomComponent):
|
||||
|
|
@@ -22,10 +22,22 @@ class LLMMathChainComponent(CustomComponent):
|
|||
|
||||
def build(
|
||||
self,
|
||||
input_value: Text,
|
||||
llm: BaseLanguageModel,
|
||||
llm_chain: LLMChain,
|
||||
input_key: str = "question",
|
||||
output_key: str = "answer",
|
||||
memory: Optional[BaseMemory] = None,
|
||||
) -> Union[LLMMathChain, Callable, Chain]:
|
||||
return LLMMathChain(llm=llm, llm_chain=llm_chain, input_key=input_key, output_key=output_key, memory=memory)
|
||||
) -> Text:
|
||||
chain = LLMMathChain(
|
||||
llm=llm,
|
||||
llm_chain=llm_chain,
|
||||
input_key=input_key,
|
||||
output_key=output_key,
|
||||
memory=memory,
|
||||
)
|
||||
response = chain.invoke({input_key: input_value})
|
||||
result = response.get(output_key)
|
||||
result_str = Text(result)
|
||||
self.status = result_str
|
||||
return result_str
|
||||
|
|
|
|||
|
|
@@ -1,32 +0,0 @@
|
|||
from langchain.llms.base import BaseLLM
|
||||
from langchain.prompts import PromptTemplate
|
||||
from langchain_core.messages import BaseMessage
|
||||
from langflow import CustomComponent
|
||||
from langflow.field_typing import Text
|
||||
|
||||
|
||||
class PromptRunner(CustomComponent):
|
||||
display_name: str = "Prompt Runner"
|
||||
description: str = "Run a Chain with the given PromptTemplate"
|
||||
beta: bool = True
|
||||
field_config = {
|
||||
"llm": {"display_name": "LLM"},
|
||||
"prompt": {
|
||||
"display_name": "Prompt Template",
|
||||
"info": "Make sure the prompt has all variables filled.",
|
||||
},
|
||||
"code": {"show": False},
|
||||
}
|
||||
|
||||
def build(self, llm: BaseLLM, prompt: PromptTemplate, inputs: dict = {}) -> Text:
|
||||
chain = prompt | llm
|
||||
# The input is an empty dict because the prompt is already filled
|
||||
result_message: BaseMessage = chain.invoke(input=inputs)
|
||||
if hasattr(result_message, "content"):
|
||||
result: str = result_message.content
|
||||
elif isinstance(result_message, str):
|
||||
result = result_message
|
||||
else:
|
||||
result = str(result_message)
|
||||
self.repr_value = result
|
||||
return result
|
||||
|
|
@@ -1,8 +1,9 @@
|
|||
from typing import Callable, Optional, Union
|
||||
from typing import Optional
|
||||
|
||||
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
|
||||
from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA
|
||||
from langchain.chains.retrieval_qa.base import RetrievalQA
|
||||
from langchain_core.documents import Document
|
||||
|
||||
from langflow import CustomComponent
|
||||
from langflow.field_typing import BaseMemory, BaseRetriever, Text
|
||||
|
||||
|
|
@@ -19,19 +20,22 @@ class RetrievalQAComponent(CustomComponent):
|
|||
"input_key": {"display_name": "Input Key", "advanced": True},
|
||||
"output_key": {"display_name": "Output Key", "advanced": True},
|
||||
"return_source_documents": {"display_name": "Return Source Documents"},
|
||||
"inputs": {"display_name": "Input", "input_types": ["Text", "Document"]},
|
||||
"input_value": {
|
||||
"display_name": "Input",
|
||||
"input_types": ["Text", "Document"],
|
||||
},
|
||||
}
|
||||
|
||||
def build(
|
||||
self,
|
||||
combine_documents_chain: BaseCombineDocumentsChain,
|
||||
retriever: BaseRetriever,
|
||||
inputs: str = "",
|
||||
input_value: str = "",
|
||||
memory: Optional[BaseMemory] = None,
|
||||
input_key: str = "query",
|
||||
output_key: str = "result",
|
||||
return_source_documents: bool = True,
|
||||
) -> Union[BaseRetrievalQA, Callable, Text]:
|
||||
) -> Text:
|
||||
runnable = RetrievalQA(
|
||||
combine_documents_chain=combine_documents_chain,
|
||||
retriever=retriever,
|
||||
|
|
@@ -40,11 +44,19 @@ class RetrievalQAComponent(CustomComponent):
|
|||
output_key=output_key,
|
||||
return_source_documents=return_source_documents,
|
||||
)
|
||||
if isinstance(inputs, Document):
|
||||
inputs = inputs.page_content
|
||||
if isinstance(input_value, Document):
|
||||
input_value = input_value.page_content
|
||||
self.status = runnable
|
||||
result = runnable.invoke({input_key: inputs})
|
||||
result = runnable.invoke({input_key: input_value})
|
||||
result = result.content if hasattr(result, "content") else result
|
||||
# Result is a dict with keys "query", "result" and "source_documents"
|
||||
# for now we just return the result
|
||||
return result.get("result")
|
||||
records = self.to_records(result.get("source_documents"))
|
||||
references_str = ""
|
||||
if return_source_documents:
|
||||
references_str = self.create_references_from_records(records)
|
||||
result_str = result.get("result", "")
|
||||
|
||||
final_result = "\n".join([Text(result_str), references_str])
|
||||
self.status = final_result
|
||||
return final_result # OK
|
||||
|
|
|
|||
|
|
@@ -1,11 +1,10 @@
|
|||
from typing import Optional
|
||||
|
||||
from langchain.chains import RetrievalQAWithSourcesChain
|
||||
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
|
||||
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
|
||||
from langchain_core.documents import Document
|
||||
|
||||
from langflow import CustomComponent
|
||||
from langflow.field_typing import BaseLanguageModel, BaseMemory, BaseRetriever
|
||||
from langflow.field_typing import BaseLanguageModel, BaseMemory, BaseRetriever, Text
|
||||
|
||||
|
||||
class RetrievalQAWithSourcesChainComponent(CustomComponent):
|
||||
|
|
@@ -18,25 +17,42 @@ class RetrievalQAWithSourcesChainComponent(CustomComponent):
|
|||
"chain_type": {
|
||||
"display_name": "Chain Type",
|
||||
"options": ["stuff", "map_reduce", "map_rerank", "refine"],
|
||||
"info": "The type of chain to use to combined Documents.",
|
||||
},
|
||||
"memory": {"display_name": "Memory"},
|
||||
"return_source_documents": {"display_name": "Return Source Documents"},
|
||||
"retriever": {"display_name": "Retriever"},
|
||||
}
|
||||
|
||||
def build(
|
||||
self,
|
||||
input_value: Text,
|
||||
retriever: BaseRetriever,
|
||||
llm: BaseLanguageModel,
|
||||
combine_documents_chain: BaseCombineDocumentsChain,
|
||||
chain_type: str,
|
||||
memory: Optional[BaseMemory] = None,
|
||||
return_source_documents: Optional[bool] = True,
|
||||
) -> BaseQAWithSourcesChain:
|
||||
return RetrievalQAWithSourcesChain.from_chain_type(
|
||||
) -> Text:
|
||||
runnable = RetrievalQAWithSourcesChain.from_chain_type(
|
||||
llm=llm,
|
||||
chain_type=chain_type,
|
||||
combine_documents_chain=combine_documents_chain,
|
||||
memory=memory,
|
||||
return_source_documents=return_source_documents,
|
||||
retriever=retriever,
|
||||
)
|
||||
if isinstance(input_value, Document):
|
||||
input_value = input_value.page_content
|
||||
self.status = runnable
|
||||
input_key = runnable.input_keys[0]
|
||||
result = runnable.invoke({input_key: input_value})
|
||||
result = result.content if hasattr(result, "content") else result
|
||||
# Result is a dict with keys "query", "result" and "source_documents"
|
||||
# for now we just return the result
|
||||
records = self.to_records(result.get("source_documents"))
|
||||
references_str = ""
|
||||
if return_source_documents:
|
||||
references_str = self.create_references_from_records(records)
|
||||
result_str = Text(result.get("answer", ""))
|
||||
final_result = "\n".join([result_str, references_str])
|
||||
self.status = final_result
|
||||
return final_result
|
||||
|
|
|
|||
|
|
@@ -1,25 +0,0 @@
|
|||
from langflow import CustomComponent
|
||||
from typing import Callable, Union
|
||||
from langflow.field_typing import BasePromptTemplate, BaseLanguageModel, Chain
|
||||
from langchain_community.utilities.sql_database import SQLDatabase
|
||||
from langchain_experimental.sql.base import SQLDatabaseChain
|
||||
|
||||
|
||||
class SQLDatabaseChainComponent(CustomComponent):
|
||||
display_name = "SQLDatabaseChain"
|
||||
description = ""
|
||||
|
||||
def build_config(self):
|
||||
return {
|
||||
"db": {"display_name": "Database"},
|
||||
"llm": {"display_name": "LLM"},
|
||||
"prompt": {"display_name": "Prompt"},
|
||||
}
|
||||
|
||||
def build(
|
||||
self,
|
||||
db: SQLDatabase,
|
||||
llm: BaseLanguageModel,
|
||||
prompt: BasePromptTemplate,
|
||||
) -> Union[Chain, Callable, SQLDatabaseChain]:
|
||||
return SQLDatabaseChain.from_llm(llm=llm, db=db, prompt=prompt)
|
||||
57
src/backend/langflow/components/chains/SQLGenerator.py
Normal file
57
src/backend/langflow/components/chains/SQLGenerator.py
Normal file
|
|
@@ -0,0 +1,57 @@
|
|||
from typing import Optional
|
||||
|
||||
from langchain.chains import create_sql_query_chain
|
||||
from langchain_community.utilities.sql_database import SQLDatabase
|
||||
from langchain_core.prompts import PromptTemplate
|
||||
from langchain_core.runnables import Runnable
|
||||
|
||||
from langflow import CustomComponent
|
||||
from langflow.field_typing import BaseLanguageModel, Text
|
||||
|
||||
|
||||
class SQLGeneratorComponent(CustomComponent):
|
||||
display_name = "Natural Language to SQL"
|
||||
description = "Generate SQL from natural language."
|
||||
|
||||
def build_config(self):
|
||||
return {
|
||||
"db": {"display_name": "Database"},
|
||||
"llm": {"display_name": "LLM"},
|
||||
"prompt": {
|
||||
"display_name": "Prompt",
|
||||
"info": "The prompt must contain `{question}`.",
|
||||
},
|
||||
"top_k": {
|
||||
"display_name": "Top K",
|
||||
"info": "The number of results per select statement to return. If 0, no limit.",
|
||||
},
|
||||
}
|
||||
|
||||
def build(
|
||||
self,
|
||||
input_value: Text,
|
||||
db: SQLDatabase,
|
||||
llm: BaseLanguageModel,
|
||||
top_k: int = 5,
|
||||
prompt: Optional[Text] = None,
|
||||
) -> Text:
|
||||
if prompt:
|
||||
prompt_template = PromptTemplate.from_template(template=prompt)
|
||||
else:
|
||||
prompt_template = None
|
||||
|
||||
if top_k < 1:
|
||||
raise ValueError("Top K must be greater than 0.")
|
||||
|
||||
if not prompt_template:
|
||||
sql_query_chain = create_sql_query_chain(llm=llm, db=db, k=top_k)
|
||||
else:
|
||||
# Check if {question} is in the prompt
|
||||
if "{question}" not in prompt_template.template or "question" not in prompt_template.input_variables:
|
||||
raise ValueError("Prompt must contain `{question}` to be used with Natural Language to SQL.")
|
||||
sql_query_chain = create_sql_query_chain(llm=llm, db=db, prompt=prompt_template, k=top_k)
|
||||
query_writer: Runnable = sql_query_chain | {"query": lambda x: x.replace("SQLQuery:", "").strip()}
|
||||
response = query_writer.invoke({"question": input_value})
|
||||
query = response.get("query")
|
||||
self.status = query
|
||||
return query
|
||||
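Not part of the diff: a standalone sketch of the LangChain calls the new component wraps. The SQLite file and the ChatOpenAI model are assumptions for illustration; any BaseLanguageModel and SQLDatabase would do.

from langchain.chains import create_sql_query_chain
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_openai import ChatOpenAI  # assumed model provider

db = SQLDatabase.from_uri("sqlite:///example.db")
llm = ChatOpenAI(temperature=0)
chain = create_sql_query_chain(llm=llm, db=db, k=5)

# The component strips the "SQLQuery:" prefix some models prepend.
raw = chain.invoke({"question": "How many rows are in the users table?"})
print(raw.replace("SQLQuery:", "").strip())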
|
|
@@ -1,42 +0,0 @@
|
|||
from typing import Any, Dict, List
|
||||
|
||||
from langchain.docstore.document import Document
|
||||
from langchain.document_loaders.directory import DirectoryLoader
|
||||
from langflow import CustomComponent
|
||||
|
||||
|
||||
class DirectoryLoaderComponent(CustomComponent):
|
||||
display_name = "DirectoryLoader"
|
||||
description = "Load from a directory."
|
||||
|
||||
def build_config(self) -> Dict[str, Any]:
|
||||
return {
|
||||
"glob": {"display_name": "Glob Pattern", "value": "**/*.txt"},
|
||||
"load_hidden": {"display_name": "Load Hidden Files", "value": False, "advanced": True},
|
||||
"max_concurrency": {"display_name": "Max Concurrency", "value": 10, "advanced": True},
|
||||
"metadata": {"display_name": "Metadata", "value": {}},
|
||||
"path": {"display_name": "Local Directory"},
|
||||
"recursive": {"display_name": "Recursive", "value": True, "advanced": True},
|
||||
"silent_errors": {"display_name": "Silent Errors", "value": False, "advanced": True},
|
||||
"use_multithreading": {"display_name": "Use Multithreading", "value": True, "advanced": True},
|
||||
}
|
||||
|
||||
def build(
|
||||
self,
|
||||
glob: str,
|
||||
path: str,
|
||||
max_concurrency: int = 2,
|
||||
load_hidden: bool = False,
|
||||
recursive: bool = True,
|
||||
silent_errors: bool = False,
|
||||
use_multithreading: bool = True,
|
||||
) -> List[Document]:
|
||||
return DirectoryLoader(
|
||||
glob=glob,
|
||||
path=path,
|
||||
load_hidden=load_hidden,
|
||||
max_concurrency=max_concurrency,
|
||||
recursive=recursive,
|
||||
silent_errors=silent_errors,
|
||||
use_multithreading=use_multithreading,
|
||||
).load()
|
||||
|
|
@@ -1,5 +1,7 @@
|
|||
from langchain_core.documents import Document
|
||||
from typing import List
|
||||
|
||||
from langflow import CustomComponent
|
||||
from langflow.schema import Record
|
||||
from langflow.utils.constants import LOADERS_INFO
|
||||
|
||||
|
||||
|
|
@@ -9,7 +11,9 @@ class FileLoaderComponent(CustomComponent):
|
|||
beta = True
|
||||
|
||||
def build_config(self):
|
||||
loader_options = ["Automatic"] + [loader_info["name"] for loader_info in LOADERS_INFO]
|
||||
loader_options = ["Automatic"] + [
|
||||
loader_info["name"] for loader_info in LOADERS_INFO
|
||||
]
|
||||
|
||||
file_types = []
|
||||
suffixes = []
|
||||
|
|
@@ -38,6 +42,7 @@ class FileLoaderComponent(CustomComponent):
|
|||
"srt",
|
||||
"eml",
|
||||
"md",
|
||||
"mdx",
|
||||
"pptx",
|
||||
"docx",
|
||||
],
|
||||
|
|
@@ -55,6 +60,7 @@ class FileLoaderComponent(CustomComponent):
|
|||
".srt",
|
||||
".eml",
|
||||
".md",
|
||||
".mdx",
|
||||
".pptx",
|
||||
".docx",
|
||||
],
|
||||
|
|
@@ -71,10 +77,10 @@ class FileLoaderComponent(CustomComponent):
|
|||
"code": {"show": False},
|
||||
}
|
||||
|
||||
def build(self, file_path: str, loader: str) -> Document:
|
||||
def build(self, file_path: str, loader: str) -> List[Record]:
|
||||
file_type = file_path.split(".")[-1]
|
||||
|
||||
# Map the selected loader name to its info
|
||||
# Map the loader to the correct loader class
|
||||
selected_loader_info = None
|
||||
for loader_info in LOADERS_INFO:
|
||||
if loader_info["name"] == loader:
|
||||
|
|
@@ -85,7 +91,7 @@ class FileLoaderComponent(CustomComponent):
|
|||
raise ValueError(f"Loader {loader} not found in the loader info list")
|
||||
|
||||
if loader == "Automatic":
|
||||
# Determine the loader automatically from the file extension
|
||||
# Determine the loader based on the file type
|
||||
default_loader_info = None
|
||||
for info in LOADERS_INFO:
|
||||
if "defaultFor" in info and file_type in info["defaultFor"]:
|
||||
|
|
@@ -99,15 +105,20 @@ class FileLoaderComponent(CustomComponent):
|
|||
if isinstance(selected_loader_info, dict):
|
||||
loader_import: str = selected_loader_info["import"]
|
||||
else:
|
||||
raise ValueError(f"Loader info for {loader} is not a dict\nLoader info:\n{selected_loader_info}")
|
||||
raise ValueError(
|
||||
f"Loader info for {loader} is not a dict\nLoader info:\n{selected_loader_info}"
|
||||
)
|
||||
module_name, class_name = loader_import.rsplit(".", 1)
|
||||
|
||||
try:
|
||||
# Import the loader dynamically
|
||||
# Import the loader class
|
||||
loader_module = __import__(module_name, fromlist=[class_name])
|
||||
loader_instance = getattr(loader_module, class_name)
|
||||
except ImportError as e:
|
||||
raise ValueError(f"Loader {loader} could not be imported\nLoader info:\n{selected_loader_info}") from e
|
||||
raise ValueError(
|
||||
f"Loader {loader} could not be imported\nLoader info:\n{selected_loader_info}"
|
||||
) from e
|
||||
|
||||
result = loader_instance(file_path=file_path)
|
||||
return result.load()
|
||||
docs = result.load()
|
||||
return self.to_records(docs)
|
||||
|
|
|
|||
161
src/backend/langflow/components/documentloaders/GatherRecords.py
Normal file
161
src/backend/langflow/components/documentloaders/GatherRecords.py
Normal file
|
|
@@ -0,0 +1,161 @@
|
|||
from concurrent import futures
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Text
|
||||
|
||||
from langflow import CustomComponent
|
||||
from langflow.schema import Record
|
||||
|
||||
|
||||
class GatherRecordsComponent(CustomComponent):
|
||||
display_name = "Gather Records"
|
||||
description = "Gather records from a directory."
|
||||
|
||||
def build_config(self) -> Dict[str, Any]:
|
||||
return {
|
||||
"path": {"display_name": "Path"},
|
||||
"types": {
|
||||
"display_name": "Types",
|
||||
"info": "File types to load. Leave empty to load all types.",
|
||||
},
|
||||
"depth": {"display_name": "Depth", "info": "Depth to search for files."},
|
||||
"max_concurrency": {"display_name": "Max Concurrency", "advanced": True},
|
||||
"load_hidden": {
|
||||
"display_name": "Load Hidden",
|
||||
"advanced": True,
|
||||
"info": "If true, hidden files will be loaded.",
|
||||
},
|
||||
"recursive": {
|
||||
"display_name": "Recursive",
|
||||
"advanced": True,
|
||||
"info": "If true, the search will be recursive.",
|
||||
},
|
||||
"silent_errors": {
|
||||
"display_name": "Silent Errors",
|
||||
"advanced": True,
|
||||
"info": "If true, errors will not raise an exception.",
|
||||
},
|
||||
"use_multithreading": {
|
||||
"display_name": "Use Multithreading",
|
||||
"advanced": True,
|
||||
},
|
||||
}
|
||||
|
||||
def is_hidden(self, path: Path) -> bool:
|
||||
return path.name.startswith(".")
|
||||
|
||||
def retrieve_file_paths(
|
||||
self,
|
||||
path: str,
|
||||
types: List[str],
|
||||
load_hidden: bool,
|
||||
recursive: bool,
|
||||
depth: int,
|
||||
) -> List[str]:
|
||||
path_obj = Path(path)
|
||||
        if not path_obj.exists() or not path_obj.is_dir():
            raise ValueError(f"Path {path} must exist and be a directory.")

        def match_types(p: Path) -> bool:
            return any(p.suffix == f".{t}" for t in types) if types else True

        def is_not_hidden(p: Path) -> bool:
            return not self.is_hidden(p) or load_hidden

        def walk_level(directory: Path, max_depth: int):
            directory = directory.resolve()
            prefix_length = len(directory.parts)
            for p in directory.rglob("*" if recursive else "[!.]*"):
                if len(p.parts) - prefix_length <= max_depth:
                    yield p

        glob = "**/*" if recursive else "*"
        paths = walk_level(path_obj, depth) if depth else path_obj.glob(glob)
        file_paths = [
            Text(p)
            for p in paths
            if p.is_file() and match_types(p) and is_not_hidden(p)
        ]

        return file_paths

    def parse_file_to_record(
        self, file_path: str, silent_errors: bool
    ) -> Optional[Record]:
        # Use the partition function to load the file
        from unstructured.partition.auto import partition  # type: ignore

        try:
            elements = partition(file_path)
        except Exception as e:
            if not silent_errors:
                raise ValueError(f"Error loading file {file_path}: {e}") from e
            return None

        # Create a Record
        text = "\n\n".join([Text(el) for el in elements])
        metadata = elements.metadata if hasattr(elements, "metadata") else {}
        metadata["file_path"] = file_path
        record = Record(text=text, data=metadata)
        return record

    def get_elements(
        self,
        file_paths: List[str],
        silent_errors: bool,
        max_concurrency: int,
        use_multithreading: bool,
    ) -> List[Optional[Record]]:
        if use_multithreading:
            records = self.parallel_load_records(
                file_paths, silent_errors, max_concurrency
            )
        else:
            records = [
                self.parse_file_to_record(file_path, silent_errors)
                for file_path in file_paths
            ]
        records = list(filter(None, records))
        return records

    def parallel_load_records(
        self, file_paths: List[str], silent_errors: bool, max_concurrency: int
    ) -> List[Optional[Record]]:
        with futures.ThreadPoolExecutor(max_workers=max_concurrency) as executor:
            loaded_files = executor.map(
                lambda file_path: self.parse_file_to_record(file_path, silent_errors),
                file_paths,
            )
            # loaded_files is an iterator, so we need to convert it to a list
            return list(loaded_files)

    def build(
        self,
        path: str,
        types: Optional[List[str]] = None,
        depth: int = 0,
        max_concurrency: int = 2,
        load_hidden: bool = False,
        recursive: bool = True,
        silent_errors: bool = False,
        use_multithreading: bool = True,
    ) -> List[Optional[Record]]:
        if types is None:
            types = []
        resolved_path = self.resolve_path(path)
        file_paths = self.retrieve_file_paths(
            resolved_path, types, load_hidden, recursive, depth
        )
        loaded_records = []

        if use_multithreading:
            loaded_records = self.parallel_load_records(
                file_paths, silent_errors, max_concurrency
            )
        else:
            loaded_records = [
                self.parse_file_to_record(file_path, silent_errors)
                for file_path in file_paths
            ]
        loaded_records = list(filter(None, loaded_records))
        self.status = loaded_records
        return loaded_records
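A minimal sketch of how the loader above might be exercised, assuming the methods belong to a component class (FileLoaderComponent is a hypothetical stand-in for the real class name, which this hunk does not show):

# Hypothetical usage sketch; FileLoaderComponent stands in for the real component class.
loader = FileLoaderComponent()
# Load every .md and .txt file up to two directory levels deep, skipping hidden files.
records = loader.build(
    path="./docs",
    types=["md", "txt"],
    depth=2,
    max_concurrency=4,
    silent_errors=True,       # skip unreadable files instead of raising
    use_multithreading=True,  # parse files in a ThreadPoolExecutor
)
for record in records:
    print(record.data["file_path"], len(record.text))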
@@ -1,7 +1,9 @@
from typing import Optional

from langchain.embeddings import BedrockEmbeddings
from langchain.embeddings.base import Embeddings
from langchain_community.embeddings import BedrockEmbeddings


from langflow import CustomComponent
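The embedding hunks in this group swap langchain imports for their langchain_community equivalents. A small sketch of the pattern; the try/except fallback is an illustrative addition for older installs, not part of the diff:

# Prefer the langchain_community location; fall back if only the old package layout is available.
try:
    from langchain_community.embeddings import BedrockEmbeddings
except ImportError:
    from langchain.embeddings import BedrockEmbeddings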
@@ -9,6 +9,7 @@ class AzureOpenAIEmbeddingsComponent(CustomComponent):
    description: str = "Embeddings model from Azure OpenAI."
    documentation: str = "https://python.langchain.com/docs/integrations/text_embedding/azureopenai"
    beta = False
    icon = "Azure"

    API_VERSION_OPTIONS = [
        "2022-12-01",
@@ -9,6 +9,7 @@ class HuggingFaceEmbeddingsComponent(CustomComponent):
    documentation = (
        "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/sentence_transformers"
    )
    icon = "HuggingFace"

    def build_config(self):
        return {
@@ -9,6 +9,7 @@ class HuggingFaceInferenceAPIEmbeddingsComponent(CustomComponent):
    display_name = "HuggingFaceInferenceAPIEmbeddings"
    description = "HuggingFace sentence_transformers embedding models, API version."
    documentation = "https://github.com/huggingface/text-embeddings-inference"
    icon = "HuggingFace"

    def build_config(self):
        return {
@@ -1,5 +1,5 @@
from langflow import CustomComponent
from langchain.embeddings import VertexAIEmbeddings
from langchain_community.embeddings import VertexAIEmbeddings
from typing import Optional, List
@@ -9,17 +9,45 @@ class VertexAIEmbeddingsComponent(CustomComponent):

    def build_config(self):
        return {
            "credentials": {"display_name": "Credentials", "value": "", "file_types": [".json"], "field_type": "file"},
            "instance": {"display_name": "instance", "advanced": True, "field_type": "dict"},
            "location": {"display_name": "Location", "value": "us-central1", "advanced": True},
            "credentials": {
                "display_name": "Credentials",
                "value": "",
                "file_types": [".json"],
                "field_type": "file",
            },
            "instance": {
                "display_name": "instance",
                "advanced": True,
                "field_type": "dict",
            },
            "location": {
                "display_name": "Location",
                "value": "us-central1",
                "advanced": True,
            },
            "max_output_tokens": {"display_name": "Max Output Tokens", "value": 128},
            "max_retries": {"display_name": "Max Retries", "value": 6, "advanced": True},
            "model_name": {"display_name": "Model Name", "value": "textembedding-gecko"},
            "max_retries": {
                "display_name": "Max Retries",
                "value": 6,
                "advanced": True,
            },
            "model_name": {
                "display_name": "Model Name",
                "value": "textembedding-gecko",
            },
            "n": {"display_name": "N", "value": 1, "advanced": True},
            "project": {"display_name": "Project", "advanced": True},
            "request_parallelism": {"display_name": "Request Parallelism", "value": 5, "advanced": True},
            "request_parallelism": {
                "display_name": "Request Parallelism",
                "value": 5,
                "advanced": True,
            },
            "stop": {"display_name": "Stop", "advanced": True},
            "streaming": {"display_name": "Streaming", "value": False, "advanced": True},
            "streaming": {
                "display_name": "Streaming",
                "value": False,
                "advanced": True,
            },
            "temperature": {"display_name": "Temperature", "value": 0.0},
            "top_k": {"display_name": "Top K", "value": 40, "advanced": True},
            "top_p": {"display_name": "Top P", "value": 0.95, "advanced": True},
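In these build_config dictionaries, entries flagged "advanced": True are the ones the UI keeps behind the advanced toggle, while "value" supplies the default shown in the field. A small illustrative filter over a config like the one above (not langflow code, just a sketch of the convention):

def visible_fields(config: dict, show_advanced: bool = False) -> dict:
    """Return the fields a basic (or advanced) view would surface."""
    return {
        name: field
        for name, field in config.items()
        if show_advanced or not field.get("advanced", False)
    }

# e.g. visible_fields(VertexAIEmbeddingsComponent().build_config())
# keeps "credentials", "max_output_tokens", "model_name", "temperature", ...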
@@ -1,61 +1,26 @@
from typing import Optional, Union

from langflow import CustomComponent
from langflow.components.io.base.chat import ChatComponent
from langflow.field_typing import Text
from langflow.schema import Record


class ChatInput(CustomComponent):
class ChatInput(ChatComponent):
    display_name = "Chat Input"
    description = "Used to get user input from the chat."

    def build_config(self):
        return {
            "message": {
                "input_types": ["Text"],
                "display_name": "Message",
                "multiline": True,
            },
            "sender": {
                "options": ["Machine", "User"],
                "display_name": "Sender Type",
            },
            "sender_name": {"display_name": "Sender Name"},
            "session_id": {
                "display_name": "Session ID",
                "info": "Session ID of the chat history.",
            },
            "as_record": {
                "display_name": "As Record",
                "info": "If true, the message will be returned as a Record.",
            },
        }

    def build(
        self,
        sender: Optional[str] = "User",
        sender_name: Optional[str] = "User",
        message: Optional[str] = None,
        as_record: Optional[bool] = False,
        input_value: Optional[str] = None,
        session_id: Optional[str] = None,
        return_record: Optional[bool] = False,
    ) -> Union[Text, Record]:
        self.status = message
        if as_record:
            if isinstance(message, Record):
                # Update the data of the record
                message.data["sender"] = sender
                message.data["sender_name"] = sender_name
                message.data["session_id"] = session_id
                return message
            return Record(
                text=message,
                data={
                    "sender": sender,
                    "sender_name": sender_name,
                    "session_id": session_id,
                },
            )
        if not message:
            message = ""
        self.status = message
        return message
        return super().build(
            sender=sender,
            sender_name=sender_name,
            input_value=input_value,
            session_id=session_id,
            return_record=return_record,
        )
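After this rewrite the input component no longer builds Records itself; it forwards everything to the shared ChatComponent.build. A hedged sketch of the resulting call path, with the argument values being illustrative:

chat_input = ChatInput()
# Returns plain text when return_record is False...
text = chat_input.build(input_value="hello", sender="User", sender_name="User")
# ...and a Record (with sender/session metadata) when return_record is True.
record = chat_input.build(
    input_value="hello",
    session_id="session-1",   # also triggers store_message in the base class
    return_record=True,
)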
@@ -1,65 +1,26 @@
from typing import Optional, Union

from langflow import CustomComponent
from langflow.components.io.base.chat import ChatComponent
from langflow.field_typing import Text
from langflow.schema import Record


class ChatOutput(CustomComponent):
class ChatOutput(ChatComponent):
    display_name = "Chat Output"
    description = "Used to send a message to the chat."

    field_config = {
        "code": {
            "show": True,
        }
    }

    def build_config(self):
        return {
            "message": {"input_types": ["Text"], "display_name": "Message"},
            "sender": {
                "options": ["Machine", "User"],
                "display_name": "Sender Type",
            },
            "sender_name": {"display_name": "Sender Name"},
            "session_id": {
                "display_name": "Session ID",
                "info": "Session ID of the chat history.",
                "input_types": ["Text"],
            },
            "as_record": {
                "display_name": "As Record",
                "info": "If true, the message will be returned as a Record.",
            },
        }

    def build(
        self,
        sender: Optional[str] = "Machine",
        sender_name: Optional[str] = "AI",
        input_value: Optional[str] = None,
        session_id: Optional[str] = None,
        message: Optional[str] = None,
        as_record: Optional[bool] = False,
        return_record: Optional[bool] = False,
    ) -> Union[Text, Record]:
        self.status = message
        if as_record:
            if isinstance(message, Record):
                # Update the data of the record
                message.data["sender"] = sender
                message.data["sender_name"] = sender_name
                message.data["session_id"] = session_id

                return message
            return Record(
                text=message,
                data={
                    "sender": sender,
                    "sender_name": sender_name,
                    "session_id": session_id,
                },
            )
        if not message:
            message = ""
        self.status = message
        return message
        return super().build(
            sender=sender,
            sender_name=sender_name,
            input_value=input_value,
            session_id=session_id,
            return_record=return_record,
        )
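ChatOutput follows the same path but defaults to the machine side of the conversation (sender "Machine", sender_name "AI"). An illustrative call, under the same assumptions as the ChatInput sketch above:

chat_output = ChatOutput()
reply = chat_output.build(
    input_value="Here is the answer.",
    session_id="session-1",  # persists the message via ChatComponent.store_message
)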
@@ -1,71 +0,0 @@
from typing import List, Optional

from langflow import CustomComponent
from langflow.field_typing import Text
from langflow.memory import add_messages
from langflow.schema import Record


class StoreMessages(CustomComponent):
    display_name = "Store Messages"
    description = "Used to store messages."

    def build_config(self):
        return {
            "records": {
                "display_name": "Records",
                "info": "The list of records to store. Each record should contain the keys 'sender', 'sender_name', and 'session_id'.",
            },
            "texts": {
                "display_name": "Texts",
                "info": "The list of texts to store. If records is not provided, texts must be provided.",
            },
            "session_id": {
                "display_name": "Session ID",
                "info": "The session ID to store.",
            },
            "sender": {
                "display_name": "Sender",
                "info": "The sender to store.",
            },
            "sender_name": {
                "display_name": "Sender Name",
                "info": "The sender name to store.",
            },
        }

    def build(
        self,
        records: Optional[List[Record]] = None,
        texts: Optional[List[Text]] = None,
        session_id: Optional[str] = None,
        sender: Optional[str] = None,
        sender_name: Optional[str] = None,
    ) -> List[Record]:
        # Records is the main way to store messages
        # If records is not provided, we can use texts
        # but we need to create the records from the texts
        # and the other parameters
        if not texts and not records:
            raise ValueError("Either texts or records must be provided.")

        if not records:
            records = []
            if not session_id or not sender or not sender_name:
                raise ValueError("If passing texts, session_id, sender, and sender_name must be provided.")
            for text in texts:
                record = Record(
                    text=text,
                    data={
                        "session_id": session_id,
                        "sender": sender,
                        "sender_name": sender_name,
                    },
                )
                records.append(record)
        elif isinstance(records, Record):
            records = [records]

        self.status = records
        records = add_messages(records)
        return records
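With StoreMessages removed, persisting a message comes down to what ChatComponent.store_message now does: wrap the text in a Record carrying sender, sender_name, and session_id, then hand it to langflow.memory.add_messages. A minimal sketch of that same flow outside any component, assuming the imports shown in the deleted file above:

from langflow.memory import add_messages
from langflow.schema import Record

record = Record(
    text="hello",
    data={"session_id": "session-1", "sender": "User", "sender_name": "User"},
)
stored = add_messages([record])  # returns the stored records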
@@ -1,22 +1,12 @@
from typing import Optional

from langflow import CustomComponent
from langflow.components.io.base.text import TextComponent
from langflow.field_typing import Text


class TextInput(CustomComponent):
class TextInput(TextComponent):
    display_name = "Text Input"
    description = "Used to pass text input to the next component."

    field_config = {
        "code": {
            "show": False,
        },
        "value": {"display_name": "Value"},
    }

    def build(self, value: Optional[str] = "") -> Text:
        self.status = value
        if not value:
            value = ""
        return value
    def build(self, input_value: Optional[str] = "") -> Text:
        return super().build(input_value=input_value)
16 src/backend/langflow/components/io/TextOutput.py Normal file
@@ -0,0 +1,16 @@
from typing import Optional

from langflow.components.io.base.text import TextComponent
from langflow.field_typing import Text


class TextOutput(TextComponent):
    display_name = "Text Output"
    description = "Used to pass text output to the next component."

    field_config = {
        "input_value": {"display_name": "Value"},
    }

    def build(self, input_value: Optional[Text] = "") -> Text:
        return super().build(input_value=input_value)
107 src/backend/langflow/components/io/base/chat.py Normal file
@@ -0,0 +1,107 @@
import warnings
from typing import Optional, Union

from langflow import CustomComponent
from langflow.field_typing import Text
from langflow.memory import add_messages
from langflow.schema import Record


class ChatComponent(CustomComponent):
    display_name = "Chat Component"
    description = "Use as base for chat components."

    def build_config(self):
        return {
            "input_value": {
                "input_types": ["Text"],
                "display_name": "Message",
                "multiline": True,
            },
            "sender": {
                "options": ["Machine", "User"],
                "display_name": "Sender Type",
            },
            "sender_name": {"display_name": "Sender Name"},
            "session_id": {
                "display_name": "Session ID",
                "info": "If provided, the message will be stored in the memory.",
            },
            "return_record": {
                "display_name": "Return Record",
                "info": "Return the message as a record containing the sender, sender_name, and session_id.",
            },
        }

    def store_message(
        self,
        message: Union[str, Text, Record],
        session_id: Optional[str] = None,
        sender: Optional[str] = None,
        sender_name: Optional[str] = None,
    ) -> list[Record]:
        if not message:
            warnings.warn("No message provided.")
            return []

        if not session_id or not sender or not sender_name:
            raise ValueError(
                "All of session_id, sender, and sender_name must be provided."
            )
        if isinstance(message, Record):
            record = message
            record.data.update(
                {
                    "session_id": session_id,
                    "sender": sender,
                    "sender_name": sender_name,
                }
            )
        else:
            record = Record(
                text=message,
                data={
                    "session_id": session_id,
                    "sender": sender,
                    "sender_name": sender_name,
                },
            )

        self.status = record
        records = add_messages([record])
        return records[0]

    def build(
        self,
        sender: Optional[str] = "User",
        sender_name: Optional[str] = "User",
        input_value: Optional[str] = None,
        session_id: Optional[str] = None,
        return_record: Optional[bool] = False,
    ) -> Union[Text, Record]:
        input_value_record: Optional[Record] = None
        if return_record:
            if isinstance(input_value, Record):
                # Update the data of the record
                input_value.data["sender"] = sender
                input_value.data["sender_name"] = sender_name
                input_value.data["session_id"] = session_id
            else:
                input_value_record = Record(
                    text=input_value,
                    data={
                        "sender": sender,
                        "sender_name": sender_name,
                        "session_id": session_id,
                    },
                )
        if not input_value:
            input_value = ""
        if return_record and input_value_record:
            result: Union[Text, Record] = input_value_record
        else:
            result = input_value
        self.status = result
        if session_id:
            self.store_message(result, session_id, sender, sender_name)
        return result
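A short sketch of the new base class in isolation, assuming Record and the memory backend behave as in the code above; the values are illustrative:

component = ChatComponent()
# Without a session_id the message is returned but not persisted.
text = component.build(input_value="ping")  # -> "ping"
# store_message insists on the full trio of metadata fields.
component.store_message("ping", session_id="s1", sender="User", sender_name="User")
# component.store_message("ping", session_id="s1")  # would raise ValueError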
19 src/backend/langflow/components/io/base/text.py Normal file
@@ -0,0 +1,19 @@
from typing import Optional

from langflow import CustomComponent
from langflow.field_typing import Text


class TextComponent(CustomComponent):
    display_name = "Text Component"
    description = "Used to pass text to the next component."

    field_config = {
        "input_value": {"display_name": "Value", "multiline": True},
    }

    def build(self, input_value: Optional[str] = "") -> Text:
        self.status = input_value
        if not input_value:
            input_value = ""
        return input_value
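TextInput and TextOutput now differ only in their field_config; both inherit this build, which normalizes None to an empty string and records the value in self.status. An illustrative round trip:

text_in = TextInput()
text_out = TextOutput()
value = text_in.build(input_value="hello")    # -> "hello"
assert text_out.build(input_value=value) == "hello"
assert text_in.build(input_value=None) == ""  # None is coerced to ""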
Some files were not shown because too many files have changed in this diff.