Release -> Output Inspection, Session Management, General Bug Fixing and UI Improvements (#2104)

This commit is contained in:
Gabriel Luiz Freitas Almeida 2024-06-11 09:08:24 -07:00 committed by GitHub
commit aa94e42e0d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
443 changed files with 20610 additions and 11379 deletions

1
.gitattributes vendored
View file

@ -32,3 +32,4 @@ Dockerfile text
*.mp4 binary
*.svg binary
*.csv binary

8
.vscode/launch.json vendored
View file

@ -3,7 +3,7 @@
"configurations": [
{
"name": "Debug Backend",
"type": "python",
"type": "debugpy",
"request": "launch",
"module": "uvicorn",
"args": [
@ -26,7 +26,7 @@
},
{
"name": "Debug CLI",
"type": "python",
"type": "debugpy",
"request": "launch",
"module": "langflow",
"args": [
@ -43,7 +43,7 @@
},
{
"name": "Python: Remote Attach",
"type": "python",
"type": "debugpy",
"request": "attach",
"justMyCode": true,
"connect": {
@ -65,7 +65,7 @@
},
{
"name": "Python: Debug Tests",
"type": "python",
"type": "debugpy",
"request": "launch",
"program": "${file}",
"purpose": ["debug-test"],

View file

@ -44,7 +44,8 @@ coverage:
poetry run pytest --cov \
--cov-config=.coveragerc \
--cov-report xml \
--cov-report term-missing:skip-covered
--cov-report term-missing:skip-covered \
--cov-report lcov:coverage/lcov-pytest.info
# allow passing arguments to pytest
tests:

View file

@ -181,9 +181,8 @@ Use the widget API to customize your Chat Widget:
format <code>{"key":"value"}</code>.
</Admonition>
| Prop | Type | Required | Description |
|-----------------------|---------|----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| --------------------- | ------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| bot_message_style | JSON | No | Applies custom formatting to bot messages. |
| chat_input_field | String | Yes | Defines the type of the input field for chat messages. |
| chat_inputs | JSON | Yes | Determines the chat input elements and their respective values. |
@ -207,4 +206,3 @@ Use the widget API to customize your Chat Widget:
| user_message_style | JSON | No | Determines the formatting for user messages in the chat window. |
| width | Number | No | Sets the width of the chat window in pixels. |
| window_title | String | No | Sets the title displayed in the chat window's header or title bar. |

View file

@ -4,12 +4,12 @@ Langflow's Command Line Interface (CLI) is a powerful tool that allows you to in
The available commands are below. Navigate to their individual sections of this page to see the parameters.
* [langflow](#overview)
* [langflow api-key](#langflow-api-key)
* [langflow copy-db](#langflow-copy-db)
* [langflow migration](#langflow-migration)
* [langflow run](#langflow-run)
* [langflow superuser](#langflow-superuser)
- [langflow](#overview)
- [langflow api-key](#langflow-api-key)
- [langflow copy-db](#langflow-copy-db)
- [langflow migration](#langflow-migration)
- [langflow run](#langflow-run)
- [langflow superuser](#langflow-superuser)
## Overview
@ -23,21 +23,21 @@ langflow --help
python -m langflow
```
| Command | Description |
| ------- | ----------- |
| `api-key` | Creates an API key for the default superuser if AUTO_LOGIN is enabled. |
| `copy-db` | Copy the database files to the current directory (`which langflow`). |
| `migration` | Run or test migrations. |
| `run` | Run Langflow. |
| `superuser` | Create a superuser. |
| Command | Description |
| ----------- | ---------------------------------------------------------------------- |
| `api-key` | Creates an API key for the default superuser if AUTO_LOGIN is enabled. |
| `copy-db` | Copy the database files to the current directory (`which langflow`). |
| `migration` | Run or test migrations. |
| `run`       | Run Langflow.                                                           |
| `superuser` | Create a superuser. |
### Options
| Option | Description |
| ------ | ----------- |
| `--install-completion` | Install completion for the current shell. |
| `--show-completion` | Show completion for the current shell, to copy it or customize the installation. |
| `--help` | Show this message and exit. |
| Option | Description |
| ---------------------- | -------------------------------------------------------------------------------- |
| `--install-completion` | Install completion for the current shell. |
| `--show-completion` | Show completion for the current shell, to copy it or customize the installation. |
| `--help` | Show this message and exit. |
## langflow api-key
@ -61,10 +61,10 @@ python -m langflow api-key
### Options
| Option | Type | Description |
|------------------|------|-------------------------------------------------------------|
| --log-level | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] |
| --help | | Show this message and exit. |
| Option | Type | Description |
| ----------- | ---- | ------------------------------------------------------------- |
| --log-level | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] |
| --help | | Show this message and exit. |
## langflow copy-db
@ -87,12 +87,12 @@ python -m langflow migration
```
### Options
| Option | Description |
|-----------------|-------------------------------------------------------------|
| `--test, --no-test` | Run migrations in test mode. [default: test] |
| `--fix, --no-fix` | Fix migrations. This is a destructive operation, and should only be used if you know what you are doing. [default: no-fix] |
| `--help` | Show this message and exit. |
| Option | Description |
| ------------------- | -------------------------------------------------------------------------------------------------------------------------- |
| `--test, --no-test` | Run migrations in test mode. [default: test] |
| `--fix, --no-fix` | Fix migrations. This is a destructive operation, and should only be used if you know what you are doing. [default: no-fix] |
| `--help` | Show this message and exit. |
## langflow run
@ -106,26 +106,26 @@ python -m langflow run
### Options
| Option | Description |
|-------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `--help` | Displays all available options. |
| `--host` | Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`. |
| `--workers` | Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`. |
| `--timeout` | Sets the worker timeout in seconds. The default is `60`. |
| `--port` | Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`. |
| `--env-file` | Specifies the path to the .env file containing environment variables. The default is `.env`. |
| `--log-level` | Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`. |
| `--components-path` | Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`. |
| `--log-file` | Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`. |
| `--cache` | Select the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`. |
| `--dev`/`--no-dev` | Toggles the development mode. The default is `no-dev`. |
| `--path` | Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable. |
| `--open-browser`/`--no-open-browser`| Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`. |
| `--remove-api-keys`/`--no-remove-api-keys`| Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`. |
| `--install-completion [bash\|zsh\|fish\|powershell\|pwsh]`| Installs completion for the specified shell. |
| `--show-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Shows completion for the specified shell, allowing you to copy it or customize the installation. |
| `--backend-only` | This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable. For more, see [Backend-only](../deployment/backend-only.md).|
| `--store` | This parameter, with a default value of `True`, enables the store features, use `--no-store` to deactivate it. It can be configured using the `LANGFLOW_STORE` environment variable. |
| Option | Description |
| ---------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `--help` | Displays all available options. |
| `--host` | Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`. |
| `--workers` | Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`. |
| `--timeout` | Sets the worker timeout in seconds. The default is `60`. |
| `--port` | Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`. |
| `--env-file` | Specifies the path to the .env file containing environment variables. The default is `.env`. |
| `--log-level` | Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`. |
| `--components-path` | Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`. |
| `--log-file` | Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`. |
| `--cache` | Select the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`. |
| `--dev`/`--no-dev` | Toggles the development mode. The default is `no-dev`. |
| `--path` | Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable. |
| `--open-browser`/`--no-open-browser` | Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`. |
| `--remove-api-keys`/`--no-remove-api-keys` | Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`. |
| `--install-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Installs completion for the specified shell. |
| `--show-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Shows completion for the specified shell, allowing you to copy it or customize the installation. |
| `--backend-only` | This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable. For more, see [Backend-only](../deployment/backend-only.md). |
| `--store` | This parameter, with a default value of `True`, enables the store features, use `--no-store` to deactivate it. It can be configured using the `LANGFLOW_STORE` environment variable. |
#### CLI environment variables
@ -145,10 +145,9 @@ python -m langflow superuser
### Options
| Option | Type | Description |
|----------------|-------|-------------------------------------------------------------|
| `--username` | TEXT | Username for the superuser. [default: None] [required] |
| `--password` | TEXT | Password for the superuser. [default: None] [required] |
| `--log-level` | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] |
| `--help` | | Show this message and exit. |
| Option | Type | Description |
| ------------- | ---- | ------------------------------------------------------------- |
| `--username` | TEXT | Username for the superuser. [default: None] [required] |
| `--password` | TEXT | Password for the superuser. [default: None] [required] |
| `--log-level` | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] |
| `--help` | | Show this message and exit. |

View file

@ -86,7 +86,7 @@ With _`LANGFLOW_AUTO_LOGIN`_ set to _`False`_, Langflow requires users to sign u
light: useBaseUrl("img/sign-up.png"),
dark: useBaseUrl("img/sign-up.png"),
}}
style={{ width: "40%", margin: "20px auto" }}
style={{ width: "40%", margin: "20px auto" }}
/>
## Profile settings

View file

@ -40,14 +40,13 @@ The Playground's appearance changes depending on what components are in your can
Adding or removing any of the below components modifies your Playground so you can monitor the inputs and outputs.
* Chat Input
* Text Input
* Chat Output
* Text Output
* Records Output
* Inspect Memory
- Chat Input
- Text Input
- Chat Output
- Text Output
- Records Output
- Inspect Memory
You can also select **Options** > **Logs** to see your flow's logs.
For more information, see [Inputs and Outputs](../components/inputs-and-outputs.mdx).

View file

@ -1,4 +1,4 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Agents
@ -81,4 +81,4 @@ The `ZeroShotAgent` uses the ReAct framework to decide which tool to use based o
**Parameters**:
- **Allowed Tools:** The tools accessible to the agent.
- **LLM Chain:** The LLM Chain used by the agent.
- **LLM Chain:** The LLM Chain used by the agent.

View file

@ -1,4 +1,4 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Data

View file

@ -4,113 +4,113 @@
Used to load embedding models from [Amazon Bedrock](https://aws.amazon.com/bedrock/).
| **Parameter** | **Type** | **Description** | **Default** |
|-----------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------|-------------|
| `credentials_profile_name` | `str` | Name of the AWS credentials profile in ~/.aws/credentials or ~/.aws/config, which has access keys or role information. | |
| `model_id` | `str` | ID of the model to call, e.g., `amazon.titan-embed-text-v1`. This is equivalent to the `modelId` property in the `list-foundation-models` API. | |
| `endpoint_url` | `str` | URL to set a specific service endpoint other than the default AWS endpoint. | |
| `region_name` | `str` | AWS region to use, e.g., `us-west-2`. Falls back to `AWS_DEFAULT_REGION` environment variable or region specified in ~/.aws/config if not provided. | |
| **Parameter** | **Type** | **Description** | **Default** |
| -------------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
| `credentials_profile_name` | `str` | Name of the AWS credentials profile in ~/.aws/credentials or ~/.aws/config, which has access keys or role information. | |
| `model_id` | `str` | ID of the model to call, e.g., `amazon.titan-embed-text-v1`. This is equivalent to the `modelId` property in the `list-foundation-models` API. | |
| `endpoint_url` | `str` | URL to set a specific service endpoint other than the default AWS endpoint. | |
| `region_name` | `str` | AWS region to use, e.g., `us-west-2`. Falls back to `AWS_DEFAULT_REGION` environment variable or region specified in ~/.aws/config if not provided. | |
## Cohere Embeddings
Used to load embedding models from [Cohere](https://cohere.com/).
| **Parameter** | **Type** | **Description** | **Default** |
|---------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------|-----------------------|
| `cohere_api_key` | `str` | API key required to authenticate with the Cohere service. | |
| `model` | `str` | Language model used for embedding text documents and performing queries. | `embed-english-v2.0` |
| `truncate` | `bool` | Whether to truncate the input text to fit within the model's constraints. | `False` |
| **Parameter** | **Type** | **Description** | **Default** |
| ---------------- | -------- | ------------------------------------------------------------------------- | -------------------- |
| `cohere_api_key` | `str` | API key required to authenticate with the Cohere service. | |
| `model` | `str` | Language model used for embedding text documents and performing queries. | `embed-english-v2.0` |
| `truncate` | `bool` | Whether to truncate the input text to fit within the model's constraints. | `False` |
## Azure OpenAI Embeddings
Generate embeddings using Azure OpenAI models.
| **Parameter** | **Type** | **Description** | **Default** |
|---------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------|-----------------------|
| `Azure Endpoint` | `str` | Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/` | |
| `Deployment Name` | `str` | The name of the deployment. | |
| `API Version` | `str` | The API version to use, options include various dates. | |
| `API Key` | `str` | The API key to access the Azure OpenAI service. | |
| **Parameter** | **Type** | **Description** | **Default** |
| ----------------- | -------- | -------------------------------------------------------------------------------------------------- | ----------- |
| `Azure Endpoint` | `str` | Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/` | |
| `Deployment Name` | `str` | The name of the deployment. | |
| `API Version` | `str` | The API version to use, options include various dates. | |
| `API Key` | `str` | The API key to access the Azure OpenAI service. | |
## Hugging Face API Embeddings
Generate embeddings using Hugging Face Inference API models.
| **Parameter** | **Type** | **Description** | **Default** |
|---------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------|-----------------------|
| `API Key` | `str` | API key for accessing the Hugging Face Inference API. | |
| `API URL` | `str` | URL of the Hugging Face Inference API. | `http://localhost:8080` |
| `Model Name` | `str` | Name of the model to use for embeddings. | `BAAI/bge-large-en-v1.5` |
| `Cache Folder` | `str` | Folder path to cache Hugging Face models. | |
| `Encode Kwargs` | `dict` | Additional arguments for the encoding process. | |
| `Model Kwargs` | `dict` | Additional arguments for the model. | |
| `Multi Process` | `bool` | Whether to use multiple processes. | `False` |
| **Parameter** | **Type** | **Description** | **Default** |
| --------------- | -------- | ----------------------------------------------------- | ------------------------ |
| `API Key` | `str` | API key for accessing the Hugging Face Inference API. | |
| `API URL` | `str` | URL of the Hugging Face Inference API. | `http://localhost:8080` |
| `Model Name` | `str` | Name of the model to use for embeddings. | `BAAI/bge-large-en-v1.5` |
| `Cache Folder` | `str` | Folder path to cache Hugging Face models. | |
| `Encode Kwargs` | `dict` | Additional arguments for the encoding process. | |
| `Model Kwargs` | `dict` | Additional arguments for the model. | |
| `Multi Process` | `bool` | Whether to use multiple processes. | `False` |
## Hugging Face Embeddings
Used to load embedding models from [HuggingFace](https://huggingface.co).
| **Parameter** | **Type** | **Description** | **Default** |
|---------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------|-----------------------|
| `Cache Folder` | `str` | Folder path to cache HuggingFace models. | |
| `Encode Kwargs` | `dict` | Additional arguments for the encoding process. | |
| `Model Kwargs` | `dict` | Additional arguments for the model. | |
| `Model Name` | `str` | Name of the HuggingFace model to use. | `sentence-transformers/all-mpnet-base-v2` |
| `Multi Process` | `bool` | Whether to use multiple processes. | `False` |
| **Parameter** | **Type** | **Description** | **Default** |
| --------------- | -------- | ---------------------------------------------- | ----------------------------------------- |
| `Cache Folder` | `str` | Folder path to cache HuggingFace models. | |
| `Encode Kwargs` | `dict` | Additional arguments for the encoding process. | |
| `Model Kwargs` | `dict` | Additional arguments for the model. | |
| `Model Name` | `str` | Name of the HuggingFace model to use. | `sentence-transformers/all-mpnet-base-v2` |
| `Multi Process` | `bool` | Whether to use multiple processes. | `False` |
## OpenAI Embeddings
Used to load embedding models from [OpenAI](https://openai.com/).
| **Parameter** | **Type** | **Description** | **Default** |
|-----------------------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------|
| `OpenAI API Key` | `str` | The API key to use for accessing the OpenAI API. | |
| `Default Headers` | `Dict[str, str]` | Default headers for the HTTP requests. | |
| `Default Query` | `NestedDict` | Default query parameters for the HTTP requests. | |
| `Allowed Special` | `List[str]` | Special tokens allowed for processing. | `[]` |
| `Disallowed Special` | `List[str]` | Special tokens disallowed for processing. | `["all"]` |
| `Chunk Size` | `int` | Chunk size for processing. | `1000` |
| `Client` | `Any` | HTTP client for making requests. | |
| `Deployment` | `str` | Deployment name for the model. | `text-embedding-3-small` |
| `Embedding Context Length` | `int` | Length of embedding context. | `8191` |
| `Max Retries` | `int` | Maximum number of retries for failed requests. | `6` |
| `Model` | `str` | Name of the model to use. | `text-embedding-3-small` |
| `Model Kwargs` | `NestedDict` | Additional keyword arguments for the model. | |
| `OpenAI API Base` | `str` | Base URL of the OpenAI API. | |
| `OpenAI API Type` | `str` | Type of the OpenAI API. | |
| `OpenAI API Version` | `str` | Version of the OpenAI API. | |
| `OpenAI Organization` | `str` | Organization associated with the API key. | |
| `OpenAI Proxy` | `str` | Proxy server for the requests. | |
| `Request Timeout` | `float` | Timeout for the HTTP requests. | |
| `Show Progress Bar` | `bool` | Whether to show a progress bar for processing. | `False` |
| `Skip Empty` | `bool` | Whether to skip empty inputs. | `False` |
| `TikToken Enable` | `bool` | Whether to enable TikToken. | `True` |
| `TikToken Model Name` | `str` | Name of the TikToken model. | |
| **Parameter** | **Type** | **Description** | **Default** |
| -------------------------- | ---------------- | ------------------------------------------------ | ------------------------ |
| `OpenAI API Key` | `str` | The API key to use for accessing the OpenAI API. | |
| `Default Headers` | `Dict[str, str]` | Default headers for the HTTP requests. | |
| `Default Query` | `NestedDict` | Default query parameters for the HTTP requests. | |
| `Allowed Special` | `List[str]` | Special tokens allowed for processing. | `[]` |
| `Disallowed Special` | `List[str]` | Special tokens disallowed for processing. | `["all"]` |
| `Chunk Size` | `int` | Chunk size for processing. | `1000` |
| `Client` | `Any` | HTTP client for making requests. | |
| `Deployment` | `str` | Deployment name for the model. | `text-embedding-3-small` |
| `Embedding Context Length` | `int` | Length of embedding context. | `8191` |
| `Max Retries` | `int` | Maximum number of retries for failed requests. | `6` |
| `Model` | `str` | Name of the model to use. | `text-embedding-3-small` |
| `Model Kwargs` | `NestedDict` | Additional keyword arguments for the model. | |
| `OpenAI API Base` | `str` | Base URL of the OpenAI API. | |
| `OpenAI API Type` | `str` | Type of the OpenAI API. | |
| `OpenAI API Version` | `str` | Version of the OpenAI API. | |
| `OpenAI Organization` | `str` | Organization associated with the API key. | |
| `OpenAI Proxy` | `str` | Proxy server for the requests. | |
| `Request Timeout` | `float` | Timeout for the HTTP requests. | |
| `Show Progress Bar` | `bool` | Whether to show a progress bar for processing. | `False` |
| `Skip Empty` | `bool` | Whether to skip empty inputs. | `False` |
| `TikToken Enable` | `bool` | Whether to enable TikToken. | `True` |
| `TikToken Model Name` | `str` | Name of the TikToken model. | |
## Ollama Embeddings
Generate embeddings using Ollama models.
| **Parameter** | **Type** | **Description** | **Default** |
|---------------------|-------------------|--------------------------------------------------------------------------------------------------------------------|---------------------------|
| `Ollama Model` | `str` | Name of the Ollama model to use. | `llama2` |
| `Ollama Base URL` | `str` | Base URL of the Ollama API. | `http://localhost:11434` |
| `Model Temperature` | `float` | Temperature parameter for the model. Adjusts the randomness in the generated embeddings. | |
| **Parameter** | **Type** | **Description** | **Default** |
| ------------------- | -------- | ---------------------------------------------------------------------------------------- | ------------------------ |
| `Ollama Model` | `str` | Name of the Ollama model to use. | `llama2` |
| `Ollama Base URL` | `str` | Base URL of the Ollama API. | `http://localhost:11434` |
| `Model Temperature` | `float` | Temperature parameter for the model. Adjusts the randomness in the generated embeddings. | |
## VertexAI Embeddings
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embeddings API](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings).
| **Parameter** | **Type** | **Description** | **Default** |
|-----------------------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------|
| `credentials` | `Credentials` | The default custom credentials to use. | |
| `location` | `str` | The default location to use when making API calls. | `us-central1`|
| `max_output_tokens` | `int` | Token limit determines the maximum amount of text output from one prompt. | `128` |
| `model_name` | `str` | The name of the Vertex AI large language model. | `text-bison`|
| `project` | `str` | The default GCP project to use when making Vertex API calls. | |
| `request_parallelism` | `int` | The amount of parallelism allowed for requests issued to VertexAI models. | `5` |
| `temperature` | `float` | Tunes the degree of randomness in text generations. Should be a non-negative value. | `0` |
| `top_k` | `int` | How the model selects tokens for output, the next token is selected from the top `k` tokens. | `40` |
| `top_p` | `float` | Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value. | `0.95` |
| `tuned_model_name` | `str` | The name of a tuned model. If provided, `model_name` is ignored. | |
| `verbose` | `bool` | This parameter controls the level of detail in the output. When set to `True`, it prints internal states of the chain to help debug. | `False` |
| **Parameter** | **Type** | **Description** | **Default** |
| --------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------------- |
| `credentials` | `Credentials` | The default custom credentials to use. | |
| `location` | `str` | The default location to use when making API calls. | `us-central1` |
| `max_output_tokens` | `int` | Token limit determines the maximum amount of text output from one prompt. | `128` |
| `model_name` | `str` | The name of the Vertex AI large language model. | `text-bison` |
| `project` | `str` | The default GCP project to use when making Vertex API calls. | |
| `request_parallelism` | `int` | The amount of parallelism allowed for requests issued to VertexAI models. | `5` |
| `temperature` | `float` | Tunes the degree of randomness in text generations. Should be a non-negative value. | `0` |
| `top_k` | `int` | How the model selects tokens for output, the next token is selected from the top `k` tokens. | `40` |
| `top_p` | `float` | Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value. | `0.95` |
| `tuned_model_name` | `str` | The name of a tuned model. If provided, `model_name` is ignored. | |
| `verbose` | `bool` | This parameter controls the level of detail in the output. When set to `True`, it prints internal states of the chain to help debug. | `False` |

View file

@ -1,4 +1,4 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Experimental
@ -31,10 +31,12 @@ This component extracts specified keys from a record.
**Parameters**
- **Record:**
- **Display Name:** Record
- **Info:** The record from which to extract keys.
- **Keys:**
- **Display Name:** Keys
- **Info:** The keys to be extracted.
@ -56,6 +58,7 @@ This component turns a function running a flow into a Tool.
**Parameters**
- **Flow Name:**
- **Display Name:** Flow Name
- **Info:** Select the flow to run.
- **Options:** List of available flows.
@ -63,10 +66,12 @@ This component turns a function running a flow into a Tool.
- **Refresh Button:** True
- **Name:**
- **Display Name:** Name
- **Description:** The tool's name.
- **Description:**
- **Display Name:** Description
- **Description:** Describes the tool.
@ -129,10 +134,12 @@ This component generates a notification.
**Parameters**
- **Name:**
- **Display Name:** Name
- **Info:** The notification's name.
- **Record:**
- **Display Name:** Record
- **Info:** Optionally, a record to store in the notification.
@ -153,10 +160,12 @@ This component runs a specified flow.
**Parameters**
- **Input Value:**
- **Display Name:** Input Value
- **Multiline:** True
- **Flow Name:**
- **Display Name:** Flow Name
- **Info:** Select the flow to run.
- **Options:** List of available flows.
@ -179,14 +188,17 @@ This component executes a specified runnable.
**Parameters**
- **Input Key:**
- **Display Name:** Input Key
- **Info:** The input key.
- **Inputs:**
- **Display Name:** Inputs
- **Info:** Inputs for the runnable.
- **Runnable:**
- **Display Name:** Runnable
- **Info:** The runnable to execute.
@ -207,14 +219,17 @@ This component executes an SQL query.
**Parameters**
- **Database URL:**
- **Display Name:** Database URL
- **Info:** The database's URL.
- **Include Columns:**
- **Display Name:** Include Columns
- **Info:** Whether to include columns in the result.
- **Passthrough:**
- **Display Name:** Passthrough
- **Info:** Returns the query instead of raising an exception if an error occurs.
@ -235,10 +250,12 @@ This component dynamically generates a tool from a flow.
**Parameters**
- **Input Value:**
- **Display Name:** Input Value
- **Multiline:** True
- **Flow Name:**
- **Display Name:** Flow Name
- **Info:** Select the flow to run.
- **Options:** List of available flows.

View file

@ -1,4 +1,4 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Helpers
@ -49,9 +49,10 @@ Use this component as a template to create your custom component.
- **Parameter:** Describe the purpose of this parameter.
<Admonition type="info" title="Info">
<p>
Customize the <code>build_config</code> and <code>build</code> methods according to your requirements.
</p>
<p>
Customize the <code>build_config</code> and <code>build</code> methods
according to your requirements.
</p>
</Admonition>
Learn more about creating custom components at [Custom Component](http://docs.langflow.org/components/custom).

View file

@ -48,8 +48,8 @@ One significant capability of the Chat Input component is its ability to transfo
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: ("img/playground-chat.png"),
dark: ("img/playground-chat.png"),
light: "img/playground-chat.png",
dark: "img/playground-chat.png",
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>

View file

@ -1,11 +1,13 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Memories
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
Thanks for your patience as we improve our documentation—it might have some rough edges. Share your feedback or report issues to help us enhance it! 🛠️📝
</p>
<p>
Thanks for your patience as we improve our documentation—it might have some
rough edges. Share your feedback or report issues to help us enhance it!
🛠️📝
</p>
</Admonition>
Memory is a concept in chat-based applications that allows the system to remember previous interactions. This capability helps maintain the context of the conversation and enables the system to understand new messages in light of past messages.
@ -24,9 +26,13 @@ This component retrieves stored messages using various filters such as sender ty
- **number_of_messages**: Specifies the number of messages to retrieve. Defaults to `5`. Determines the number of recent messages from the chat history to fetch.
<Admonition type="note" title="Note">
<p>
The component retrieves messages based on the provided criteria, including the specific file path for stored messages. If no specific criteria are provided, it returns the most recent messages up to the specified limit. This component can be used to review past interactions and analyze conversation flows.
</p>
<p>
The component retrieves messages based on the provided criteria, including
the specific file path for stored messages. If no specific criteria are
provided, it returns the most recent messages up to the specified limit.
This component can be used to review past interactions and analyze
conversation flows.
</p>
</Admonition>
### ConversationBufferMemory
@ -84,7 +90,8 @@ The `ConversationKGMemory` utilizes a knowledge graph to enhance memory capabili
- **memory_key**: Specifies the prompt variable name where the memory stores and retrieves chat messages. Defaults to `chat_history`.
- **output_key**: Identifies the key under which the generated response
is stored, enabling retrieval using this key.
is stored, enabling retrieval using this key.
- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`.
---
@ -124,4 +131,4 @@ The `VectorRetrieverMemory` retrieves vectors based on queries, facilitating vec
- **Retriever**: The tool used to fetch documents.
- **input_key**: Identifies where input messages are stored in the memory object, facilitating their retrieval and manipulation.
- **memory_key**: Specifies the prompt variable name where the memory stores and retrieves chat messages. Defaults to `chat_history`.
- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`.
- **return_messages**: Controls whether the history is returned as a string or as a list of messages. Defaults to `False`.

View file

@ -1,4 +1,4 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Large Language Models (LLMs)
@ -31,7 +31,9 @@ This is a wrapper for Anthropic's large language model designed for chat-based i
`CTransformers` provides access to Transformer models implemented in C/C++ using the [GGML](https://github.com/ggerganov/ggml) library.
<Admonition type="info">
Ensure the `ctransformers` Python package is installed. Discover more about installation, supported models, and usage [here](https://github.com/marella/ctransformers).
Ensure the `ctransformers` Python package is installed. Discover more about
installation, supported models, and usage
[here](https://github.com/marella/ctransformers).
</Admonition>
- **config:** This configuration is for the Transformer models. Check the default settings and possible configurations at [config](https://github.com/marella/ctransformers#config).
@ -122,7 +124,8 @@ This component integrates with [Google Vertex AI](https://cloud.google.com/verte
- **credentials**: Custom
credentials used for API interactions.
credentials used for API interactions.
- **location**: The default location for API calls, defaulting to `us-central1`.
- **max_output_tokens**: Limits the output tokens per prompt, defaulting to `128`.
- **model_name**: The name of the Vertex AI model in use, defaulting to `text-bison`.
@ -134,4 +137,4 @@ This component integrates with [Google Vertex AI](https://cloud.google.com/verte
- **tuned_model_name**: Specifies a tuned model name, which overrides the default model name if provided.
- **verbose**: Controls the output verbosity to assist in debugging and understanding the operational details, defaulting to `False`.
---
---

View file

@ -1,4 +1,4 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Retrievers

View file

@ -1,9 +1,11 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Toolkits
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
</Admonition>
<p>
We appreciate your understanding as we polish our documentation - it may
contain some rough edges. Share your feedback or report issues to help us
improve! 🛠️📝
</p>
</Admonition>

View file

@ -1,4 +1,4 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Tools

View file

@ -80,7 +80,11 @@ Generates a unique identifier (UUID) for each instance it is invoked, providing
- Returns a unique identifier (UUID) as a string. This UUID is generated using Python's `uuid` module, ensuring that each identifier is unique and can be used as a reliable reference in your application.
<Admonition type="note" title="Note">
The Unique ID Generator is crucial for scenarios requiring distinct identifiers, such as session management, transaction tracking, or any context where different instances or entities must be uniquely identified. The generated UUID is provided as a hexadecimal string, offering a high level of uniqueness and security for identification purposes.
The Unique ID Generator is crucial for scenarios requiring distinct
identifiers, such as session management, transaction tracking, or any context
where different instances or entities must be uniquely identified. The
generated UUID is provided as a hexadecimal string, offering a high level of
uniqueness and security for identification purposes.
</Admonition>
For additional information and examples, please consult the [Langflow Components Custom Documentation](http://docs.langflow.org/components/custom).

View file

@ -1,24 +1,26 @@
# Backend-only
You can run Langflow in `--backend-only` mode to expose your Langflow app as an API, without running the frontend UI.
Start langflow in backend-only mode with `python3 -m langflow run --backend-only`.
The terminal prints ` Welcome to ⛓ Langflow `, and a blank window opens at `http://127.0.0.1:7864/all`.
The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`.
Langflow will now serve requests to its API without the frontend running.
## Prerequisites
* [Langflow installed](../getting-started/install-langflow.mdx)
- [Langflow installed](../getting-started/install-langflow.mdx)
* [OpenAI API key](https://platform.openai.com)
- [OpenAI API key](https://platform.openai.com)
* [A Langflow flow created](../starter-projects/basic-prompting.mdx)
- [A Langflow flow created](../starter-projects/basic-prompting.mdx)
## Download your flow's curl call
1. Click API.
2. Click **curl** > **Copy code** and save the code to your local machine.
It will look something like this:
It will look something like this:
```curl
curl -X POST \
"http://127.0.0.1:7864/api/v1/run/ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef?stream=false" \
@ -33,19 +35,22 @@ curl -X POST \
"ChatInput-xXC4F": {}
}}'
```
Note the flow ID of `ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef`. You can find this ID in the UI as well to ensure you're querying the right flow.
## Start Langflow in backend-only mode
1. Stop Langflow with Ctrl+C.
2. Start langflow in backend-only mode with `python3 -m langflow run --backend-only`.
The terminal prints ` Welcome to ⛓ Langflow `, and a blank window opens at `http://127.0.0.1:7864/all`.
Langflow will now serve requests to its API.
The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`.
Langflow will now serve requests to its API.
3. Run the curl code you copied from the UI.
You should get a result like this:
You should get a result like this:
```bash
{"session_id":"ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880","outputs":[{"inputs":{"input_value":"hi, are you there?"},"outputs":[{"results":{"result":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?"},"artifacts":{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI"},"messages":[{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI","component_id":"ChatOutput-ktwdw"}],"component_display_name":"Chat Output","component_id":"ChatOutput-ktwdw","used_frozen_result":false}]}]}%
```
Again, note that the flow ID matches.
Langflow is receiving your POST request, running the flow, and returning the result, all without running the frontend. Cool!
@ -55,7 +60,8 @@ Instead of using curl, you can download your flow as a Python API call instead.
1. Click API.
2. Click **Python API** > **Copy code** and save the code to your local machine.
The code will look something like this:
The code will look something like this:
```python
import requests
from typing import Optional
@ -99,15 +105,19 @@ message = "message"
print(run_flow(message=message, flow_id=FLOW_ID))
```
3. Run your Python app:
```python
python3 app.py
```
The result is similar to the curl call:
```bash
{'session_id': 'ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880', 'outputs': [{'inputs': {'input_value': 'message'}, 'outputs': [{'results': {'result': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!"}, 'artifacts': {'message': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!", 'sender': 'Machine', 'sender_name': 'AI'}, 'messages': [{'message': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!", 'sender': 'Machine', 'sender_name': 'AI', 'component_id': 'ChatOutput-ktwdw'}], 'component_display_name': 'Chat Output', 'component_id': 'ChatOutput-ktwdw', 'used_frozen_result': False}]}]}
```
Your Python app POSTs to your Langflow server, and the server runs the flow and returns the result.
See [API](../administration/api.mdx) for more ways to interact with your headless Langflow server.
See [API](../administration/api.mdx) for more ways to interact with your headless Langflow server.

View file

@ -38,7 +38,6 @@ For example, the [Basic prompting](../starter-projects/basic-prompting.mdx) flow
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
In this flow, the **OpenAI LLM component** receives input (left side) and produces output (right side) - in this case, receiving input from the **Chat Input** and **Prompt** components and producing output to the **Chat Output** component.
## Component

View file

@ -86,7 +86,7 @@ You'll be presented with the following screen:
light: "img/duplicate-space.png",
dark: "img/duplicate-space.png",
}}
style={{ width: "80%", maxWidth: "800px", margin: "0 auto" }}
style={{ width: "80%", maxWidth: "800px", margin: "0 auto" }}
/>
Name your Space, define the visibility (Public or Private), and click on **Duplicate Space** to start the installation process. When installation is finished, you'll be redirected to the Space's main page to start using Langflow right away!

View file

@ -32,8 +32,9 @@ There are two possible reasons for this error:
Clear the cache by deleting the contents of the cache folder.
This folder can be found at:
- **Linux or WSL2 on Windows**: `home/<username>/.cache/langflow/`
- **MacOS**: `/Users/<username>/Library/Caches/langflow/`
- **Linux or WSL2 on Windows**: `home/<username>/.cache/langflow/`
- **MacOS**: `/Users/<username>/Library/Caches/langflow/`
This error can occur during Langflow upgrades when the new version can't override `langflow-pre.db` in `.cache/langflow/`. Clearing the cache removes this file but will also erase your settings.

View file

@ -33,7 +33,7 @@ Build a question-and-answer chatbot with a document loaded from local memory.
light: "img/document-qa.png",
dark: "img/document-qa.png",
}}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
style={{ width: "100%", maxWidth: "800px", margin: "0 auto" }}
/>
This flow creates a basic chatbot with the **Chat Input**, **Prompt**, **OpenAI**, and **Chat Output** components.

View file

@ -13,7 +13,8 @@ In this guide, we will use Astra DB as a vector store to store and retrieve the
<Admonition type="tip">
This guide assumes that you have Langflow up and running. If you are new to
Langflow, you can check out the [Getting Started](../getting-started/install-langflow.mdx) guide.
Langflow, you can check out the [Getting
Started](../getting-started/install-langflow.mdx) guide.
</Admonition>
TLDR;

View file

@ -114,7 +114,8 @@ module.exports = {
type: "category",
label: "Deployment",
collapsed: true,
items: ["deployment/docker",
items: [
"deployment/docker",
"deployment/backend-only",
"deployment/gcp-deployment",
],

20
poetry.lock generated
View file

@ -1608,6 +1608,23 @@ files = [
[package.dependencies]
packaging = "*"
[[package]]
name = "dictdiffer"
version = "0.9.0"
description = "Dictdiffer is a library that helps you to diff and patch dictionaries."
optional = false
python-versions = "*"
files = [
{file = "dictdiffer-0.9.0-py2.py3-none-any.whl", hash = "sha256:442bfc693cfcadaf46674575d2eba1c53b42f5e404218ca2c2ff549f2df56595"},
{file = "dictdiffer-0.9.0.tar.gz", hash = "sha256:17bacf5fbfe613ccf1b6d512bd766e6b21fb798822a133aa86098b8ac9997578"},
]
[package.extras]
all = ["Sphinx (>=3)", "check-manifest (>=0.42)", "mock (>=1.3.0)", "numpy (>=1.13.0)", "numpy (>=1.15.0)", "numpy (>=1.18.0)", "numpy (>=1.20.0)", "pytest (==5.4.3)", "pytest (>=6)", "pytest-cov (>=2.10.1)", "pytest-isort (>=1.2.0)", "pytest-pycodestyle (>=2)", "pytest-pycodestyle (>=2.2.0)", "pytest-pydocstyle (>=2)", "pytest-pydocstyle (>=2.2.0)", "sphinx (>=3)", "sphinx-rtd-theme (>=0.2)", "tox (>=3.7.0)"]
docs = ["Sphinx (>=3)", "sphinx-rtd-theme (>=0.2)"]
numpy = ["numpy (>=1.13.0)", "numpy (>=1.15.0)", "numpy (>=1.18.0)", "numpy (>=1.20.0)"]
tests = ["check-manifest (>=0.42)", "mock (>=1.3.0)", "pytest (==5.4.3)", "pytest (>=6)", "pytest-cov (>=2.10.1)", "pytest-isort (>=1.2.0)", "pytest-pycodestyle (>=2)", "pytest-pycodestyle (>=2.2.0)", "pytest-pydocstyle (>=2)", "pytest-pydocstyle (>=2.2.0)", "sphinx (>=3)", "tox (>=3.7.0)"]
[[package]]
name = "dill"
version = "0.3.7"
@ -10445,6 +10462,7 @@ test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
[extras]
cassio = ["cassio"]
couchbase = ["couchbase"]
deploy = ["celery", "flower", "redis"]
local = ["ctransformers", "llama-cpp-python", "sentence-transformers"]
@ -10452,4 +10470,4 @@ local = ["ctransformers", "llama-cpp-python", "sentence-transformers"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.13"
content-hash = "4e16ddf83311fa2c894623b76832a9dda98eec2b88975c087297364954dbdac6"
content-hash = "6cfa9f164710bf283b50a8e12e9d0c91c58f5d0176bbb277b5f5d3630ec8b2cb"

View file

@ -86,7 +86,7 @@ youtube-transcript-api = "^0.6.2"
markdown = "^3.6"
langchain-chroma = "^0.1.1"
upstash-vector = "^0.4.0"
cassio = "^0.1.7"
cassio = { extras = ["cassio"], version = "^0.1.7", optional = true }
unstructured = {extras = ["docx", "md", "pptx"], version = "^0.14.4"}
@ -117,10 +117,12 @@ pytest-asyncio = "^0.23.0"
pytest-profiling = "^1.7.0"
pre-commit = "^3.7.0"
vulture = "^2.11"
dictdiffer = "^0.9.0"
[tool.poetry.extras]
deploy = ["celery", "redis", "flower"]
couchbase = ["couchbase"]
cassio = ["cassio"]
local = ["llama-cpp-python", "sentence-transformers", "ctransformers"]

View file

@ -86,6 +86,10 @@ def update_frontend_node_with_template_values(frontend_node, raw_frontend_node):
update_template_values(frontend_node["template"], raw_frontend_node["template"])
old_code = raw_frontend_node["template"]["code"]["value"]
new_code = frontend_node["template"]["code"]["value"]
frontend_node["edited"] = old_code != new_code
return frontend_node
@ -204,16 +208,18 @@ def format_elapsed_time(elapsed_time: float) -> str:
return f"{minutes} {minutes_unit}, {seconds} {seconds_unit}"
async def build_and_cache_graph_from_db(
flow_id: str,
session: Session,
chat_service: "ChatService",
):
async def build_and_cache_graph_from_db(flow_id: str, session: Session, chat_service: "ChatService"):
"""Build and cache the graph."""
flow: Optional[Flow] = session.get(Flow, flow_id)
if not flow or not flow.data:
raise ValueError("Invalid flow ID")
graph = Graph.from_payload(flow.data, flow_id)
for vertex_id in graph._has_session_id_vertices:
vertex = graph.get_vertex(vertex_id)
if vertex is None:
raise ValueError(f"Vertex {vertex_id} not found")
if not vertex._raw_params.get("session_id"):
vertex.update_raw_params({"session_id": flow_id}, overwrite=True)
await chat_service.set_cache(flow_id, graph)
return graph
@ -317,3 +323,4 @@ def parse_exception(exc):
if hasattr(exc, "body"):
return exc.body["message"]
return str(exc)
return str(exc)

View file

@ -22,6 +22,7 @@ from langflow.api.v1.schemas import (
VertexBuildResponse,
VerticesOrderResponse,
)
from langflow.schema.schema import Log
from langflow.services.auth.utils import get_current_active_user
from langflow.services.chat.service import ChatService
from langflow.services.deps import get_chat_service, get_session, get_session_service
@ -123,6 +124,7 @@ async def build_vertex(
vertex_id: str,
background_tasks: BackgroundTasks,
inputs: Annotated[Optional[InputValueRequest], Body(embed=True)] = None,
files: Optional[list[str]] = None,
chat_service: "ChatService" = Depends(get_chat_service),
current_user=Depends(get_current_active_user),
):
@ -159,6 +161,7 @@ async def build_vertex(
else:
graph = cache.get("result")
vertex = graph.get_vertex(vertex_id)
try:
lock = chat_service._cache_locks[flow_id_str]
(
@ -175,19 +178,25 @@ async def build_vertex(
vertex_id=vertex_id,
user_id=current_user.id,
inputs_dict=inputs.model_dump() if inputs else {},
files=files,
)
log_obj = Log(message=vertex.artifacts_raw, type=vertex.artifacts_type)
result_data_response = ResultDataResponse(**result_dict.model_dump())
except Exception as exc:
logger.exception(f"Error building vertex: {exc}")
params = format_exception_message(exc)
valid = False
log_obj = Log(message=params, type="error")
result_data_response = ResultDataResponse(results={})
artifacts = {}
# If there's an error building the vertex
# we need to clear the cache
await chat_service.clear_cache(flow_id_str)
result_data_response.message = artifacts
result_data_response.logs.append(log_obj)
# Log the vertex build
if not vertex.will_stream:
background_tasks.add_task(

View file

@ -2,6 +2,7 @@ import hashlib
from http import HTTPStatus
from io import BytesIO
from uuid import UUID
from pathlib import Path
from fastapi import APIRouter, Depends, HTTPException, UploadFile
from fastapi.responses import StreamingResponse
@ -99,6 +100,46 @@ async def download_image(file_name: str, flow_id: UUID, storage_service: Storage
raise HTTPException(status_code=500, detail=str(e))
@router.get("/profile_pictures/{folder_name}/{file_name}")
async def download_profile_picture(
folder_name: str,
file_name: str,
storage_service: StorageService = Depends(get_storage_service),
):
try:
extension = file_name.split(".")[-1]
config_dir = get_storage_service().settings_service.settings.config_dir
config_path = Path(config_dir)
folder_path = config_path / "profile_pictures" / folder_name
content_type = build_content_type_from_extension(extension)
file_content = await storage_service.get_file(flow_id=folder_path, file_name=file_name)
return StreamingResponse(BytesIO(file_content), media_type=content_type)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.get("/profile_pictures/list")
async def list_profile_pictures(storage_service: StorageService = Depends(get_storage_service)):
try:
config_dir = get_storage_service().settings_service.settings.config_dir
config_path = Path(config_dir)
people_path = config_path / "profile_pictures/People"
space_path = config_path / "profile_pictures/Space"
people = await storage_service.list_files(flow_id=people_path)
space = await storage_service.list_files(flow_id=space_path)
files = [Path("People") / i for i in people]
files += [Path("Space") / i for i in space]
return {"files": files}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.get("/list/{flow_id}")
async def list_files(
flow_id: UUID = Depends(get_flow_id), storage_service: StorageService = Depends(get_storage_service)

View file

@ -31,25 +31,60 @@ def create_flow(
flow: FlowCreate,
current_user: User = Depends(get_current_active_user),
):
"""Create a new flow."""
if flow.user_id is None:
flow.user_id = current_user.id
try:
"""Create a new flow."""
if flow.user_id is None:
flow.user_id = current_user.id
db_flow = Flow.model_validate(flow, from_attributes=True)
db_flow.updated_at = datetime.now(timezone.utc)
# First check if the flow.name is unique
# there might be flows with name like: "MyFlow", "MyFlow (1)", "MyFlow (2)"
# so we need to check if the name is unique with `like` operator
# if we find a flow with the same name, we add a number to the end of the name
# based on the highest number found
if session.exec(select(Flow).where(Flow.name == flow.name).where(Flow.user_id == current_user.id)).first():
flows = session.exec(
select(Flow).where(Flow.name.like(f"{flow.name} (%")).where(Flow.user_id == current_user.id)
).all()
if flows:
numbers = [int(flow.name.split("(")[1].split(")")[0]) for flow in flows]
flow.name = f"{flow.name} ({max(numbers) + 1})"
else:
flow.name = f"{flow.name} (1)"
if db_flow.folder_id is None:
# Make sure flows always have a folder
default_folder = session.exec(
select(Folder).where(Folder.name == DEFAULT_FOLDER_NAME, Folder.user_id == current_user.id)
).first()
if default_folder:
db_flow.folder_id = default_folder.id
db_flow = Flow.model_validate(flow, from_attributes=True)
db_flow.updated_at = datetime.now(timezone.utc)
session.add(db_flow)
session.commit()
session.refresh(db_flow)
return db_flow
if db_flow.folder_id is None:
# Make sure flows always have a folder
default_folder = session.exec(
select(Folder).where(Folder.name == DEFAULT_FOLDER_NAME, Folder.user_id == current_user.id)
).first()
if default_folder:
db_flow.folder_id = default_folder.id
session.add(db_flow)
session.commit()
session.refresh(db_flow)
return db_flow
except Exception as e:
# If it is a validation error, return the error message
if hasattr(e, "errors"):
raise HTTPException(status_code=400, detail=str(e)) from e
elif "UNIQUE constraint failed" in str(e):
# Get the name of the column that failed
columns = str(e).split("UNIQUE constraint failed: ")[1].split(".")[1].split("\n")[0]
# UNIQUE constraint failed: flow.user_id, flow.name
# or UNIQUE constraint failed: flow.name
# if the column has id in it, we want the other column
column = columns.split(",")[1] if "id" in columns.split(",")[0] else columns.split(",")[0]
raise HTTPException(
status_code=400, detail=f"{column.capitalize().replace('_', ' ')} must be unique"
) from e
elif isinstance(e, HTTPException):
raise e
else:
raise HTTPException(status_code=500, detail=str(e)) from e
@router.get("/", response_model=list[FlowRead], status_code=200)

View file

@ -7,6 +7,8 @@ from sqlmodel import Session, select
from langflow.api.v1.flows import create_flows
from langflow.api.v1.schemas import FlowListCreate, FlowListReadWithFolderName
from langflow.helpers.flow import generate_unique_flow_name
from langflow.helpers.folders import generate_unique_folder_name
from langflow.services.auth.utils import get_current_active_user
from langflow.services.database.models.flow.model import Flow, FlowCreate, FlowRead
from langflow.services.database.models.folder.constants import DEFAULT_FOLDER_NAME
@ -33,17 +35,27 @@ def create_folder(
try:
new_folder = Folder.model_validate(folder, from_attributes=True)
new_folder.user_id = current_user.id
folder_results = session.exec(
select(Folder).where(
Folder.name.like(f"{new_folder.name}%"), # type: ignore
Folder.user_id == current_user.id,
# First check if the folder.name is unique
# there might be flows with name like: "MyFlow", "MyFlow (1)", "MyFlow (2)"
# so we need to check if the name is unique with `like` operator
# if we find a flow with the same name, we add a number to the end of the name
# based on the highest number found
if session.exec(
statement=select(Folder).where(Folder.name == new_folder.name).where(Folder.user_id == current_user.id)
).first():
folder_results = session.exec(
select(Folder).where(
Folder.name.like(f"{new_folder.name}%"), # type: ignore
Folder.user_id == current_user.id,
)
)
)
existing_folder_names = [folder.name for folder in folder_results]
if existing_folder_names:
new_folder.name = f"{new_folder.name} ({len(existing_folder_names) + 1})"
if folder_results:
folder_names = [folder.name for folder in folder_results]
folder_numbers = [int(name.split("(")[-1].split(")")[0]) for name in folder_names if "(" in name]
if folder_numbers:
new_folder.name = f"{new_folder.name} ({max(folder_numbers) + 1})"
else:
new_folder.name = f"{new_folder.name} (1)"
session.add(new_folder)
session.commit()
@ -203,16 +215,9 @@ async def upload_file(
if not data:
raise HTTPException(status_code=400, detail="No flows found in the file")
folder_results = session.exec(
select(Folder).where(
Folder.name == data["folder_name"],
Folder.user_id == current_user.id,
)
)
existing_folder_names = [folder.name for folder in folder_results]
folder_name = generate_unique_folder_name(data["folder_name"], current_user.id, session)
if existing_folder_names:
data["folder_name"] = f"{data['folder_name']} ({len(existing_folder_names) + 1})"
data["folder_name"] = folder_name
folder = FolderCreate(name=data["folder_name"], description=data["folder_description"])
@ -232,6 +237,8 @@ async def upload_file(
raise HTTPException(status_code=400, detail="No flows found in the data")
# Now we set the user_id for all flows
for flow in flow_list.flows:
flow_name = generate_unique_flow_name(flow.name, current_user.id, session)
flow.name = flow_name
flow.user_id = current_user.id
flow.folder_id = new_folder.id

View file

@ -1,4 +1,5 @@
from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException, Query
from langflow.services.deps import get_monitor_service
@ -79,7 +80,7 @@ async def delete_messages(
@router.post("/messages/{message_id}", response_model=MessageModelResponse)
async def update_message(
message_id: str,
message_id: int,
message: MessageModelRequest,
monitor_service: MonitorService = Depends(get_monitor_service),
):
@ -117,6 +118,22 @@ async def get_transactions(
dicts = monitor_service.get_transactions(
source=source, target=target, status=status, order_by=order_by, flow_id=flow_id
)
return [TransactionModelResponse(**d) for d in dicts]
result = []
for d in dicts:
d = TransactionModelResponse(
index=d["index"],
timestamp=d["timestamp"],
vertex_id=d["vertex_id"],
inputs=d["inputs"],
outputs=d["outputs"],
status=d["status"],
error=d["error"],
flow_id=d["flow_id"],
source=d["vertex_id"],
target=d["target_id"],
)
result.append(d)
return result
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
raise HTTPException(status_code=500, detail=str(e))

View file

@ -9,7 +9,7 @@ from pydantic import BaseModel, ConfigDict, Field, field_validator, model_serial
from langflow.graph.schema import RunOutputs
from langflow.schema import dotdict
from langflow.schema.graph import Tweaks
from langflow.schema.schema import InputType, OutputType
from langflow.schema.schema import InputType, Log, OutputType
from langflow.services.database.models.api_key.model import ApiKeyRead
from langflow.services.database.models.base import orjson_dumps
from langflow.services.database.models.flow import FlowCreate, FlowRead
@ -245,6 +245,8 @@ class VerticesOrderResponse(BaseModel):
class ResultDataResponse(BaseModel):
results: Optional[Any] = Field(default_factory=dict)
logs: List[Log | None] = Field(default_factory=list)
message: Optional[Any] = Field(default_factory=dict)
artifacts: Optional[Any] = Field(default_factory=dict)
timedelta: Optional[float] = None
duration: Optional[str] = None

View file

@ -7,7 +7,7 @@ from langchain_core.runnables import Runnable
from langflow.base.agents.utils import get_agents_list, records_to_messages
from langflow.custom import CustomComponent
from langflow.field_typing import Text, Tool
from langflow.schema.schema import Record
from langflow.schema import Record
class LCAgentComponent(CustomComponent):

View file

@ -13,7 +13,7 @@ from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from langflow.schema.schema import Record
from langflow.schema import Record
from .default_prompts import XML_AGENT_PROMPT

View file

@ -7,9 +7,11 @@ Constants:
- FIELD_FORMAT_ATTRIBUTES: A list of attributes used for formatting fields.
"""
import orjson
STREAM_INFO_TEXT = "Stream the response from the model. Streaming works only in Chat."
NODE_FORMAT_ATTRIBUTES = ["beta", "icon", "display_name", "description"]
NODE_FORMAT_ATTRIBUTES = ["beta", "icon", "display_name", "description", "output_types"]
FIELD_FORMAT_ATTRIBUTES = [
@ -27,3 +29,5 @@ FIELD_FORMAT_ATTRIBUTES = [
"refresh_button_text",
"options",
]
ORJSON_OPTIONS = orjson.OPT_INDENT_2 | orjson.OPT_SORT_KEYS | orjson.OPT_OMIT_MICROSECONDS

View file

@ -1,13 +1,14 @@
import json
import unicodedata
import xml.etree.ElementTree as ET
from concurrent import futures
from pathlib import Path
from typing import Callable, List, Optional, Text
import chardet
import orjson
import yaml
from langflow.schema.schema import Record
from langflow.schema import Record
# Types of files that can be read simply by file.read()
# and have 100% to be completely readable
@ -32,6 +33,17 @@ TEXT_FILE_TYPES = [
"tsx",
]
IMG_FILE_TYPES = [
"jpg",
"jpeg",
"png",
"bmp",
]
def normalize_text(text):
    """Return *text* decomposed into Unicode NFKD (compatibility) form."""
    decomposed = unicodedata.normalize("NFKD", text)
    return decomposed
def is_hidden(path: Path) -> bool:
    """Return True when the final path component is dot-prefixed (hidden on POSIX)."""
    return path.name[:1] == "."
@ -125,9 +137,15 @@ def parse_text_file_to_record(file_path: str, silent_errors: bool) -> Optional[R
text = read_docx_file(file_path)
else:
text = read_text_file(file_path)
# if file is json, yaml, or xml, we can parse it
if file_path.endswith(".json"):
text = json.loads(text)
text = orjson.loads(text)
if isinstance(text, dict):
text = {k: normalize_text(v) if isinstance(v, str) else v for k, v in text.items()}
elif isinstance(text, list):
text = [normalize_text(item) if isinstance(item, str) else item for item in text]
elif file_path.endswith(".yaml") or file_path.endswith(".yml"):
text = yaml.safe_load(text)
elif file_path.endswith(".xml"):

View file

@ -1,7 +1,7 @@
from typing import List
from langflow.graph.schema import ResultData, RunOutputs
from langflow.schema.schema import Record
from langflow.schema import Record
def build_records_from_run_outputs(run_outputs: RunOutputs) -> List[Record]:

View file

@ -1,10 +1,10 @@
from typing import Optional, Union
from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES
from langflow.custom import CustomComponent
from langflow.field_typing import Text
from langflow.helpers.record import records_to_text
from langflow.memory import store_message
from langflow.schema import Record
from langflow.schema.message import Message
class ChatComponent(CustomComponent):
@ -15,7 +15,7 @@ class ChatComponent(CustomComponent):
return {
"input_value": {
"input_types": ["Text"],
"display_name": "Message",
"display_name": "Text",
"multiline": True,
},
"sender": {
@ -23,7 +23,7 @@ class ChatComponent(CustomComponent):
"display_name": "Sender Type",
"advanced": True,
},
"sender_name": {"display_name": "Sender Name"},
"sender_name": {"display_name": "Sender Name", "advanced": True},
"session_id": {
"display_name": "Session ID",
"info": "If provided, the message will be stored in the memory.",
@ -40,98 +40,45 @@ class ChatComponent(CustomComponent):
"info": "In case of Message being a Record, this template will be used to convert it to text.",
"advanced": True,
},
"files": {
"field_type": "file",
"display_name": "Files",
"file_types": TEXT_FILE_TYPES + IMG_FILE_TYPES,
"info": "Files to be sent with the message.",
"advanced": True,
},
}
def store_message(
self,
message: Union[str, Text, Record],
session_id: Optional[str] = None,
sender: Optional[str] = None,
sender_name: Optional[str] = None,
) -> list[Record]:
records = store_message(
message: Message,
) -> list[Message]:
messages = store_message(
message,
session_id=session_id,
sender=sender,
sender_name=sender_name,
flow_id=self.graph.flow_id,
)
self.status = records
return records
self.status = messages
return messages
def build_with_record(
self,
sender: Optional[str] = "User",
sender_name: Optional[str] = "User",
input_value: Optional[Union[str, Record]] = None,
input_value: Optional[Union[str, Record, Message]] = None,
files: Optional[list[str]] = None,
session_id: Optional[str] = None,
return_record: Optional[bool] = False,
record_template: str = "Text: {text}\nData: {data}",
) -> Union[Text, Record]:
input_value_record: Optional[Record] = None
if return_record:
if isinstance(input_value, Record):
# Update the data of the record
input_value.data["sender"] = sender
input_value.data["sender_name"] = sender_name
input_value.data["session_id"] = session_id
else:
input_value_record = Record(
text=input_value,
data={
"sender": sender,
"sender_name": sender_name,
"session_id": session_id,
},
)
elif isinstance(input_value, Record):
input_value = records_to_text(template=record_template, records=input_value)
if not input_value:
input_value = ""
if return_record and input_value_record:
result: Union[Text, Record] = input_value_record
else:
result = input_value
self.status = result
if session_id and isinstance(result, (Record, str)):
self.store_message(result, session_id, sender, sender_name)
return result
) -> Message:
message: Message | None = None
def build_no_record(
self,
sender: Optional[str] = "User",
sender_name: Optional[str] = "User",
input_value: Optional[str] = None,
session_id: Optional[str] = None,
return_record: Optional[bool] = False,
record_template: str = "Text: {text}\nData: {data}",
) -> Union[Text, Record]:
input_value_record: Optional[Record] = None
if return_record:
if isinstance(input_value, Record):
# Update the data of the record
input_value.data["sender"] = sender
input_value.data["sender_name"] = sender_name
input_value.data["session_id"] = session_id
else:
input_value_record = Record(
text=input_value,
data={
"sender": sender,
"sender_name": sender_name,
"session_id": session_id,
},
)
elif isinstance(input_value, Record):
input_value = records_to_text(template=record_template, records=input_value)
if not input_value:
input_value = ""
if return_record and input_value_record:
result: Union[Text, Record] = input_value_record
if isinstance(input_value, Record):
# Update the data of the record
message = Message.from_record(input_value)
else:
result = input_value
self.status = result
if session_id and isinstance(result, (Record, str)):
self.store_message(result, session_id, sender, sender_name)
return result
message = Message(
text=input_value, sender=sender, sender_name=sender_name, files=files, session_id=session_id
)
self.status = message
if session_id and isinstance(message, Message) and isinstance(message.text, str):
self.store_message(message)
return message

View file

@ -3,7 +3,7 @@ from typing import Optional
from langflow.custom import CustomComponent
from langflow.field_typing import Text
from langflow.helpers.record import records_to_text
from langflow.schema.schema import Record
from langflow.schema import Record
class TextComponent(CustomComponent):

View file

@ -1,7 +1,7 @@
from typing import Optional
from langflow.custom import CustomComponent
from langflow.schema.schema import Record
from langflow.schema import Record
class BaseMemoryComponent(CustomComponent):

View file

@ -1,3 +1,4 @@
import warnings
from typing import Optional, Union
from langchain_core.language_models.chat_models import BaseChatModel
@ -5,6 +6,7 @@ from langchain_core.language_models.llms import LLM
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langflow.custom import CustomComponent
from langflow.field_typing.prompt import Prompt
class LCModelComponent(CustomComponent):
@ -53,19 +55,28 @@ class LCModelComponent(CustomComponent):
key in response_metadata["token_usage"] for key in inner_openai_keys
):
token_usage = response_metadata["token_usage"]
completion_tokens = token_usage["completion_tokens"]
prompt_tokens = token_usage["prompt_tokens"]
total_tokens = token_usage["total_tokens"]
finish_reason = response_metadata["finish_reason"]
status_message = f"Tokens:\nInput: {prompt_tokens}\nOutput: {completion_tokens}\nTotal Tokens: {total_tokens}\nStop Reason: {finish_reason}\nResponse: {content}"
status_message = {
"tokens": {
"input": token_usage["prompt_tokens"],
"output": token_usage["completion_tokens"],
"total": token_usage["total_tokens"],
"stop_reason": response_metadata["finish_reason"],
"response": content,
}
}
elif all(key in response_metadata for key in anthropic_keys) and all(
key in response_metadata["usage"] for key in inner_anthropic_keys
):
usage = response_metadata["usage"]
input_tokens = usage["input_tokens"]
output_tokens = usage["output_tokens"]
stop_reason = response_metadata["stop_reason"]
status_message = f"Tokens:\nInput: {input_tokens}\nOutput: {output_tokens}\nStop Reason: {stop_reason}\nResponse: {content}"
status_message = {
"tokens": {
"input": usage["input_tokens"],
"output": usage["output_tokens"],
"stop_reason": response_metadata["stop_reason"],
"response": content,
}
}
else:
status_message = f"Response: {content}"
else:
@ -73,7 +84,7 @@ class LCModelComponent(CustomComponent):
return status_message
def get_chat_result(
self, runnable: BaseChatModel, stream: bool, input_value: str, system_message: Optional[str] = None
self, runnable: BaseChatModel, stream: bool, input_value: str | Prompt, system_message: Optional[str] = None
):
messages: list[Union[HumanMessage, SystemMessage]] = []
if not input_value and not system_message:
@ -81,11 +92,21 @@ class LCModelComponent(CustomComponent):
if system_message:
messages.append(SystemMessage(content=system_message))
if input_value:
messages.append(HumanMessage(content=input_value))
if isinstance(input_value, Prompt):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if "prompt" in input_value:
prompt = input_value.load_lc_prompt()
runnable = prompt | runnable
else:
messages.append(input_value.to_lc_message())
else:
messages.append(HumanMessage(content=input_value))
inputs = messages or {}
if stream:
return runnable.stream(messages)
return runnable.stream(inputs)
else:
message = runnable.invoke(messages)
message = runnable.invoke(inputs)
result = message.content
if isinstance(message, AIMessage):
status_message = self.build_status_message(message)

View file

@ -1,9 +1,9 @@
from copy import deepcopy
from langchain_core.documents import Document
from langflow.schema import Record
from langflow.schema.message import Message
def record_to_string(record: Record) -> str:
@ -35,10 +35,14 @@ def dict_values_to_string(d: dict) -> dict:
# it could be a list of records or documents or strings
if isinstance(value, list):
for i, item in enumerate(value):
if isinstance(item, Record):
if isinstance(item, Message):
d_copy[key][i] = item.text
elif isinstance(item, Record):
d_copy[key][i] = record_to_string(item)
elif isinstance(item, Document):
d_copy[key][i] = document_to_string(item)
elif isinstance(value, Message):
d_copy[key] = value.text
elif isinstance(value, Record):
d_copy[key] = record_to_string(value)
elif isinstance(value, Document):

View file

@ -0,0 +1,24 @@
from langflow.schema import Record
def chroma_collection_to_records(collection_dict: dict):
    """
    Converts a collection of chroma vectors into a list of records.

    Args:
        collection_dict (dict): A dictionary containing the collection of chroma vectors.
            Expected keys: "documents" and "ids" (parallel lists); optionally
            "metadatas", a parallel list of per-document metadata dicts.

    Returns:
        list: A list of records, where each record represents a document in the collection.
    """
    records = []
    # Chroma's get()/query() return "metadatas" as None (the key is present but
    # the value is None) when metadata was not requested, so guard against both
    # a missing key and a None value before indexing into it.
    metadatas = collection_dict.get("metadatas") or []
    for i, doc in enumerate(collection_dict["documents"]):
        record_dict = {
            "id": collection_dict["ids"][i],
            "text": doc,
        }
        # Individual entries can also be None when a document has no metadata.
        if i < len(metadatas) and metadatas[i]:
            # Flatten this document's metadata into the record fields.
            for key, value in metadatas[i].items():
                record_dict[key] = value
        records.append(Record(**record_dict))
    return records

View file

@ -5,7 +5,7 @@ from langchain_core.prompts import ChatPromptTemplate
from langflow.base.agents.agent import LCAgentComponent
from langflow.field_typing import BaseLanguageModel, Text, Tool
from langflow.schema.schema import Record
from langflow.schema import Record
class ToolCallingAgentComponent(LCAgentComponent):

View file

@ -3,10 +3,9 @@ from typing import List, Optional
from langchain.agents import create_xml_agent
from langchain_core.prompts import ChatPromptTemplate
from langflow.base.agents.agent import LCAgentComponent
from langflow.field_typing import BaseLanguageModel, Text, Tool
from langflow.schema.schema import Record
from langflow.schema import Record
class XMLAgentComponent(LCAgentComponent):

View file

@ -5,7 +5,7 @@ from langchain_core.documents import Document
from langflow.custom import CustomComponent
from langflow.field_typing import BaseLanguageModel, BaseMemory, BaseRetriever, Text
from langflow.schema.schema import Record
from langflow.schema import Record
class RetrievalQAComponent(CustomComponent):

View file

@ -20,7 +20,7 @@ class URLComponent(CustomComponent):
self,
urls: list[str],
) -> list[Record]:
loader = WebBaseLoader(web_paths=urls)
loader = WebBaseLoader(web_paths=[url for url in urls if url])
docs = loader.load()
records = self.to_records(docs)
self.status = records

View file

@ -3,8 +3,8 @@ import uuid
from typing import Any, Optional
from langflow.custom import CustomComponent
from langflow.schema import Record
from langflow.schema.dotdict import dotdict
from langflow.schema.schema import Record
class WebhookComponent(CustomComponent):

View file

@ -6,8 +6,8 @@ from langchain_core.prompts.chat import HumanMessagePromptTemplate, SystemMessag
from langflow.base.agents.agent import LCAgentComponent
from langflow.base.agents.utils import AGENTS, AgentSpec, get_agents_list
from langflow.field_typing import BaseLanguageModel, Text, Tool
from langflow.schema import Record
from langflow.schema.dotdict import dotdict
from langflow.schema.schema import Record
class AgentComponent(LCAgentComponent):

View file

@ -0,0 +1,15 @@
from langflow.custom import CustomComponent
from langflow.schema import Record
from langflow.field_typing import Embeddings
class EmbedComponent(CustomComponent):
    """Embed a list of texts with the given embedding model and expose the vectors as a Record."""

    display_name = "Embed Texts"

    def build_config(self):
        # NOTE: the "embbedings" misspelling is kept because the config key must
        # match the build() parameter name that langflow introspects.
        return {"texts": {"display_name": "Texts"}, "embbedings": {"display_name": "Embeddings"}}

    def build(self, texts: list[str], embbedings: Embeddings) -> Record:
        # Bug fix: the method constructs and returns a Record, but was annotated
        # as returning Embeddings, which misdeclared the component's output type.
        vectors = Record(vector=embbedings.embed_documents(texts))
        self.status = vectors
        return vectors

View file

@ -7,8 +7,8 @@ from langflow.custom import CustomComponent
from langflow.field_typing import Tool
from langflow.graph.graph.base import Graph
from langflow.helpers.flow import get_flow_inputs
from langflow.schema import Record
from langflow.schema.dotdict import dotdict
from langflow.schema.schema import Record
class FlowToolComponent(CustomComponent):

View file

@ -0,0 +1,38 @@
from typing import Optional
from langflow.custom import CustomComponent
from langflow.schema.message import Message
class MessageComponent(CustomComponent):
    """Component that assembles a chat Message from sender metadata, text, and a session id."""

    display_name = "Message"
    description = "Creates a Message object given a Session ID."

    def build_config(self):
        """Expose the configurable fields shown in the UI for this component."""
        config = {}
        config["sender"] = {
            "options": ["Machine", "User"],
            "display_name": "Sender Type",
        }
        config["sender_name"] = {"display_name": "Sender Name"}
        config["text"] = {"display_name": "Text"}
        config["session_id"] = {
            "display_name": "Session ID",
            "info": "Session ID of the chat history.",
            "input_types": ["Text"],
        }
        return config

    def build(
        self,
        sender: str = "User",
        sender_name: Optional[str] = None,
        session_id: Optional[str] = None,
        text: str = "",
    ) -> Message:
        """Build the Message, tagging it with the current flow id, and surface it as status."""
        result = Message(
            text=text,
            sender=sender,
            sender_name=sender_name,
            flow_id=self.graph.flow_id,
            session_id=session_id,
        )
        self.status = result
        return result

View file

@ -1,43 +1,22 @@
from typing import List, Optional
from langflow.custom import CustomComponent
from langflow.memory import get_messages, store_message
from langflow.schema import Record
from langflow.schema.message import Message
class StoreMessageComponent(CustomComponent):
display_name = "Store Message"
description = "Stores a chat message given a Session ID."
beta: bool = True
description = "Stores a chat message."
def build_config(self):
return {
"sender": {
"options": ["Machine", "User"],
"display_name": "Sender Type",
},
"sender_name": {"display_name": "Sender Name"},
"message": {"display_name": "Message"},
"session_id": {
"display_name": "Session ID",
"info": "Session ID of the chat history.",
"input_types": ["Text"],
},
}
def build(
self,
sender: str = "User",
sender_name: Optional[str] = None,
session_id: Optional[str] = None,
message: str = "",
) -> List[Record]:
store_message(
sender=sender,
sender_name=sender_name,
session_id=session_id,
message=message,
)
message: Message,
) -> Message:
store_message(message, flow_id=self.graph.flow_id)
self.status = get_messages()
self.status = get_messages(session_id=session_id)
return get_messages(session_id=session_id)
return message

View file

@ -2,9 +2,9 @@ from typing import Optional
from langflow.base.memory.memory import BaseMemoryComponent
from langflow.field_typing import Text
from langflow.helpers.record import records_to_text
from langflow.helpers.record import messages_to_text
from langflow.memory import get_messages
from langflow.schema.schema import Record
from langflow.schema.message import Message
class MemoryComponent(BaseMemoryComponent):
@ -43,7 +43,7 @@ class MemoryComponent(BaseMemoryComponent):
},
}
def get_messages(self, **kwargs) -> list[Record]:
def get_messages(self, **kwargs) -> list[Message]:
# Validate kwargs by checking if it contains the correct keys
if "sender" not in kwargs:
kwargs["sender"] = None
@ -77,6 +77,6 @@ class MemoryComponent(BaseMemoryComponent):
limit=n_messages,
order=order,
)
messages_str = records_to_text(template=record_template or "", records=messages)
messages_str = messages_to_text(template=record_template or "", messages=messages)
self.status = messages_str
return messages_str

View file

@ -6,25 +6,27 @@ from langflow.schema import Record
class MessageHistoryComponent(CustomComponent):
display_name = "Message History"
description = "Retrieves stored chat messages given a specific Session ID."
beta: bool = True
display_name = "Memory"
description = "Retrieves stored chat messages."
def build_config(self):
return {
"sender": {
"options": ["Machine", "User", "Machine and User"],
"display_name": "Sender Type",
"advanced": True,
},
"sender_name": {"display_name": "Sender Name", "advanced": True},
"n_messages": {
"display_name": "Number of Messages",
"info": "Number of messages to retrieve.",
"advanced": True,
},
"session_id": {
"display_name": "Session ID",
"info": "Session ID of the chat history.",
"input_types": ["Text"],
"advanced": True,
},
"order": {
"options": ["Ascending", "Descending"],
@ -39,7 +41,7 @@ class MessageHistoryComponent(CustomComponent):
sender: Optional[str] = "Machine and User",
sender_name: Optional[str] = None,
session_id: Optional[str] = None,
n_messages: int = 5,
n_messages: int = 100,
order: Optional[str] = "Descending",
) -> List[Record]:
order = "DESC" if order == "Descending" else "ASC"

View file

@ -17,6 +17,7 @@ class RecordsToTextComponent(CustomComponent):
"template": {
"display_name": "Template",
"info": "The template to use for formatting the records. It can contain the keys {text}, {data} or any other key in the Record.",
"multiline": True,
},
}

View file

@ -1,8 +1,7 @@
from typing import Optional, Union
from typing import Optional
from langflow.base.io.chat import ChatComponent
from langflow.field_typing import Text
from langflow.schema import Record
from langflow.schema.message import Message
class ChatInput(ChatComponent):
@ -14,7 +13,7 @@ class ChatInput(ChatComponent):
build_config = super().build_config()
build_config["input_value"] = {
"input_types": [],
"display_name": "Message",
"display_name": "Text",
"multiline": True,
}
@ -25,13 +24,13 @@ class ChatInput(ChatComponent):
sender: Optional[str] = "User",
sender_name: Optional[str] = "User",
input_value: Optional[str] = None,
files: Optional[list[str]] = None,
session_id: Optional[str] = None,
return_record: Optional[bool] = False,
) -> Union[Text, Record]:
return super().build_no_record(
) -> Message:
return super().build_with_record(
sender=sender,
sender_name=sender_name,
input_value=input_value,
files=files,
session_id=session_id,
return_record=return_record,
)

View file

@ -1,7 +1,6 @@
from langchain_core.prompts import PromptTemplate
from langflow.custom import CustomComponent
from langflow.field_typing import Prompt, TemplateField, Text
from langflow.field_typing import TemplateField
from langflow.field_typing.prompt import Prompt
class PromptComponent(CustomComponent):
@ -15,19 +14,11 @@ class PromptComponent(CustomComponent):
"code": TemplateField(advanced=True),
}
def build(
async def build(
self,
template: Prompt,
**kwargs,
) -> Text:
from langflow.base.prompts.utils import dict_values_to_string
prompt_template = PromptTemplate.from_template(Text(template))
kwargs = dict_values_to_string(kwargs)
kwargs = {k: "\n".join(v) if isinstance(v, list) else v for k, v in kwargs.items()}
try:
formated_prompt = prompt_template.format(**kwargs)
except Exception as exc:
raise ValueError(f"Error formatting prompt: {exc}") from exc
self.status = f'Prompt:\n"{formated_prompt}"'
return formated_prompt
) -> Prompt:
prompt = await Prompt.from_template_and_variables(template, kwargs)
self.status = prompt.format_text()
return prompt

View file

@ -12,7 +12,7 @@ class TextInput(TextComponent):
def build_config(self):
return {
"input_value": {
"display_name": "Value",
"display_name": "Text",
"input_types": ["Record", "Text"],
"info": "Text or Record to be passed as input.",
},

View file

@ -3,7 +3,7 @@ from typing import Optional
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
from langflow.custom import CustomComponent
from langflow.schema.schema import Record
from langflow.schema import Record
from langflow.services.database.models.base import orjson_dumps

View file

@ -1,8 +1,7 @@
from typing import Optional, cast
from langflow.base.memory.memory import BaseMemoryComponent
from langflow.schema.schema import Record
from langflow.schema.record import Record
class AstraDBMessageReaderComponent(BaseMemoryComponent):

View file

@ -1,10 +1,10 @@
from typing import Optional
from langflow.base.memory.memory import BaseMemoryComponent
from langflow.schema.schema import Record
from langchain_core.messages import BaseMessage
from langflow.base.memory.memory import BaseMemoryComponent
from langflow.schema.record import Record
class AstraDBMessageWriterComponent(BaseMemoryComponent):
display_name = "Astra DB Message Writer"

View file

@ -3,7 +3,7 @@ from typing import Optional, cast
from langchain_community.chat_message_histories import CassandraChatMessageHistory
from langflow.base.memory.memory import BaseMemoryComponent
from langflow.schema.schema import Record
from langflow.schema.record import Record
class CassandraMessageReaderComponent(BaseMemoryComponent):

View file

@ -1,10 +1,10 @@
from typing import Optional
from langflow.base.memory.memory import BaseMemoryComponent
from langflow.schema.schema import Record
from langchain_core.messages import BaseMessage
from langchain_community.chat_message_histories import CassandraChatMessageHistory
from langchain_core.messages import BaseMessage
from langflow.base.memory.memory import BaseMemoryComponent
from langflow.schema.record import Record
class CassandraMessageWriterComponent(BaseMemoryComponent):

View file

@ -4,7 +4,7 @@ from langchain_community.chat_message_histories.zep import SearchScope, SearchTy
from langflow.base.memory.memory import BaseMemoryComponent
from langflow.field_typing import Text
from langflow.schema.schema import Record
from langflow.schema import Record
class ZepMessageReaderComponent(BaseMemoryComponent):

View file

@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, Optional
from langflow.base.memory.memory import BaseMemoryComponent
from langflow.field_typing import Text
from langflow.schema.schema import Record
from langflow.schema import Record
if TYPE_CHECKING:
from zep_python.langchain import ZepChatMessageHistory

View file

@ -58,7 +58,7 @@ class AmazonBedrockComponent(LCModelComponent):
"advanced": True,
},
"cache": {"display_name": "Cache"},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",

View file

@ -63,7 +63,7 @@ class AnthropicLLM(LCModelComponent):
"info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]},
"stream": {
"display_name": "Stream",
"advanced": True,

View file

@ -78,7 +78,7 @@ class AzureChatOpenAIComponent(LCModelComponent):
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -81,7 +81,7 @@ class QianfanChatEndpointComponent(LCModelComponent):
"info": "Endpoint of the Qianfan LLM, required if custom model used.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -111,7 +111,7 @@ class ChatLiteLLMModelComponent(LCModelComponent):
"required": False,
"default": False,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -1,10 +1,11 @@
from typing import Optional
from langchain_cohere import ChatCohere
from pydantic.v1 import SecretStr
from langflow.field_typing import Text
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langchain_cohere import ChatCohere
from langflow.field_typing import Text
class CohereComponent(LCModelComponent):
@ -42,7 +43,7 @@ class CohereComponent(LCModelComponent):
"type": "float",
"show": True,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
@ -69,3 +70,4 @@ class CohereComponent(LCModelComponent):
temperature=temperature,
)
return self.get_chat_result(output, stream, input_value, system_message)
return self.get_chat_result(output, stream, input_value, system_message)

View file

@ -2,9 +2,10 @@ from typing import Optional
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langflow.field_typing import Text
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class HuggingFaceEndpointsComponent(LCModelComponent):
@ -36,7 +37,7 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
"advanced": True,
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
@ -72,3 +73,4 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
raise ValueError("Could not connect to HuggingFace Endpoints API.") from e
output = ChatHuggingFace(llm=llm)
return self.get_chat_result(output, stream, input_value, system_message)
return self.get_chat_result(output, stream, input_value, system_message)

View file

@ -27,7 +27,7 @@ class MistralAIModelComponent(LCModelComponent):
def build_config(self):
return {
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,

View file

@ -194,7 +194,7 @@ class ChatOllamaComponent(LCModelComponent):
"info": "Template to use for generating text.",
"advanced": True,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -28,7 +28,7 @@ class OpenAIModelComponent(LCModelComponent):
def build_config(self):
return {
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,
@ -79,7 +79,7 @@ class OpenAIModelComponent(LCModelComponent):
input_value: Text,
openai_api_key: str,
temperature: float = 0.1,
model_name: str = "gpt-4o",
model_name: str = "gpt-3.5-turbo",
max_tokens: Optional[int] = 256,
model_kwargs: NestedDict = {},
openai_api_base: Optional[str] = None,

View file

@ -1,6 +1,5 @@
from typing import Optional
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
@ -74,7 +73,7 @@ class ChatVertexAIComponent(LCModelComponent):
"value": False,
"advanced": True,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record", "Prompt"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@ -1,8 +1,7 @@
from typing import Optional, Union
from typing import Optional
from langflow.base.io.chat import ChatComponent
from langflow.field_typing import Text
from langflow.schema import Record
from langflow.schema.message import Message
class ChatOutput(ChatComponent):
@ -16,14 +15,12 @@ class ChatOutput(ChatComponent):
sender_name: Optional[str] = "AI",
input_value: Optional[str] = None,
session_id: Optional[str] = None,
return_record: Optional[bool] = False,
record_template: Optional[str] = "{text}",
) -> Union[Text, Record]:
files: Optional[list[str]] = None,
) -> Message:
return super().build_with_record(
sender=sender,
sender_name=sender_name,
input_value=input_value,
session_id=session_id,
return_record=return_record,
record_template=record_template or "",
files=files,
)

View file

@ -2,9 +2,19 @@ from langflow.custom import CustomComponent
from langflow.schema import Record
class RecordsOutput(CustomComponent):
class RecordOutput(CustomComponent):
display_name = "Records Output"
description = "Display Records as a Table"
def build_config(self):
return {
"input_value": {
"display_name": "Records",
"input_types": ["Record"],
"info": "Record or Record list to be passed as input.",
},
}
def build(self, input_value: Record) -> Record:
self.status = input_value
return input_value

View file

@ -12,7 +12,7 @@ class TextOutput(TextComponent):
def build_config(self):
return {
"input_value": {
"display_name": "Value",
"display_name": "Text",
"input_types": ["Record", "Text"],
"info": "Text or Record to be passed as output.",
},

View file

@ -0,0 +1,24 @@
from langflow.custom import CustomComponent
from langflow.field_typing import TemplateField
from langflow.field_typing.prompt import Prompt
class PromptComponent(CustomComponent):
    """Component that renders a prompt template with dynamic variables into a Prompt."""

    display_name: str = "Empty Prompt"
    description: str = "Create a prompt template with dynamic variables."
    icon = "prompts"

    def build_config(self):
        """Field configuration: the template itself plus the (advanced) code field."""
        config = {}
        config["template"] = TemplateField(display_name="Template")
        config["code"] = TemplateField(advanced=True)
        return config

    async def build(
        self,
        template: Prompt,
        **kwargs,
    ) -> Prompt:
        """Resolve the template against the supplied variables and report the rendered text."""
        result = await Prompt.from_template_and_variables(template, kwargs)  # type: ignore
        self.status = result.format_text()
        return result

View file

@ -0,0 +1,68 @@
# from langflow.field_typing import Data
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_core.vectorstores import VectorStore
from langflow.custom import CustomComponent
from langflow.field_typing import BaseLanguageModel, Text
from langflow.schema import Record
from langflow.schema.message import Message
class SelfQueryRetrieverComponent(CustomComponent):
    """Component wrapping langchain's SelfQueryRetriever: an LLM turns the user query
    into a structured vector-store query using the supplied metadata schema."""

    display_name: str = "Self Query Retriever"
    description: str = "Retriever that uses a vector store and an LLM to generate the vector store queries."
    icon = "LangChain"

    def build_config(self):
        """Describe the inputs shown in the UI for this retriever."""
        config = {}
        config["query"] = {
            "display_name": "Query",
            "input_types": ["Message", "Text"],
            "info": "Query to be passed as input.",
        }
        config["vectorstore"] = {
            "display_name": "Vector Store",
            "info": "Vector Store to be passed as input.",
        }
        config["attribute_infos"] = {
            "display_name": "Metadata Field Info",
            "info": "Metadata Field Info to be passed as input.",
        }
        config["document_content_description"] = {
            "display_name": "Document Content Description",
            "info": "Document Content Description to be passed as input.",
        }
        config["llm"] = {
            "display_name": "LLM",
            "info": "LLM to be passed as input.",
        }
        return config

    def build(
        self,
        query: Message,
        vectorstore: VectorStore,
        attribute_infos: list[Record],
        document_content_description: Text,
        llm: BaseLanguageModel,
    ) -> Record:
        """Run a self-query retrieval and wrap the matching documents as Records."""
        # Translate each metadata Record into the AttributeInfo schema langchain expects.
        field_infos = [AttributeInfo(**info.data) for info in attribute_infos]
        retriever = SelfQueryRetriever.from_llm(
            llm=llm,
            vectorstore=vectorstore,
            document_contents=document_content_description,
            metadata_field_info=field_infos,
            enable_limit=True,
        )
        # Accept either a Message (use its text) or a plain string as the query.
        if isinstance(query, Message):
            input_text = query.text
        elif isinstance(query, str):
            input_text = query
        else:
            raise ValueError(f"Query type {type(query)} not supported.")
        documents = retriever.invoke(input=input_text)
        results = [Record.from_document(document) for document in documents]
        self.status = results
        return results

View file

@ -3,7 +3,7 @@ from typing import List
from langchain_text_splitters import CharacterTextSplitter
from langflow.custom import CustomComponent
from langflow.schema.schema import Record
from langflow.schema import Record
from langflow.utils.util import unescape_string

View file

@ -3,7 +3,7 @@ from typing import List, Optional
from langchain_text_splitters import Language, RecursiveCharacterTextSplitter
from langflow.custom import CustomComponent
from langflow.schema.schema import Record
from langflow.schema import Record
class LanguageRecursiveTextSplitterComponent(CustomComponent):

View file

@ -91,4 +91,4 @@ class PythonCodeStructuredTool(CustomComponent):
tool = StructuredTool.from_function(
func=func, args_schema=_class, name=name, description=description, return_direct=return_direct
)
return tool
return tool # type: ignore

View file

@ -3,7 +3,7 @@ from typing import Optional
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
from langflow.custom import CustomComponent
from langflow.schema.schema import Record
from langflow.schema import Record
from langflow.services.database.models.base import orjson_dumps

View file

@ -1,10 +1,11 @@
from typing import List, Optional
from langchain_core.embeddings import Embeddings
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.components.vectorstores.Redis import RedisComponent
from langflow.field_typing import Text
from langflow.schema import Record
from langchain_core.embeddings import Embeddings
class RedisSearchComponent(RedisComponent, LCVectorStoreComponent):

View file

@ -1,10 +1,11 @@
from typing import List, Optional
from langchain_core.embeddings import Embeddings
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.components.vectorstores.Weaviate import WeaviateVectorStoreComponent
from langflow.field_typing import Text
from langflow.schema import Record
from langchain_core.embeddings import Embeddings
class WeaviateSearchVectorStore(WeaviateVectorStoreComponent, LCVectorStoreComponent):

View file

@ -1,10 +1,11 @@
from typing import List
from langchain_core.embeddings import Embeddings
from langflow.components.vectorstores.base.model import LCVectorStoreComponent
from langflow.components.vectorstores.pgvector import PGVectorComponent
from langflow.field_typing import Text
from langflow.schema import Record
from langchain_core.embeddings import Embeddings
class PGVectorSearchComponent(PGVectorComponent, LCVectorStoreComponent):

View file

@ -163,3 +163,4 @@ class AstraDBVectorStoreComponent(CustomComponent):
)
return vector_store
return vector_store

View file

@ -1,3 +1,4 @@
from copy import deepcopy
from typing import List, Optional, Union
import chromadb
@ -7,8 +8,9 @@ from langchain_core.embeddings import Embeddings
from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import VectorStore
from langflow.base.vectorstores.utils import chroma_collection_to_records
from langflow.custom import CustomComponent
from langflow.schema.schema import Record
from langflow.schema import Record
class ChromaComponent(CustomComponent):
@ -48,6 +50,11 @@ class ChromaComponent(CustomComponent):
"display_name": "Server SSL Enabled",
"advanced": True,
},
"allow_duplicates": {
"display_name": "Allow Duplicates",
"advanced": True,
"info": "If false, will not add documents that are already in the Vector Store.",
},
}
def build(
@ -61,6 +68,7 @@ class ChromaComponent(CustomComponent):
chroma_server_host: Optional[str] = None,
chroma_server_http_port: Optional[int] = None,
chroma_server_grpc_port: Optional[int] = None,
allow_duplicates: bool = False,
) -> Union[VectorStore, BaseRetriever]:
"""
Builds the Vector Store or BaseRetriever object.
@ -75,6 +83,7 @@ class ChromaComponent(CustomComponent):
- chroma_server_host (Optional[str]): The host for the Chroma server.
- chroma_server_http_port (Optional[int]): The HTTP port for the Chroma server.
- chroma_server_grpc_port (Optional[int]): The gRPC port for the Chroma server.
- allow_duplicates (bool): Whether to allow duplicates in the Vector Store.
Returns:
- Union[VectorStore, BaseRetriever]: The Vector Store or BaseRetriever object.
@ -93,32 +102,34 @@ class ChromaComponent(CustomComponent):
)
client = chromadb.HttpClient(settings=chroma_settings)
# If documents, then we need to create a Chroma instance using .from_documents
# Check index_directory and expand it if it is a relative path
if index_directory is not None:
index_directory = self.resolve_path(index_directory)
chroma = Chroma(
persist_directory=index_directory,
client=client,
embedding_function=embedding,
collection_name=collection_name,
)
if allow_duplicates:
stored_records = []
else:
stored_records = chroma_collection_to_records(chroma.get())
_stored_documents_without_id = []
for record in deepcopy(stored_records):
del record.id
_stored_documents_without_id.append(record)
documents = []
for _input in inputs or []:
if isinstance(_input, Record):
documents.append(_input.to_lc_document())
if _input not in _stored_documents_without_id:
documents.append(_input.to_lc_document())
else:
documents.append(_input)
if documents is not None and embedding is not None:
if len(documents) == 0:
raise ValueError("If documents are provided, there must be at least one document.")
chroma = Chroma.from_documents(
documents=documents, # type: ignore
persist_directory=index_directory,
collection_name=collection_name,
embedding=embedding,
client=client,
)
else:
chroma = Chroma(
persist_directory=index_directory,
client=client,
embedding_function=embedding,
)
raise ValueError("Inputs must be a Record objects.")
if documents and embedding is not None:
chroma.add_documents(documents)
self.status = stored_records
return chroma

View file

@ -6,7 +6,7 @@ from langchain_core.vectorstores import VectorStore
from langflow.custom import CustomComponent
from langflow.field_typing import Embeddings
from langflow.schema.schema import Record
from langflow.schema import Record
class FAISSComponent(CustomComponent):

View file

@ -4,7 +4,7 @@ from langchain_community.vectorstores.mongodb_atlas import MongoDBAtlasVectorSea
from langflow.custom import CustomComponent
from langflow.field_typing import Embeddings
from langflow.schema.schema import Record
from langflow.schema import Record
class MongoDBAtlasComponent(CustomComponent):

View file

@ -8,7 +8,7 @@ from langchain_pinecone.vectorstores import PineconeVectorStore
from langflow.custom import CustomComponent
from langflow.field_typing import Embeddings
from langflow.schema.schema import Record
from langflow.schema import Record
class PineconeComponent(CustomComponent):

View file

@ -6,7 +6,7 @@ from langchain_core.vectorstores import VectorStore
from langflow.custom import CustomComponent
from langflow.field_typing import Embeddings
from langflow.schema.schema import Record
from langflow.schema import Record
class QdrantComponent(CustomComponent):

View file

@ -6,7 +6,7 @@ from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import VectorStore
from langflow.custom import CustomComponent
from langflow.schema.schema import Record
from langflow.schema import Record
class RedisComponent(CustomComponent):

Some files were not shown because too many files have changed in this diff Show more