Merge branch 'zustand/io/migration' into cz/new-tests

This commit is contained in:
cristhianzl 2024-04-03 16:23:24 -03:00
commit ebea3d9649
101 changed files with 5377 additions and 1784 deletions

1
.gitignore vendored
View file

@ -264,3 +264,4 @@ scratchpad*
chroma*/*
stuff/*
src/frontend/playwright-report/index.html
*.bak

17
.vscode/launch.json vendored
View file

@ -24,6 +24,23 @@
},
"envFile": "${workspaceFolder}/.env"
},
{
"name": "Debug CLI",
"type": "python",
"request": "launch",
"module": "langflow",
"args": [
"run",
"--path",
"${workspaceFolder}/src/backend/langflow/frontend"
],
"jinja": true,
"justMyCode": false,
"env": {
"LANGFLOW_LOG_LEVEL": "debug"
},
"envFile": "${workspaceFolder}/.env"
},
{
"name": "Python: Remote Attach",
"type": "python",

View file

@ -58,6 +58,9 @@ lint:
install_frontend:
cd src/frontend && npm install
install_frontendci:
cd src/frontend && npm ci
install_frontendc:
cd src/frontend && rm -rf node_modules package-lock.json && npm install
@ -127,27 +130,29 @@ frontendc:
make run_frontend
install_backend:
@echo 'Installing backend dependencies'
@echo 'Setting up the environment'
@make setup_env
@echo 'Installing backend dependencies'
@poetry install --extras deploy
backend:
make install_backend
@-kill -9 `lsof -t -i:7860`
ifeq ($(login),1)
@echo "Running backend without autologin";
poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio
ifdef login
@echo "Running backend autologin is $(login)";
LANGFLOW_AUTO_LOGIN=$(login) poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio
else
@echo "Running backend with autologin";
LANGFLOW_AUTO_LOGIN=True poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio
@echo "Running backend respecting the .env file";
poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio
endif
build_and_run:
@echo 'Removing dist folder'
@make setup_env
rm -rf dist
rm -rf src/backend/base/dist
make build
poetry run pip install dist/*.tar.gz && pip install src/backend/base/dist/*.tar.gz
poetry run pip install dist/*.tar.gz
poetry run langflow run
build_and_install:
@ -166,15 +171,22 @@ build:
make build_langflow_base
make build_langflow
build_langflow:
poetry build-rewrite-path-deps --version-pinning-strategy=semver
build_langflow_base:
make install_frontend
make install_frontendci
make build_frontend
cd src/backend/base && poetry build-rewrite-path-deps --version-pinning-strategy=semver
rm -rf src/backend/base/langflow/frontend
build_langflow_backup:
poetry lock && poetry build-rewrite-path-deps --version-pinning-strategy=semver
build_langflow:
cd ./scripts && python update_dependencies.py
poetry lock
poetry build-rewrite-path-deps --version-pinning-strategy=semver
mv pyproject.toml.bak pyproject.toml
mv poetry.lock.bak poetry.lock
dev:
make install_frontend
ifeq ($(build),1)
@ -193,10 +205,9 @@ lock_langflow:
lock:
# Run both in parallel
# cd src/backend/base && poetry lock
# poetry lock
@echo 'Locking dependencies'
@make -j2 lock_base lock_langflow
cd src/backend/base && poetry lock
poetry lock
publish_base:
make build_langflow_base
cd src/backend/base && poetry publish

View file

@ -15,11 +15,9 @@
[![GitHub fork](https://img.shields.io/github/forks/logspace-ai/langflow?style=social)](https://github.com/logspace-ai/langflow/fork)
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langflow_ai.svg?style=social&label=Follow%20%40langflow_ai)](https://twitter.com/langflow_ai)
[![](https://dcbadge.vercel.app/api/server/EqksyE2EX9?compact=true&style=flat)](https://discord.com/invite/EqksyE2EX9)
[![HuggingFace Spaces](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-md.svg)](https://huggingface.co/spaces/Logspace/Langflow?duplicate=true)
[![HuggingFace Spaces](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-md.svg)](https://huggingface.co/spaces/Logspace/Langflow-Preview?duplicate=true)
[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/logspace-ai/langflow)
The easiest way to create and customize your flow
<a href="https://github.com/logspace-ai/langflow">
<img width="100%" src="https://github.com/logspace-ai/langflow/blob/dev/docs/static/img/new_langflow_demo.gif"></a>
@ -64,7 +62,7 @@ langflow run # or langflow --help
### HuggingFace Spaces
You can also check it out on HuggingFace Spaces and run it in your browser for free! [Click here to duplicate the Space](https://huggingface.co/spaces/Logspace/Langflow?duplicate=true)
You can also check it out on HuggingFace Spaces and run it in your browser for free! [Click here to duplicate the Space](https://huggingface.co/spaces/Logspace/Langflow-Preview?duplicate=true)
# 🖥️ Command Line Interface (CLI)

View file

@ -62,9 +62,10 @@ RUN apt-get update \
WORKDIR /app
COPY pyproject.toml poetry.lock ./
COPY src ./src
COPY scripts ./scripts
COPY Makefile ./
COPY README.md ./
RUN curl -sSL https://install.python-poetry.org | python3 - && make build
RUN make build
# Final stage for the application
FROM python-base as final

View file

@ -0,0 +1,87 @@
import Admonition from '@theme/Admonition';
# Data
### API Request
This component makes HTTP requests to the specified URLs.
**Params**
- **URLs:** URLs to make requests to.
- **Method:** The HTTP method to use.
- **Headers:** The headers to send with the request.
- **Body:** The body to send with the request (for POST, PATCH, PUT).
- **Timeout:** The timeout to use for the request.
<Admonition type="tip" title="Tip">
<p>
Use this component to make HTTP requests to external APIs or services and retrieve data.
</p>
<p>
Ensure that you provide valid URLs and configure the method, headers, body, and timeout appropriately.
</p>
</Admonition>
---
### Directory
This component recursively loads files from a directory.
**Params**
- **Path:** The path to the directory.
- **Types:** File types to load. Leave empty to load all types.
- **Depth:** Depth to search for files.
- **Max Concurrency:** The maximum number of concurrent file loading operations.
- **Load Hidden:** If true, hidden files will be loaded.
- **Recursive:** If true, the search will be recursive.
- **Silent Errors:** If true, errors will not raise an exception.
- **Use Multithreading:** If true, use multithreading for loading files.
<Admonition type="tip" title="Tip">
<p>
Use this component to load files from a directory, such as text files, JSON files, etc.
</p>
<p>
Ensure that you provide the correct path to the directory and configure other parameters as needed.
</p>
</Admonition>
---
### File
This component loads a generic file.
**Params**
- **Path:** The path to the file.
- **Silent Errors:** If true, errors will not raise an exception.
<Admonition type="tip" title="Tip">
<p>
Use this component to load a generic file, such as a text file, JSON file, etc.
</p>
<p>
Ensure that you provide the correct path to the file and configure other parameters as needed.
</p>
</Admonition>
---
### URL
This component fetches content from one or more URLs.
**Params**
- **URLs:** The URLs from which content will be fetched.
<Admonition type="tip" title="Tip">
<p>
Ensure that you provide valid URLs and configure other parameters as needed.
</p>
</Admonition>

View file

@ -2,17 +2,57 @@ import Admonition from '@theme/Admonition';
# Helpers
### Custom Component
### Chat Memory
This component serves as a template for creating your own custom components.
This component retrieves stored chat messages given a specific Session ID.
**Params**
- **Display Name:** Parameter
- **Sender Type:** Choose the sender type from options like "Machine", "User", or "Machine and User".
- **Sender Name:** (Optional) The name of the sender.
- **Number of Messages:** Number of messages to retrieve.
- **Session ID:** The Session ID of the chat history.
- **Order:** Choose the order of the messages, either "Ascending" or "Descending".
- **Record Template:** (Optional) Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.
**Usage**
---
To use this component, provide the required parameter as specified.
### Combine Text
This component concatenates two text sources into a single text chunk using a specified delimiter.
**Params**
- **First Text:** The first text input to concatenate.
- **Second Text:** The second text input to concatenate.
- **Delimiter:** A string used to separate the two text inputs. Defaults to a whitespace.
---
### Create Record
This component dynamically creates a Record with a specified number of fields.
**Params**
- **Number of Fields:** Number of fields to be added to the record.
- **Text Key:** Key to be used as text.
---
### Custom Component
Use this component as a template to create your own custom component.
**Params**
- **Parameter:** Describe the purpose of this parameter.
<Admonition type="info" title="Info">
<p>
Customize the <code>build_config</code> and <code>build</code> methods according to your requirements.
</p>
</Admonition>
Learn more about [Custom Component](http://docs.langflow.org/components/custom).
@ -20,154 +60,68 @@ Learn more about [Custom Component](http://docs.langflow.org/components/custom).
### Documents to Records
This component converts documents to records.
Convert LangChain Documents into Records.
**Params**
**Parameters**
- **Documents:**
- **Display Name:** Documents
**Usage**
To use this component, provide a list of documents to be converted into records.
- **Documents:** Documents to be converted into Records.
---
### Unique ID Generator
### ID Generator
This component generates a unique ID.
Generates a unique ID.
**Params**
**Parameters**
- **Value:**
- **Display Name:** Value
- **Real Time Refresh:** True
**Usage**
To use this component, simply retrieve the generated unique ID from the provided value parameter.
- **Value:** Unique ID generated.
---
### Message History
This component is used to retrieve stored messages from the message history.
Retrieves stored chat messages given a specific Session ID.
**Params**
**Parameters**
- **Sender Type:**
- **Display Name:** Sender Type
- **Options:** Machine, User, Machine and User
- **Sender Name:**
- **Display Name:** Sender Name
- **Number of Messages:**
- **Display Name:** Number of Messages
- **Info:** Number of messages to retrieve.
- **Session ID:**
- **Display Name:** Session ID
- **Info:** Session ID of the chat history.
- **Input Types:** Text
**Usage**
To use this component, configure the parameters as needed to retrieve messages from the message history.
---
### Python Function
**Params**
- **Code:**
- **Display Name:** Code
- **Info:** The code for the function.
- **Show:** True
**Usage**
To use this component, provide the Python code for the function you want to define.
- **Sender Type:** Options for the sender type.
- **Sender Name:** Sender name.
- **Number of Messages:** Number of messages to retrieve.
- **Session ID:** Session ID of the chat history.
- **Order:** Order of the messages.
---
### Records to Text
This component converts records into a single piece of text using a template.
Convert Records into plain text following a specified template.
**Params**
**Parameters**
- **Records:**
- **Display Name:** Records
- **Info:** The records to convert to text.
- **Template:**
- **Display Name:** Template
- **Info:** The template to use for formatting the records. It can contain the keys `{text}`, `{data}` or any other key in the Record.
**Usage**
To use this component, provide the records you want to convert to text along with a template for formatting.
- **Records:** The records to convert to text.
- **Template:** The template to use for formatting the records. It can contain the keys `{text}`, `{data}` or any other key in the Record.
---
### SearchApi
### Split Text
This component provides access to the real-time search engine results API.
Split text into chunks of a specified length.
**Params**
**Parameters**
- **Engine:**
- **Display Name:** Engine
- **Info:** The search engine to use.
- **Parameters:**
- **Display Name:** Parameters
- **Info:** The parameters to send with the request.
- **API Key:**
- **Display Name:** API Key
- **Info:** The API key to use SearchApi.
- **Required:** True
- **Password:** True
Learn more about [SearchApi Documentation](https://www.searchapi.io/docs/google).
---
### Text to Record
This component enables the creation of a record from text data.
**Params**
- **Data:**
- **Display Name:** Data
- **Info:** The data to convert to a record.
- **Input Types:** Text
**Usage**
To use this component, provide the text data to convert into a record.
- **Texts:** Texts to split.
- **Separators:** The characters to split on. Defaults to [" "].
- **Max Chunk Size:** The maximum length (in number of characters) of each chunk.
- **Chunk Overlap:** The amount of character overlap between chunks.
- **Recursive:** Whether to split recursively.
---
### Update Record
This component updates a record with new data.
Update Record with text-based key/value pairs, similar to updating a Python dictionary.
**Params**
**Parameters**
- **Record:**
- **Display Name:** Record
- **Info:** The record to update.
- **New Data:**
- **Display Name:** New Data
- **Info:** The new data to update the record with.
- **Input Types:** Text
**Usage**
To use this component, provide the record to be updated along with the new data.
- **Record:** The record to update.
- **New Data:** The new data to update the record with.

View file

@ -22,6 +22,25 @@ This component is designed to get user input from the chat.
</p>
</Admonition>
---
### Prompt
Create a prompt template with dynamic variables.
**Parameters**
- **Template:** the template for the prompt.
<Admonition type="note" title="Note">
<p>
Prompt variables can be created with any chosen name inside curly brackets, e.g. `{variable_name}`
</p>
</Admonition>
---
### Text Input
This component is designed for simple text input, allowing users to pass textual data to subsequent components in the workflow. It's particularly useful for scenarios where a brief user input is required to initiate or influence the flow.

View file

@ -8,7 +8,7 @@ import Admonition from '@theme/Admonition';
</p>
</Admonition>
### AmazonBedrock
### Amazon Bedrock
This component facilitates the generation of text using the LLM (Large Language Model) model from Amazon Bedrock.
@ -53,7 +53,7 @@ This component facilitates the generation of text using the LLM (Large Language
---
### AnthropicLLM
### Anthropic
This component allows the generation of text using Anthropic Chat&Completion large language models.
@ -83,7 +83,7 @@ For detailed documentation and integration guides, please refer to the [Anthropi
---
### AzureChatOpenAI
### Azure OpenAI
This component allows the generation of text using the LLM (Large Language Model) model from Azure OpenAI.
@ -125,44 +125,6 @@ This component allows the generation of text using the LLM (Large Language Model
For detailed documentation and integration guides, please refer to the [Azure OpenAI Component Documentation](https://python.langchain.com/docs/integrations/llms/azure_openai).
---
### QianfanChatEndpoint
This component facilitates the generation of text using Baidu Qianfan chat models.
**Params**
- **Model Name:** Specifies the name of the Qianfan chat model to be used for text generation. Available options include:
- _`"ERNIE-Bot"`_
- _`"ERNIE-Bot-turbo"`_
- _`"BLOOMZ-7B"`_
- _`"Llama-2-7b-chat"`_
- _`"Llama-2-13b-chat"`_
- _`"Llama-2-70b-chat"`_
- _`"Qianfan-BLOOMZ-7B-compressed"`_
- _`"Qianfan-Chinese-Llama-2-7B"`_
- _`"ChatGLM2-6B-32K"`_
- _`"AquilaChat-7B"`_
- **Qianfan Ak:** Your Baidu Qianfan access key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop).
- **Qianfan Sk:** Your Baidu Qianfan secret key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop).
- **Top p (Optional):** Model parameter. Specifies the top-p value. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`0.8`_.
- **Temperature (Optional):** Model parameter. Specifies the sampling temperature. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`0.95`_.
- **Penalty Score (Optional):** Model parameter. Specifies the penalty score. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`1.0`_.
- **Endpoint (Optional):** Endpoint of the Qianfan LLM, required if custom model is used.
- **Input Value:** Specifies the input text for text generation.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **System Message (Optional):** A system message to pass to the model.
---
### Cohere
@ -185,32 +147,6 @@ This component enables text generation using Cohere large language models.
---
### CTransformers
This component allows the generation of text using CTransformers large language models.
**Params**
- **Model:** Specifies the CTransformers model to be used for text generation.
- **Model File (Optional):** Path to the model file if using a custom model. Should be a _.bin_ file.
- **Model Type:** Specifies the type of the CTransformers model.
- **Config (Optional):** Additional configuration parameters for the model. It should be provided as a JSON object.
Defaults to:
`{"top_k":40,"top_p":0.95,"temperature":0.8,"repetition_penalty":1.1,"last_n_tokens":64,"seed":-1,"max_new_tokens":256,"stop":"","stream":"False","reset":"True","batch_size":8,"threads":-1,"context_length":-1,"gpu_layers":0}`.
- **Input Value:** Specifies the input text for text generation.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **System Message (Optional):** A system message to pass to the model.
---
### Google Generative AI
This component enables text generation using Google Generative AI.
@ -261,151 +197,42 @@ This component facilitates text generation using LLM models from the Hugging Fac
---
### LlamaCpp
### Ollama
The `LlamaCpp` is a component for generating text using the llama.cpp model.
Generate text using Ollama Local LLMs.
**Params**
**Parameters**
- **Model Path:** The path to the llama.cpp model file. This should be provided as a file type input.
- **Input Value:** The input text for text generation.
- **Grammar (Optional):** The grammar for text generation.
- **Cache (Optional):** Specifies whether to cache the generated text.
- **Client (Optional):** The client to use for text generation.
- **Echo (Optional):** Specifies whether to echo the generated text. Defaults to _`False`_.
- **F16 KV:** Specifies whether to use F16 key-value pairs. Defaults to _`True`_.
- **Grammar Path (Optional):** The path to the grammar file.
- **Last N Tokens Size (Optional):** The size of the last N tokens. Defaults to _`64`_.
- **Logits All:** Specifies whether to include logits for all tokens. Defaults to _`False`_.
- **Logprobs (Optional):** The log probabilities for text generation.
- **Lora Base (Optional):** The base URL for Lora.
- **Lora Path (Optional):** The path for Lora.
- **Max Tokens (Optional):** The maximum number of tokens to generate. Defaults to _`256`_.
- **Metadata (Optional):** Additional metadata for the model.
- **Model Kwargs:** Additional keyword arguments for the model. Should be provided as a Python dictionary.
- **N Batch (Optional):** The batch size. Defaults to _`8`_.
- **N Ctx:** The context size. Defaults to _`512`_.
- **N GPU Layers (Optional):** The number of GPU layers.
- **N Parts:** The number of parts.
- **N Threads (Optional):** The number of threads. Defaults to _`1`_.
- **Repeat Penalty (Optional):** The repeat penalty for text generation. Defaults to _`1.1`_.
- **Rope Freq Base:** The base frequency for rope.
- **Rope Freq Scale:** The scale frequency for rope.
- **Seed:** The seed for random generation.
- **Stop (Optional):** The stop words for text generation.
- **Streaming:** Specifies whether to stream the response from the model. Defaults to _`True`_.
- **Suffix (Optional):** The suffix for text generation.
- **Tags (Optional):** The tags for text generation.
- **Temperature (Optional):** The temperature for text generation. Defaults to _`0.8`_.
- **Top K (Optional):** The top K tokens to consider for text generation. Defaults to _`40`_.
- **Top P (Optional):** The top P probability threshold for text generation. Defaults to _`0.95`_.
- **Use Mlock:** Specifies whether to use Mlock. Defaults to _`False`_.
- **Use Mmap (Optional):** Specifies whether to use Mmap. Defaults to _`True`_.
- **Verbose:** Specifies whether to enable verbose mode. Defaults to _`True`_.
- **Vocab Only:** Specifies whether to include vocabulary only.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
For more information, please refer to the [documentation](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp).
- **Base URL:** Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.
- **Model Name:** The model name to use. Refer to [Ollama Library](https://ollama.ai/library) for more models.
- **Temperature:** Controls the creativity of model responses. (Default: 0.8)
- **Cache:** Enable or disable caching. (Default: False)
- **Format:** Specify the format of the output (e.g., json). (Advanced)
- **Metadata:** Metadata to add to the run trace. (Advanced)
- **Mirostat:** Enable/disable Mirostat sampling for controlling perplexity. (Default: Disabled)
- **Mirostat Eta:** Learning rate for Mirostat algorithm. (Default: None) (Advanced)
- **Mirostat Tau:** Controls the balance between coherence and diversity of the output. (Default: None) (Advanced)
- **Context Window Size:** Size of the context window for generating tokens. (Default: None) (Advanced)
- **Number of GPUs:** Number of GPUs to use for computation. (Default: None) (Advanced)
- **Number of Threads:** Number of threads to use during computation. (Default: None) (Advanced)
- **Repeat Last N:** How far back the model looks to prevent repetition. (Default: None) (Advanced)
- **Repeat Penalty:** Penalty for repetitions in generated text. (Default: None) (Advanced)
- **TFS Z:** Tail free sampling value. (Default: None) (Advanced)
- **Timeout:** Timeout for the request stream. (Default: None) (Advanced)
- **Top K:** Limits token selection to top K. (Default: None) (Advanced)
- **Top P:** Works together with top-k. (Default: None) (Advanced)
- **Verbose:** Whether to print out response text.
- **Tags:** Tags to add to the run trace. (Advanced)
- **Stop Tokens:** List of tokens to signal the model to stop generating text. (Advanced)
- **System:** System to use for generating text. (Advanced)
- **Template:** Template to use for generating text. (Advanced)
- **Input:** The input text.
- **Stream:** Whether to stream the response.
- **System Message:** System message to pass to the model. (Advanced)
---
### ChatOllama
This component facilitates text generation using the Local LLM model for chat with Ollama.
**Params**
- **Base URL:** The endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.
- **Model Name:** The name of the model to use. Refer to [https://ollama.ai/library](https://ollama.ai/library) for more models.
- **Input Value:** The input text for text generation.
- **Mirostat:** Enable/disable Mirostat sampling for controlling perplexity.
- **Mirostat Eta (Optional):** The learning rate for the Mirostat algorithm. (Default: 0.1)
- **Mirostat Tau (Optional):** Controls the balance between coherence and diversity of the output. (Default: 5.0)
- **Repeat Last N (Optional):** How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)
- **Verbose (Optional):** Whether to print out response text.
- **Cache (Optional):** Enable or disable caching. Defaults to _`False`_.
- **Context Window Size (Optional):** Size of the context window for generating tokens. (Default: 2048)
- **Number of GPUs (Optional):** Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)
- **Format (Optional):** Specify the format of the output (e.g., json).
- **Metadata (Optional):** Metadata to add to the run trace.
- **Number of Threads (Optional):** Number of threads to use during computation. (Default: detected for optimal performance)
- **Repeat Penalty (Optional):** Penalty for repetitions in generated text. (Default: 1.1)
- **Stop Tokens (Optional):** List of tokens to signal the model to stop generating text.
- **System (Optional):** System to use for generating text.
- **Tags (Optional):** Tags to add to the run trace.
- **Temperature (Optional):** Controls the creativity of model responses. Defaults to _`0.8`_.
- **Template (Optional):** Template to use for generating text.
- **TFS Z (Optional):** Tail free sampling value. (Default: 1)
- **Timeout (Optional):** Timeout for the request stream.
- **Top K (Optional):** Limits token selection to top K. (Default: 40)
- **Top P (Optional):** Works together with top-k. (Default: 0.9)
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **System Message (Optional):** System message to pass to the model.
---
### OpenAIModel
### OpenAI
This component facilitates text generation using OpenAI's models.
@ -431,14 +258,50 @@ This component facilitates text generation using OpenAI's models.
---
### ChatVertexAI
### Qianfan
This component facilitates the generation of text using Baidu Qianfan chat models.
**Params**
- **Model Name:** Specifies the name of the Qianfan chat model to be used for text generation. Available options include:
- _`"ERNIE-Bot"`_
- _`"ERNIE-Bot-turbo"`_
- _`"BLOOMZ-7B"`_
- _`"Llama-2-7b-chat"`_
- _`"Llama-2-13b-chat"`_
- _`"Llama-2-70b-chat"`_
- _`"Qianfan-BLOOMZ-7B-compressed"`_
- _`"Qianfan-Chinese-Llama-2-7B"`_
- _`"ChatGLM2-6B-32K"`_
- _`"AquilaChat-7B"`_
- **Qianfan Ak:** Your Baidu Qianfan access key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop).
- **Qianfan Sk:** Your Baidu Qianfan secret key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop).
- **Top p (Optional):** Model parameter. Specifies the top-p value. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`0.8`_.
- **Temperature (Optional):** Model parameter. Specifies the sampling temperature. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`0.95`_.
- **Penalty Score (Optional):** Model parameter. Specifies the penalty score. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`1.0`_.
- **Endpoint (Optional):** Endpoint of the Qianfan LLM, required if custom model is used.
- **Input Value:** Specifies the input text for text generation.
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_.
- **System Message (Optional):** A system message to pass to the model.
---
### Vertex AI
The `ChatVertexAI` is a component for generating text using Vertex AI Chat large language models API.
**Params**
- **Input Value:** The input text for text generation.
- **Credentials:** The JSON file containing the credentials for accessing the Vertex AI Chat API.
- **Project:** The name of the project associated with the Vertex AI Chat API.
@ -453,6 +316,8 @@ The `ChatVertexAI` is a component for generating text using Vertex AI Chat large
- **Temperature:** Controls the creativity of model responses. Defaults to _`0.0`_.
- **Input Value:** The input text for text generation.
- **Top K:** Limits token selection to top K. Defaults to _`40`_.
- **Top P:** Works together with top-k. Defaults to _`0.95`_.

View file

@ -2,7 +2,7 @@ import Admonition from '@theme/Admonition';
# Outputs
## Chat Output
### Chat Output
This component is designed to send a message to the chat.
@ -22,7 +22,7 @@ This component is designed to send a message to the chat.
</p>
</Admonition>
## Text Output
### Text Output
This component is designed to display text data to the user. It's particularly useful for scenarios where you don't want to send the text data to the chat, but still want to display it.

View file

@ -1,33 +1,16 @@
# ⛓️ Running Langflow
Langflow can be run in a variety of ways, including using the command-line interface (CLI) or HuggingFace Spaces.
```bash
python -m langflow run
```
or
```bash
langflow run # or langflow --help
```
### 🤗 HuggingFace Spaces
Check out our [guide](./hugging-face-spaces) on how to get your Langflow instance running on HuggingFace Spaces.
# 🖥️ Command Line Interface (CLI)
Langflow provides a command-line interface (CLI) for easy management and configuration.
## Usage
## Overview
You can run the Langflow using the following command:
Langflow's Command Line Interface (CLI) is a powerful tool that allows you to interact with the Langflow server from the command line. The CLI provides a wide range of commands to help you shape Langflow to your needs.
Running the CLI without any arguments will display a list of available commands and options.
```bash
langflow run [OPTIONS]
langflow --help
# or
langflow
```
Each option is detailed below:
@ -58,4 +41,4 @@ These parameters are important for users who need to customize the behavior of L
You can configure many of the CLI options using environment variables. These can be exported in your operating system or added to a `.env` file and loaded using the `--env-file` option.
A sample `.env` file named `.env.example` is included with the project. Copy this file to a new file named `.env` and replace the example values with your actual settings. If you're setting values in both your OS and the `.env` file, the `.env` settings will take precedence.
A sample `.env` file named `.env.example` is included with the project. Copy this file to a new file named `.env` and replace the example values with your actual settings. If you're setting values in both your OS and the `.env` file, the `.env` settings will take precedence.

View file

@ -2,7 +2,7 @@
## TLDR;
A fully featured version of Langflow can be accessed via [HuggingFace Spaces](https://huggingface.co/spaces/Logspace/Langflow?duplicate=true) with no installation required. All you gotta do is [duplicate the Space](https://huggingface.co/spaces/Logspace/Langflow?duplicate=true) and you'll have your own copy to play around with!
A fully featured version of Langflow can be accessed via [HuggingFace Spaces](https://huggingface.co/spaces/Logspace/Langflow-Preview?duplicate=true) with no installation required. All you gotta do is [duplicate the Space](https://huggingface.co/spaces/Logspace/Langflow-Preview?duplicate=true) and you'll have your own copy to play around with!
---
@ -10,7 +10,7 @@ A fully featured version of Langflow can be accessed via [HuggingFace Spaces](ht
HuggingFace provides great support for running Langflow in their Spaces environment. This means you can run Langflow without any installation required.
The first step is to go to the [Langflow Space](https://huggingface.co/spaces/Logspace/Langflow?duplicate=true).
The first step is to go to the [Langflow Space](https://huggingface.co/spaces/Logspace/Langflow-Preview?duplicate=true).
You'll be greeted with the following screen:

View file

@ -1,17 +0,0 @@
# 📦 How to install?
## Installation
Make sure you have **Python 3.10** installed on your system.
You can install **Langflow** using pip:
```bash
pip install langflow -U
```
Or you can install a pre-release version using:
```bash
pip install langflow --pre -U
```

View file

@ -0,0 +1,202 @@
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
import ZoomableImage from "/src/theme/ZoomableImage.js";
import DownloadableJsonFile from "/src/theme/DownloadableJsonFile.js";
import Admonition from "@theme/Admonition";
# 🌟 RAG with AstraDB
This guide will walk you through how to build a RAG (Retrieval Augmented Generation) application using **AstraDB** and **Langflow**.
AstraDB is a cloud-native database built on Apache Cassandra that is optimized for the cloud. It is a fully managed database-as-a-service that simplifies operations and reduces costs. AstraDB is built on the same technology that powers the largest Cassandra deployments in the world.
In this guide, we will use AstraDB as a vector store to store and retrieve the documents that will be used by the RAG application to generate responses.
<Admonition type="tip">
This guide assumes that you have Langflow up and running. If you are new to
Langflow, you can check out the [Getting Started](/) guide.
</Admonition>
TLDR;
- Visit the [Astra](https://astra.datastax.com) website and create a free account
- Duplicate our [Langflow 1.0 Space](https://huggingface.co/spaces/Logspace/Langflow-Preview?duplicate=true)
- Create a new database, get a **Token** and the **API Endpoint**
- <DownloadableJsonFile
title="Download AstraDB RAG Flows"
source="/data/AstraDB-RAG-Flows.json"
/>
- Import the project into Langflow by dropping it on the Canvas or My Collection page
- Update the **Token** and **API Endpoint** in the **AstraDB** components
- Update the OpenAI API key in the **OpenAI** components
- Run the ingestion flow which is the one that uses the **AstraDB** component
- Click on the ⚡ _Run_ button and start interacting with your RAG application
# First things first
## Create an AstraDB Database
To get started, you will need to create an AstraDB database. Visit the [Astra](https://astra.datastax.com) website and create a free account.
Once you have created an account, you will be taken to the AstraDB dashboard. Click on the **Create Database** button.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/astra-create-database.png",
dark: "img/astra-create-database.png",
}}
style={{ width: "80%" }}
/>
Now you will need to configure your database. Choose the **Serverless (Vector)** deployment type, and pick a Database name, provider and region.
After you have configured your database, click on the **Create Database** button.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/astra-configure-deployment.png",
dark: "img/astra-configure-deployment.png",
}}
style={{ width: "70%" }}
/>
Once your database is initialized, to the right of the page, you will see the _Database Details_ section which contains a button for you to copy the **API Endpoint** and another to generate a **Token**.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/astra-generate-token.png",
dark: "img/astra-generate-token.png",
}}
style={{ width: "50%" }}
/>
Now we are all set to start building our RAG application using AstraDB and Langflow.
## (Optional) Duplicate the Langflow 1.0 HuggingFace Space
If you haven't already, now is the time to launch Langflow. To make things easier, you can duplicate our [Langflow 1.0 Space](https://huggingface.co/spaces/Logspace/Langflow-Preview?duplicate=true) which sets up a Langflow instance just for you.
You'll still need to get the project file and import it, so let's get to that.
## Import AstraDB RAG Flows
To get started, you will need to <DownloadableJsonFile title="download the AstraDB RAG Flows project file" source="/data/AstraDB-RAG-Flows.json" />.
Once you have downloaded the project file, you can import it into Langflow by dropping it on the Canvas or My Collection page.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/drag-and-drop-flow.png",
dark: "img/drag-and-drop-flow.png",
}}
style={{ width: "90%" }}
/>
This project consists of two flows. The simpler one is the **Ingestion Flow** which is responsible for ingesting the documents into the AstraDB database.
Your first step should be to understand what each flow does and how they interact with each other.
The ingestion flow consists of:
- **Files** component that uploads a text file to Langflow
- **Recursive Character Text Splitter** component that splits the text into smaller chunks
- **OpenAIEmbeddings** component that generates embeddings for the text chunks
- **AstraDB** component that stores the text chunks in the AstraDB database
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/astra-ingestion-flow.png",
dark: "img/astra-ingestion-flow.png",
}}
style={{ width: "90%" }}
/>
Now, let's update the **AstraDB** and **AstraDB Search** components with the **Token** and **API Endpoint** that we generated earlier, and the OpenAI Embeddings components with your OpenAI API key.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/astra-ingestion-fields.png",
dark: "img/astra-ingestion-fields.png",
}}
style={{ width: "90%" }}
/>
And run it! This will ingest the Text data from your file into the AstraDB database.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/astra-ingestion-run.png",
dark: "img/astra-ingestion-run.png",
}}
style={{ width: "90%" }}
/>
Now, on to the **RAG Flow**. This flow is responsible for generating responses to your queries.
The RAG flow is a bit more complex. It consists of:
- **Chat Input** component that defines where to put the user input coming from the Interaction Panel
- **OpenAI Embeddings** component that generates embeddings from the user input
- **AstraDB Search** component that retrieves the most relevant Records from the AstraDB database
- **Text Output** component that turns the Records into Text by concatenating them and also displays it in the Interaction Panel
- One interesting point you'll see here is that this component is named `Extracted Chunks`, and that is how it will appear in the Interaction Panel
- **Prompt** component that takes in the user input and the retrieved Records as text and builds a prompt for the OpenAI model
- **OpenAI** component that generates a response to the prompt
- **Chat Output** component that displays the response in the Interaction Panel
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/astra-rag-flow.png",
dark: "img/astra-rag-flow.png",
}}
style={{ width: "90%" }}
/>
To run it all we have to do is click on the ⚡ _Run_ button and start interacting with your RAG application.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/astra-rag-flow-run.png",
dark: "img/astra-rag-flow-run.png",
}}
style={{ width: "90%" }}
/>
This opens the Interaction Panel where you can chat with your data.
Because this flow has a **Chat Input** and a **Text Output** component, the Panel displays a chat input at the bottom and the Extracted Chunks section on the left.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/astra-rag-flow-interaction-panel.png",
dark: "img/astra-rag-flow-interaction-panel.png",
}}
style={{ width: "80%" }}
/>
Once we interact with it we get a response and the Extracted Chunks section is updated with the retrieved records.
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/astra-rag-flow-interaction-panel-interaction.png",
dark: "img/astra-rag-flow-interaction-panel-interaction.png",
}}
style={{ width: "80%" }}
/>
And that's it! You have successfully built a RAG application using AstraDB and Langflow.
# Conclusion
In this guide, we have learned how to build a RAG application using AstraDB and Langflow. We have seen how to create an AstraDB database, import the AstraDB RAG Flows project into Langflow, and run the ingestion and RAG flows.

View file

@ -1,6 +1,6 @@
# 👋 Welcome to Langflow
Langflow is an easy way to create flows. The drag-and-drop feature allows quick and effortless experimentation, while the built-in chat interface facilitates real-time interaction. It provides options to edit prompt parameters, create chains and agents, track thought processes, and export flows.
Langflow is an easy way to build from simple to complex AI applications. It is a low-code platform that allows you to integrate AI into everything you do.
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
@ -16,3 +16,79 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
}}
style={{ width: "100%" }}
/>
## 🚀 First steps
## Installation
Make sure you have **Python 3.10** installed on your system.
You can install **Langflow** with [pipx](https://pipx.pypa.io/stable/installation/) or with pip.
Pipx can fetch the missing Python version for you, but you can also install it manually.
```bash
pipx install langflow --python python3.10 --fetch-missing-python
# or
pip install langflow -U
```
Or you can install a pre-release version using:
```bash
pipx install langflow --python python3.10 --fetch-missing-python --pip-args="--pre"
# or
pip install langflow --pre -U
```
### ⛓️ Running Langflow
Langflow can be run in a variety of ways, including using the command-line interface (CLI) or HuggingFace Spaces.
```bash
langflow run # or langflow --help
```
#### 🤗 HuggingFace Spaces
Hugging Face provides a great alternative for running Langflow in their Spaces environment. This means you can run Langflow without any local installation required.
The first step is to go to the [Langflow Space](https://huggingface.co/spaces/Logspace/Langflow?duplicate=true).
Remember to use a Chromium-based browser for the best experience. You'll be presented with the following screen:
<ZoomableImage
alt="Docusaurus themed image"
sources={{
light: "img/duplicate-space.png",
dark: "img/duplicate-space.png",
}}
style={{ width: "100%" }}
/>
From here, just name your Space, define the visibility (Public or Private), and click on `Duplicate Space` to start the installation process. When that is done, you'll be redirected to the Space's main page to start using Langflow right away!
Once you get Langflow running, click on New Project in the top right corner of the screen. Langflow provides a range of example flows to help you get started.
To quickly try one of them, open a starter example, set up your API keys and click ⚡ Run, on the bottom right corner of the canvas. This will open up Langflow's Interaction Panel with the chat console, text inputs, and outputs.
### 🖥️ Command Line Interface (CLI)
Langflow provides a command-line interface (CLI) for easy management and configuration.
#### Usage
You can run Langflow using the following command:
```bash
langflow run [OPTIONS]
```
Find more information about the available options by running:
```bash
langflow --help
```

View file

@ -10,7 +10,7 @@ We have a special channel in our Discord server dedicated to Langflow 1.0 migrat
## TLDR;
- Inputs and Outputs of Components have changed
- The composition model has been replaced with a flow of data
- We've surfaced steps that were previously run in the background
- Continued support for LangChain and new support for multiple frameworks
- Redesigned sidebar and customizable interaction panel
- New Native Categories and Components
@ -32,11 +32,11 @@ Langflow 1.0 introduces adds the concept of Inputs and Outputs to flows, allowin
[Learn more about Inputs and Outputs of Components](../migration/inputs-and-outputs)
## From Composition to Freedom
## To Compose or Not to Compose: the choice is yours
Even though composition is still possible in Langflow 1.0, the new standard is getting data moving through the flow. This allows for more flexibility and control over the data flow in your projects. Check out how to use this in new and existing projects.
Even though composition is still possible in Langflow 1.0, the new standard is getting data moving through the flow. This allows for more flexibility and control over the data flow in your projects.
[Learn more about the Flow of Data](../migration/flow-of-data)
We will create guides on how to interweave LangChain components with our Core components soon.
## Continued Support for LangChain and Multiple Frameworks

View file

@ -14,6 +14,7 @@ module.exports = {
organizationName: "logspace-ai",
projectName: "langflow",
trailingSlash: false,
staticDirectories: ["static"],
customFields: {
mendableAnonKey: process.env.MENDABLE_ANON_KEY,
},

870
docs/package-lock.json generated

File diff suppressed because it is too large Load diff

View file

@ -16,11 +16,11 @@
"dependencies": {
"@babel/preset-react": "^7.22.3",
"@code-hike/mdx": "^0.9.0",
"@docusaurus/core": "^3.1.1",
"@docusaurus/plugin-ideal-image": "^3.1.1",
"@docusaurus/preset-classic": "^3.1.1",
"@docusaurus/theme-classic": "^3.1.1",
"@docusaurus/theme-search-algolia": "^3.0.1",
"@docusaurus/core": "^3.2.0",
"@docusaurus/plugin-ideal-image": "^3.2.0",
"@docusaurus/preset-classic": "^3.2.0",
"@docusaurus/theme-classic": "^3.2.0",
"@docusaurus/theme-search-algolia": "^3.2.0",
"@mdx-js/react": "^2.3.0",
"@mendable/search": "^0.0.154",
"@pbe/react-yandex-maps": "^1.2.4",
@ -47,7 +47,7 @@
"tailwindcss": "^3.3.2"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "2.4.1",
"@docusaurus/module-type-aliases": "^3.2.0",
"css-loader": "^6.8.1",
"docusaurus-node-polyfills": "^1.0.0",
"node-sass": "^9.0.0",

View file

@ -2,26 +2,34 @@ module.exports = {
docs: [
{
type: "category",
label: "Getting Started",
label: " Getting Started",
collapsed: false,
items: [
"index",
"getting-started/installation",
"getting-started/usage",
"getting-started/cli",
"getting-started/hugging-face-spaces",
"getting-started/creating-flows",
],
},
{
type: "category",
label: "What's New",
label: " What's New",
collapsed: false,
items: [
"whats-new/a-new-chapter-langflow",
"whats-new/migrating-to-one-point-zero",
"whats-new/customization-control",
"whats-new/debugging-reimagined",
"whats-new/simplification-standardization",
],
},
{
type: "category",
label: " Step-by-Step Guides",
collapsed: false,
items: [
"guides/rag-with-astradb",
"guides/async-tasks",
"guides/loading_document",
"guides/chatprompttemplate_guide",
"guides/langfuse_integration",
],
},
{
@ -29,22 +37,22 @@ module.exports = {
label: "Migration Guides",
collapsed: false,
items: [
"migration/flow-of-data",
// "migration/flow-of-data",
"migration/inputs-and-outputs",
"migration/supported-frameworks",
"migration/sidebar-and-interaction-panel",
"migration/new-categories-and-components",
"migration/text-and-record",
"migration/custom-component",
// "migration/supported-frameworks",
// "migration/sidebar-and-interaction-panel",
// "migration/new-categories-and-components",
// "migration/text-and-record",
// "migration/custom-component",
"migration/compatibility",
"migration/multiple-flows",
"migration/component-status-and-data-passing",
"migration/connecting-output-components",
"migration/renaming-and-editing-components",
"migration/passing-tweaks-and-inputs",
"migration/global-variables",
"migration/experimental-components",
"migration/state-management",
// "migration/multiple-flows",
// "migration/component-status-and-data-passing",
// "migration/connecting-output-components",
// "migration/renaming-and-editing-components",
// "migration/passing-tweaks-and-inputs",
// "migration/global-variables",
// "migration/experimental-components",
// "migration/state-management",
],
},
{
@ -66,42 +74,36 @@ module.exports = {
},
{
type: "category",
label: "Component Reference",
label: "Core Components",
collapsed: false,
items: [
"components/inputs",
"components/outputs",
"components/data",
"components/prompts",
"components/models",
"components/helpers",
"components/experimental",
"components/agents",
"components/chains",
"components/custom",
"components/embeddings",
"components/model_specs",
"components/loaders",
"components/memories",
"components/prompts",
"components/retrievers",
"components/text-splitters",
"components/toolkits",
"components/tools",
"components/utilities",
"components/vector-stores",
"components/wrappers",
"components/embeddings",
],
},
{
type: "category",
label: "Step-by-Step Guides",
label: "Extended Components",
collapsed: false,
items: [
"guides/async-tasks",
"guides/loading_document",
"guides/chatprompttemplate_guide",
"guides/langfuse_integration",
"components/agents",
"components/chains",
"components/loaders",
"components/experimental",
"components/utilities",
"components/memories",
"components/model_specs",
"components/retrievers",
"components/text-splitters",
"components/toolkits",
"components/tools",
"components/wrappers",
// "components/prompts",
],
},
{

View file

@ -0,0 +1,29 @@
const DownloadableJsonFile = ({ source, title }) => {
const handleDownload = (event) => {
event.preventDefault();
fetch(source)
.then((response) => response.blob())
.then((blob) => {
const url = window.URL.createObjectURL(
new Blob([blob], { type: "application/json" })
);
const link = document.createElement("a");
link.href = url;
link.setAttribute("download", title);
document.body.appendChild(link);
link.click();
link.parentNode.removeChild(link);
})
.catch((error) => {
console.error("Error downloading file:", error);
});
};
return (
<a href={source} download={title} onClick={handleDownload}>
{title}
</a>
);
};
export default DownloadableJsonFile;

3407
docs/static/data/AstraDB-RAG-Flows.json vendored Normal file

File diff suppressed because one or more lines are too long

Binary file not shown.

After

Width:  |  Height:  |  Size: 202 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 37 KiB

BIN
docs/static/img/astra-generate-token.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 74 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 220 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 85 KiB

BIN
docs/static/img/astra-ingestion-flow.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

BIN
docs/static/img/astra-ingestion-run.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 63 KiB

BIN
docs/static/img/astra-rag-flow-dark.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 161 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 354 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 165 KiB

BIN
docs/static/img/astra-rag-flow-run.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 190 KiB

BIN
docs/static/img/astra-rag-flow.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 149 KiB

BIN
docs/static/img/drag-and-drop-canvas.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 195 KiB

BIN
docs/static/img/drag-and-drop-flow.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 184 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 206 KiB

After

Width:  |  Height:  |  Size: 266 KiB

Before After
Before After

500
poetry.lock generated
View file

@ -2666,6 +2666,21 @@ protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4
[package.extras]
grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"]
[[package]]
name = "gotrue"
version = "2.4.2"
description = "Python Client Library for Supabase Auth"
optional = false
python-versions = "<4.0,>=3.8"
files = [
{file = "gotrue-2.4.2-py3-none-any.whl", hash = "sha256:64cd40933d1f0a5d5cc4f4bd93bc51d730b94812447b6600f774790a4901e455"},
{file = "gotrue-2.4.2.tar.gz", hash = "sha256:e100745161f1c58dd05b9c1ef8bcd4cd78cdfb38d8d2c253ade63143a3dc6aeb"},
]
[package.dependencies]
httpx = ">=0.23,<0.28"
pydantic = ">=1.10,<3"
[[package]]
name = "greenlet"
version = "3.0.3"
@ -2803,6 +2818,21 @@ files = [
[package.extras]
protobuf = ["grpcio-tools (>=1.62.1)"]
[[package]]
name = "grpcio-health-checking"
version = "1.62.1"
description = "Standard Health Checking Service for gRPC"
optional = false
python-versions = ">=3.6"
files = [
{file = "grpcio-health-checking-1.62.1.tar.gz", hash = "sha256:9e56180a941b1d32a077d7491e0611d0483c396358afd5349bf00152612e4583"},
{file = "grpcio_health_checking-1.62.1-py3-none-any.whl", hash = "sha256:9ce761c09fc383e7aa2f7e6c0b0b65d5a1157c1b98d1f5871f7c38aca47d49b9"},
]
[package.dependencies]
grpcio = ">=1.62.1"
protobuf = ">=4.21.6"
[[package]]
name = "grpcio-status"
version = "1.62.1"
@ -3039,13 +3069,13 @@ test = ["Cython (>=0.29.24,<0.30.0)"]
[[package]]
name = "httpx"
version = "0.25.2"
version = "0.27.0"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpx-0.25.2-py3-none-any.whl", hash = "sha256:a05d3d052d9b2dfce0e3896636467f8a5342fb2b902c819428e1ac65413ca118"},
{file = "httpx-0.25.2.tar.gz", hash = "sha256:8b8fcaa0c8ea7b05edd69a094e63a2094c4efcb48129fb757361bc423c0ad9e8"},
{file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
{file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
]
[package.dependencies]
@ -3246,13 +3276,13 @@ files = [
[[package]]
name = "importlib-metadata"
version = "6.11.0"
version = "7.0.0"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"},
{file = "importlib_metadata-6.11.0.tar.gz", hash = "sha256:1231cf92d825c9e03cfc4da076a16de6422c863558229ea0b22b675657463443"},
{file = "importlib_metadata-7.0.0-py3-none-any.whl", hash = "sha256:d97503976bb81f40a193d41ee6570868479c69d5068651eb039c40d850c59d67"},
{file = "importlib_metadata-7.0.0.tar.gz", hash = "sha256:7fc841f8b8332803464e5dc1c63a2e59121f46ca186c0e2e182e80bf8c1319f7"},
]
[package.dependencies]
@ -3783,13 +3813,13 @@ extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.
[[package]]
name = "langchain-core"
version = "0.1.37"
version = "0.1.38"
description = "Building applications with LLMs through composability"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langchain_core-0.1.37-py3-none-any.whl", hash = "sha256:63c6aecb0f2eb1a21f8e944da622748cfeafa2cc3d94c0182ffa8038bd00fe0b"},
{file = "langchain_core-0.1.37.tar.gz", hash = "sha256:3db7008796e25aea90f98c2159dbf29bf1fd296bdcb78dc2d8183a92fdde4433"},
{file = "langchain_core-0.1.38-py3-none-any.whl", hash = "sha256:d881b2754254cb4bdb0d5bb56e5c138d032b6e75e5cb21f151b01224b322e02b"},
{file = "langchain_core-0.1.38.tar.gz", hash = "sha256:ee8da6d061c06cce7dc22fec224b6ecbc3a8de106d6dd9f409c7fe448ea41861"},
]
[package.dependencies]
@ -3890,56 +3920,57 @@ six = "*"
[[package]]
name = "langflow-base"
version = "0.0.13"
version = "0.0.16"
description = "A Python package with a built-in web application"
optional = false
python-versions = "<3.12,>=3.10"
files = [
{file = "langflow_base-0.0.13-py3-none-any.whl", hash = "sha256:86a8cd4f4dac68a1c00b4fe434cd5df1ef4af939295a1a5516e4cc14aaf79e7a"},
{file = "langflow_base-0.0.13.tar.gz", hash = "sha256:b7ee7d93e29c20bdb2a1431498d5715ed2a8fbfe9ca5d7d49926a9a10937e8c2"},
]
python-versions = ">=3.10,<3.12"
files = []
develop = true
[package.dependencies]
alembic = ">=1.13.0,<2.0.0"
alembic = "^1.13.0"
bcrypt = "4.0.1"
cachetools = ">=5.3.1,<6.0.0"
chromadb = ">=0.4.24,<0.5.0"
docstring-parser = ">=0.15,<0.16"
duckdb = ">=0.9.2,<0.10.0"
fastapi = ">=0.109.0,<0.110.0"
gunicorn = ">=21.2.0,<22.0.0"
httpx = ">=0.25,<0.26"
jq = {version = ">=1.7.0,<2.0.0", markers = "sys_platform != \"win32\""}
langchain = ">=0.1.0,<0.2.0"
langchain-anthropic = ">=0.1.4,<0.2.0"
langchain-astradb = ">=0.1.0,<0.2.0"
cachetools = "^5.3.1"
chromadb = "^0.4.24"
docstring-parser = "^0.15"
duckdb = "^0.9.2"
fastapi = "^0.109.0"
gunicorn = "^21.2.0"
httpx = "*"
jq = {version = "^1.7.0", markers = "sys_platform != \"win32\""}
langchain = "~0.1.0"
langchain-anthropic = "^0.1.4"
langchain-astradb = "^0.1.0"
langchain-experimental = "*"
loguru = ">=0.7.1,<0.8.0"
multiprocess = ">=0.70.14,<0.71.0"
opentelemetry-api = ">=1.23.0,<2.0.0"
opentelemetry-exporter-otlp = ">=1.23.0,<2.0.0"
opentelemetry-instrumentation-asgi = ">=0.44b0,<0.45"
opentelemetry-instrumentation-fastapi = ">=0.44b0,<0.45"
opentelemetry-instrumentation-httpx = ">=0.44b0,<0.45"
opentelemetry-sdk = ">=1.23.0,<2.0.0"
loguru = "^0.7.1"
multiprocess = "^0.70.14"
orjson = "3.9.15"
pandas = "2.2.0"
passlib = ">=1.7.4,<2.0.0"
pillow = ">=10.2.0,<11.0.0"
platformdirs = ">=4.2.0,<5.0.0"
pydantic = ">=2.5.0,<3.0.0"
pydantic-settings = ">=2.1.0,<3.0.0"
pypdf = ">=4.1.0,<5.0.0"
python-docx = ">=1.1.0,<2.0.0"
python-jose = ">=3.3.0,<4.0.0"
python-multipart = ">=0.0.7,<0.0.8"
python-socketio = ">=5.11.0,<6.0.0"
rich = ">=13.7.0,<14.0.0"
sqlmodel = ">=0.0.14,<0.0.15"
typer = ">=0.9.0,<0.10.0"
uvicorn = ">=0.27.0,<0.28.0"
passlib = "^1.7.4"
pillow = "^10.2.0"
platformdirs = "^4.2.0"
pydantic = "^2.5.0"
pydantic-settings = "^2.1.0"
pypdf = "^4.1.0"
python-docx = "^1.1.0"
python-jose = "^3.3.0"
python-multipart = "^0.0.7"
python-socketio = "^5.11.0"
rich = "^13.7.0"
sqlmodel = "^0.0.14"
typer = "^0.9.0"
uvicorn = "^0.27.0"
websockets = "*"
[package.extras]
all = []
deploy = []
local = []
[package.source]
type = "directory"
url = "src/backend/base"
[[package]]
name = "langfuse"
version = "2.21.1"
@ -4866,7 +4897,6 @@ files = [
{file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fbb160554e319f7b22ecf530a80a3ff496d38e8e07ae763b9e82fadfe96f273"},
{file = "msgpack-1.0.8-cp39-cp39-win32.whl", hash = "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d"},
{file = "msgpack-1.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011"},
{file = "msgpack-1.0.8-py3-none-any.whl", hash = "sha256:24f727df1e20b9876fa6e95f840a2a2651e34c0ad147676356f4bf5fbb0206ca"},
{file = "msgpack-1.0.8.tar.gz", hash = "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3"},
]
@ -5126,44 +5156,44 @@ twitter = ["twython"]
[[package]]
name = "numexpr"
version = "2.9.0"
version = "2.10.0"
description = "Fast numerical expression evaluator for NumPy"
optional = false
python-versions = ">=3.9"
files = [
{file = "numexpr-2.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c52b4ac54514f5d4d8ead66768810cd5f77aa198e6064213d9b5c7b2e1c97c35"},
{file = "numexpr-2.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50f57bc333f285e8c46b1ce61c6e94ec9bb74e4ea0d674d1c6c6f4a286f64fe4"},
{file = "numexpr-2.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:943ba141f3884ffafa3fa1a3ebf3cdda9e9688a67a3c91986e6eae13dc073d43"},
{file = "numexpr-2.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee48acd6339748a65c0e32403b802ebfadd9cb0e3b602ba5889896238eafdd61"},
{file = "numexpr-2.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:972e29b5cecc21466c5b177e38568372ab66aab1f053ae04690a49cea09e747d"},
{file = "numexpr-2.9.0-cp310-cp310-win32.whl", hash = "sha256:520e55d75bd99c76e376b6326e35ecf44c5ce2635a5caed72799a3885fc49173"},
{file = "numexpr-2.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:5615497c3f34b637fda9b571f7774b6a82f2367cc1364b7a4573068dd1aabcaa"},
{file = "numexpr-2.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bffcbc55dea5a5f5255e2586da08f00929998820e6592ee717273a08ad021eb3"},
{file = "numexpr-2.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:374dc6ca54b2af813cb15c2b34e85092dfeac1f73d51ec358dd81876bd9adcec"},
{file = "numexpr-2.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:549afc1622296cca3478a132c6e0fb5e55a19e08d32bc0d5a415434824a9c157"},
{file = "numexpr-2.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c618a5895e34db0a364dcdb9960084c080f93f9d377c45b1ca9c394c24b4e77"},
{file = "numexpr-2.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:37a7dd36fd79a2b69c3fd2bc2b51ac8270bebc69cc96e6d78f1148e147fcbfa8"},
{file = "numexpr-2.9.0-cp311-cp311-win32.whl", hash = "sha256:00dab81d49239ea5423861ad627097b44d10d802df5f883d1b00f742139c3349"},
{file = "numexpr-2.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:0e2574cafb18373774f351cac45ed23b5b360d9ecd1dbf3c12dac6d6eefefc87"},
{file = "numexpr-2.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9761195526a228e05eba400b8c484c94bbabfea853b9ea35ab8fa1bf415331b1"},
{file = "numexpr-2.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0f619e91034b346ea85a4e1856ff06011dcb7dce10a60eda75e74db90120f880"},
{file = "numexpr-2.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2749bce1c48706d58894992634a43b8458c4ba9411191471c4565fa41e9979ec"},
{file = "numexpr-2.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1c31f621a625c7be602f92b027d90f2d3d60dcbc19b106e77fb04a4362152af"},
{file = "numexpr-2.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1a78b937861d13de67d440d54c85a835faed7572be5a6fd10d4f3bd4e66e157f"},
{file = "numexpr-2.9.0-cp312-cp312-win32.whl", hash = "sha256:aa6298fb46bd7ec69911b5b80927a00663d066e719b29f48eb952d559bdd8371"},
{file = "numexpr-2.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:8efd879839572bde5a38a1aa3ac23fd4dd9b956fb969bc5e43d1c403419e1e8c"},
{file = "numexpr-2.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b04f12a6130094a251e3a8fff40130589c1c83be6d4eb223873bea14d8c8b630"},
{file = "numexpr-2.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:977537f2a1cc843f888fb5f0507626f956ada674e4b3847168214a3f3c7446fa"},
{file = "numexpr-2.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6eae6c0c2d5682c02e8ac9c4287c2232c2443c9148b239df22500eaa3c5d73b7"},
{file = "numexpr-2.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fae6828042b70c2f52a132bfcb9139da704274ed11b982fbf537f91c075d2ef"},
{file = "numexpr-2.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7c77392aea53f0700d60eb270ad63174b4ff10b04f8de92861101ca2129fee51"},
{file = "numexpr-2.9.0-cp39-cp39-win32.whl", hash = "sha256:3b03a6cf37a72f5b52f2b962d7ac7f565bea8eaba83c3c4e5fcf8fbb6a938153"},
{file = "numexpr-2.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:d655b6eacc4e81006b662cba014e4615a9ddd96881b8b4db4ad0d7f6d38069af"},
{file = "numexpr-2.9.0.tar.gz", hash = "sha256:f21d12f6c432ce349089eb95342babf6629aebb3fddf187a4492d3aadaadaaf0"},
{file = "numexpr-2.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1af6dc6b3bd2e11a802337b352bf58f30df0b70be16c4f863b70a3af3a8ef95e"},
{file = "numexpr-2.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c66dc0188358cdcc9465b6ee54fd5eef2e83ac64b1d4ba9117c41df59bf6fca"},
{file = "numexpr-2.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83f1e7a7f7ee741b8dcd20c56c3f862a3a3ec26fa8b9fcadb7dcd819876d2f35"},
{file = "numexpr-2.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f0b045e1831953a47cc9fabae76a6794c69cbb60921751a5cf2d555034c55bf"},
{file = "numexpr-2.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1d8eb88b0ae3d3c609d732a17e71096779b2bf47b3a084320ffa93d9f9132786"},
{file = "numexpr-2.10.0-cp310-cp310-win32.whl", hash = "sha256:629b66cc1b750671e7fb396506b3f9410612e5bd8bc1dd55b5a0a0041d839f95"},
{file = "numexpr-2.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:78e0a8bc4417c3dedcbae3c473505b69080535246edc977c7dccf3ec8454a685"},
{file = "numexpr-2.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a602692cd52ce923ce8a0a90fb1d6cf186ebe8706eed83eee0de685e634b9aa9"},
{file = "numexpr-2.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:745b46a1fb76920a3eebfaf26e50bc94a9c13b5aee34b256ab4b2d792dbaa9ca"},
{file = "numexpr-2.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10789450032357afaeda4ac4d06da9542d1535c13151e8d32b49ae1a488d1358"},
{file = "numexpr-2.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4feafc65ea3044b8bf8f305b757a928e59167a310630c22b97a57dff07a56490"},
{file = "numexpr-2.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:937d36c6d3cf15601f26f84f0f706649f976491e9e0892d16cd7c876d77fa7dc"},
{file = "numexpr-2.10.0-cp311-cp311-win32.whl", hash = "sha256:03d0ba492e484a5a1aeb24b300c4213ed168f2c246177be5733abb4e18cbb043"},
{file = "numexpr-2.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:6b5f8242c075477156d26b3a6b8e0cd0a06d4c8eb68d907bde56dd3c9c683e92"},
{file = "numexpr-2.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b276e2ba3e87ace9a30fd49078ad5dcdc6a1674d030b1ec132599c55465c0346"},
{file = "numexpr-2.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb5e12787101f1216f2cdabedc3417748f2e1f472442e16bbfabf0bab2336300"},
{file = "numexpr-2.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05278bad96b5846d712eba58b44e5cec743bdb3e19ca624916c921d049fdbcf6"},
{file = "numexpr-2.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6cdf9e64c5b3dbb61729edb505ea75ee212fa02b85c5b1d851331381ae3b0e1"},
{file = "numexpr-2.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e3a973265591b0a875fd1151c4549e468959c7192821aac0bb86937694a08efa"},
{file = "numexpr-2.10.0-cp312-cp312-win32.whl", hash = "sha256:416e0e9f0fc4cced67767585e44cb6b301728bdb9edbb7c534a853222ec62cac"},
{file = "numexpr-2.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:748e8d4cde22d9a5603165293fb293a4de1a4623513299416c64fdab557118c2"},
{file = "numexpr-2.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc3506c30c03b082da2cadef43747d474e5170c1f58a6dcdf882b3dc88b1e849"},
{file = "numexpr-2.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:efa63ecdc9fcaf582045639ddcf56e9bdc1f4d9a01729be528f62df4db86c9d6"},
{file = "numexpr-2.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96a64d0dd8f8e694da3f8582d73d7da8446ff375f6dd239b546010efea371ac3"},
{file = "numexpr-2.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d47bb567e330ebe86781864219a36cbccb3a47aec893bd509f0139c6b23e8104"},
{file = "numexpr-2.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c7517b774d309b1f0896c89bdd1ddd33c4418a92ecfbe5e1df3ac698698f6fcf"},
{file = "numexpr-2.10.0-cp39-cp39-win32.whl", hash = "sha256:04e8620e7e676504201d4082e7b3ee2d9b561d1cb9470b47a6104e10c1e2870e"},
{file = "numexpr-2.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:56d0d96b130f7cd4d78d0017030d6a0e9d9fc2a717ac51d4cf4860b39637e86a"},
{file = "numexpr-2.10.0.tar.gz", hash = "sha256:c89e930752639df040539160326d8f99a84159bbea41943ab8e960591edaaef0"},
]
[package.dependencies]
numpy = ">=1.13.3"
numpy = ">=1.19.3"
[[package]]
name = "numpy"
@ -5336,6 +5366,7 @@ description = "Nvidia JIT LTO Library"
optional = true
python-versions = ">=3"
files = [
{file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_aarch64.whl", hash = "sha256:75d6498c96d9adb9435f2bbdbddb479805ddfb97b5c1b32395c694185c20ca57"},
{file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c6428836d20fe7e327191c175791d38570e10762edc588fb46749217cd444c74"},
{file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-win_amd64.whl", hash = "sha256:991905ffa2144cb603d8ca7962d75c35334ae82bf92820b6ba78157277da1ad2"},
]
@ -5470,57 +5501,42 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
[[package]]
name = "opentelemetry-api"
version = "1.23.0"
version = "1.24.0"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_api-1.23.0-py3-none-any.whl", hash = "sha256:cc03ea4025353048aadb9c64919099663664672ea1c6be6ddd8fee8e4cd5e774"},
{file = "opentelemetry_api-1.23.0.tar.gz", hash = "sha256:14a766548c8dd2eb4dfc349739eb4c3893712a0daa996e5dbf945f9da665da9d"},
{file = "opentelemetry_api-1.24.0-py3-none-any.whl", hash = "sha256:0f2c363d98d10d1ce93330015ca7fd3a65f60be64e05e30f557c61de52c80ca2"},
{file = "opentelemetry_api-1.24.0.tar.gz", hash = "sha256:42719f10ce7b5a9a73b10a4baf620574fb8ad495a9cbe5c18d76b75d8689c67e"},
]
[package.dependencies]
deprecated = ">=1.2.6"
importlib-metadata = ">=6.0,<7.0"
[[package]]
name = "opentelemetry-exporter-otlp"
version = "1.23.0"
description = "OpenTelemetry Collector Exporters"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_exporter_otlp-1.23.0-py3-none-any.whl", hash = "sha256:92371fdc8d7803465a45801fe30cd8c522ef355a385b0a1d5346d32f77511ea2"},
{file = "opentelemetry_exporter_otlp-1.23.0.tar.gz", hash = "sha256:4af8798f9bc3bddb92fcbb5b4aa9d0e955d962aa1d9bceaab08891c355a9f907"},
]
[package.dependencies]
opentelemetry-exporter-otlp-proto-grpc = "1.23.0"
opentelemetry-exporter-otlp-proto-http = "1.23.0"
importlib-metadata = ">=6.0,<=7.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-common"
version = "1.23.0"
version = "1.24.0"
description = "OpenTelemetry Protobuf encoding"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_exporter_otlp_proto_common-1.23.0-py3-none-any.whl", hash = "sha256:2a9e7e9d5a8b026b572684b6b24dcdefcaa58613d5ce3d644130b0c373c056c1"},
{file = "opentelemetry_exporter_otlp_proto_common-1.23.0.tar.gz", hash = "sha256:35e4ea909e7a0b24235bd0aaf17fba49676527feb1823b46565ff246d5a1ab18"},
{file = "opentelemetry_exporter_otlp_proto_common-1.24.0-py3-none-any.whl", hash = "sha256:e51f2c9735054d598ad2df5d3eca830fecfb5b0bda0a2fa742c9c7718e12f641"},
{file = "opentelemetry_exporter_otlp_proto_common-1.24.0.tar.gz", hash = "sha256:5d31fa1ff976cacc38be1ec4e3279a3f88435c75b38b1f7a099a1faffc302461"},
]
[package.dependencies]
opentelemetry-proto = "1.23.0"
opentelemetry-proto = "1.24.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-grpc"
version = "1.23.0"
version = "1.24.0"
description = "OpenTelemetry Collector Protobuf over gRPC Exporter"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_exporter_otlp_proto_grpc-1.23.0-py3-none-any.whl", hash = "sha256:40f9e3e7761eb34f2a1001f4543028783ac26e2db27e420d5374f2cca0182dad"},
{file = "opentelemetry_exporter_otlp_proto_grpc-1.23.0.tar.gz", hash = "sha256:aa1a012eea5342bfef51fcf3f7f22601dcb0f0984a07ffe6025b2fbb6d91a2a9"},
{file = "opentelemetry_exporter_otlp_proto_grpc-1.24.0-py3-none-any.whl", hash = "sha256:f40d62aa30a0a43cc1657428e59fcf82ad5f7ea8fff75de0f9d9cb6f739e0a3b"},
{file = "opentelemetry_exporter_otlp_proto_grpc-1.24.0.tar.gz", hash = "sha256:217c6e30634f2c9797999ea9da29f7300479a94a610139b9df17433f915e7baa"},
]
[package.dependencies]
@ -5528,45 +5544,42 @@ deprecated = ">=1.2.6"
googleapis-common-protos = ">=1.52,<2.0"
grpcio = ">=1.0.0,<2.0.0"
opentelemetry-api = ">=1.15,<2.0"
opentelemetry-exporter-otlp-proto-common = "1.23.0"
opentelemetry-proto = "1.23.0"
opentelemetry-sdk = ">=1.23.0,<1.24.0"
opentelemetry-exporter-otlp-proto-common = "1.24.0"
opentelemetry-proto = "1.24.0"
opentelemetry-sdk = ">=1.24.0,<1.25.0"
[package.extras]
test = ["pytest-grpc"]
[[package]]
name = "opentelemetry-exporter-otlp-proto-http"
version = "1.23.0"
version = "1.24.0"
description = "OpenTelemetry Collector Protobuf over HTTP Exporter"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_exporter_otlp_proto_http-1.23.0-py3-none-any.whl", hash = "sha256:ad853b58681df8efcb2cfc93be2b5fd86351c99ff4ab47dc917da384b8650d91"},
{file = "opentelemetry_exporter_otlp_proto_http-1.23.0.tar.gz", hash = "sha256:088eac2320f4a604e2d9ff71aced71fdae601ac6457005fb0303d6bbbf44e6ca"},
{file = "opentelemetry_exporter_otlp_proto_http-1.24.0-py3-none-any.whl", hash = "sha256:25af10e46fdf4cd3833175e42f4879a1255fc01655fe14c876183a2903949836"},
{file = "opentelemetry_exporter_otlp_proto_http-1.24.0.tar.gz", hash = "sha256:704c066cc96f5131881b75c0eac286cd73fc735c490b054838b4513254bd7850"},
]
[package.dependencies]
deprecated = ">=1.2.6"
googleapis-common-protos = ">=1.52,<2.0"
opentelemetry-api = ">=1.15,<2.0"
opentelemetry-exporter-otlp-proto-common = "1.23.0"
opentelemetry-proto = "1.23.0"
opentelemetry-sdk = ">=1.23.0,<1.24.0"
opentelemetry-exporter-otlp-proto-common = "1.24.0"
opentelemetry-proto = "1.24.0"
opentelemetry-sdk = ">=1.24.0,<1.25.0"
requests = ">=2.7,<3.0"
[package.extras]
test = ["responses (>=0.22.0,<0.25)"]
[[package]]
name = "opentelemetry-instrumentation"
version = "0.44b0"
version = "0.45b0"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_instrumentation-0.44b0-py3-none-any.whl", hash = "sha256:79560f386425176bcc60c59190064597096114c4a8e5154f1cb281bb4e47d2fc"},
{file = "opentelemetry_instrumentation-0.44b0.tar.gz", hash = "sha256:8213d02d8c0987b9b26386ae3e091e0477d6331673123df736479322e1a50b48"},
{file = "opentelemetry_instrumentation-0.45b0-py3-none-any.whl", hash = "sha256:06c02e2c952c1b076e8eaedf1b82f715e2937ba7eeacab55913dd434fbcec258"},
{file = "opentelemetry_instrumentation-0.45b0.tar.gz", hash = "sha256:6c47120a7970bbeb458e6a73686ee9ba84b106329a79e4a4a66761f933709c7e"},
]
[package.dependencies]
@ -5576,78 +5589,55 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-asgi"
version = "0.44b0"
version = "0.45b0"
description = "ASGI instrumentation for OpenTelemetry"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_instrumentation_asgi-0.44b0-py3-none-any.whl", hash = "sha256:0d95c84a8991008c8a8ac35e15d43cc7768a5bb46f95f129e802ad2990d7c366"},
{file = "opentelemetry_instrumentation_asgi-0.44b0.tar.gz", hash = "sha256:72d4d28ec7ccd551eac11edc5ae8cac3586c0a228467d6a95fad7b6d4edd597a"},
{file = "opentelemetry_instrumentation_asgi-0.45b0-py3-none-any.whl", hash = "sha256:8be1157ed62f0db24e45fdf7933c530c4338bd025c5d4af7830e903c0756021b"},
{file = "opentelemetry_instrumentation_asgi-0.45b0.tar.gz", hash = "sha256:97f55620f163fd3d20323e9fd8dc3aacc826c03397213ff36b877e0f4b6b08a6"},
]
[package.dependencies]
asgiref = ">=3.0,<4.0"
opentelemetry-api = ">=1.12,<2.0"
opentelemetry-instrumentation = "0.44b0"
opentelemetry-semantic-conventions = "0.44b0"
opentelemetry-util-http = "0.44b0"
opentelemetry-instrumentation = "0.45b0"
opentelemetry-semantic-conventions = "0.45b0"
opentelemetry-util-http = "0.45b0"
[package.extras]
instruments = ["asgiref (>=3.0,<4.0)"]
test = ["opentelemetry-instrumentation-asgi[instruments]", "opentelemetry-test-utils (==0.44b0)"]
[[package]]
name = "opentelemetry-instrumentation-fastapi"
version = "0.44b0"
version = "0.45b0"
description = "OpenTelemetry FastAPI Instrumentation"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_instrumentation_fastapi-0.44b0-py3-none-any.whl", hash = "sha256:4441482944bea6676816668d56deb94af990e8c6e9582c581047e5d84c91d3c9"},
{file = "opentelemetry_instrumentation_fastapi-0.44b0.tar.gz", hash = "sha256:67ed10b93ad9d35238ae0be73cf8acbbb65a4a61fb7444d0aee5b0c492e294db"},
{file = "opentelemetry_instrumentation_fastapi-0.45b0-py3-none-any.whl", hash = "sha256:77d9c123a363129148f5f66d44094f3d67aaaa2b201396d94782b4a7f9ce4314"},
{file = "opentelemetry_instrumentation_fastapi-0.45b0.tar.gz", hash = "sha256:5a6b91e1c08a01601845fcfcfdefd0a2aecdb3c356d4a436a3210cb58c21487e"},
]
[package.dependencies]
opentelemetry-api = ">=1.12,<2.0"
opentelemetry-instrumentation = "0.44b0"
opentelemetry-instrumentation-asgi = "0.44b0"
opentelemetry-semantic-conventions = "0.44b0"
opentelemetry-util-http = "0.44b0"
opentelemetry-instrumentation = "0.45b0"
opentelemetry-instrumentation-asgi = "0.45b0"
opentelemetry-semantic-conventions = "0.45b0"
opentelemetry-util-http = "0.45b0"
[package.extras]
instruments = ["fastapi (>=0.58,<1.0)"]
test = ["httpx (>=0.22,<1.0)", "opentelemetry-instrumentation-fastapi[instruments]", "opentelemetry-test-utils (==0.44b0)", "requests (>=2.23,<3.0)"]
[[package]]
name = "opentelemetry-instrumentation-httpx"
version = "0.44b0"
description = "OpenTelemetry HTTPX Instrumentation"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_instrumentation_httpx-0.44b0-py3-none-any.whl", hash = "sha256:a4f1121b6212b018e719ef6a9a2f8317c329edd01a61452b7250f574f7d95a91"},
{file = "opentelemetry_instrumentation_httpx-0.44b0.tar.gz", hash = "sha256:6cc81c4182f54dfb0d15774e3e48bb90d3ed44e9ad8bf5eef2a64a7197f945d8"},
]
[package.dependencies]
opentelemetry-api = ">=1.12,<2.0"
opentelemetry-instrumentation = "0.44b0"
opentelemetry-semantic-conventions = "0.44b0"
opentelemetry-util-http = "0.44b0"
[package.extras]
instruments = ["httpx (>=0.18.0)"]
test = ["opentelemetry-instrumentation-httpx[instruments]", "opentelemetry-sdk (>=1.12,<2.0)", "opentelemetry-test-utils (==0.44b0)"]
[[package]]
name = "opentelemetry-proto"
version = "1.23.0"
version = "1.24.0"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_proto-1.23.0-py3-none-any.whl", hash = "sha256:4c017deca052cb287a6003b7c989ed8b47af65baeb5d57ebf93dde0793f78509"},
{file = "opentelemetry_proto-1.23.0.tar.gz", hash = "sha256:e6aaf8b7ace8d021942d546161401b83eed90f9f2cc6f13275008cea730e4651"},
{file = "opentelemetry_proto-1.24.0-py3-none-any.whl", hash = "sha256:bcb80e1e78a003040db71ccf83f2ad2019273d1e0828089d183b18a1476527ce"},
{file = "opentelemetry_proto-1.24.0.tar.gz", hash = "sha256:ff551b8ad63c6cabb1845ce217a6709358dfaba0f75ea1fa21a61ceddc78cab8"},
]
[package.dependencies]
@ -5655,40 +5645,40 @@ protobuf = ">=3.19,<5.0"
[[package]]
name = "opentelemetry-sdk"
version = "1.23.0"
version = "1.24.0"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_sdk-1.23.0-py3-none-any.whl", hash = "sha256:a93c96990ac0f07c6d679e2f1015864ff7a4f5587122dd5af968034436efb1fd"},
{file = "opentelemetry_sdk-1.23.0.tar.gz", hash = "sha256:9ddf60195837b59e72fd2033d6a47e2b59a0f74f0ec37d89387d89e3da8cab7f"},
{file = "opentelemetry_sdk-1.24.0-py3-none-any.whl", hash = "sha256:fa731e24efe832e98bcd90902085b359dcfef7d9c9c00eb5b9a18587dae3eb59"},
{file = "opentelemetry_sdk-1.24.0.tar.gz", hash = "sha256:75bc0563affffa827700e0f4f4a68e1e257db0df13372344aebc6f8a64cde2e5"},
]
[package.dependencies]
opentelemetry-api = "1.23.0"
opentelemetry-semantic-conventions = "0.44b0"
opentelemetry-api = "1.24.0"
opentelemetry-semantic-conventions = "0.45b0"
typing-extensions = ">=3.7.4"
[[package]]
name = "opentelemetry-semantic-conventions"
version = "0.44b0"
version = "0.45b0"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_semantic_conventions-0.44b0-py3-none-any.whl", hash = "sha256:7c434546c9cbd797ab980cc88bf9ff3f4a5a28f941117cad21694e43d5d92019"},
{file = "opentelemetry_semantic_conventions-0.44b0.tar.gz", hash = "sha256:2e997cb28cd4ca81a25a9a43365f593d0c2b76be0685015349a89abdf1aa4ffa"},
{file = "opentelemetry_semantic_conventions-0.45b0-py3-none-any.whl", hash = "sha256:a4a6fb9a7bacd9167c082aa4681009e9acdbfa28ffb2387af50c2fef3d30c864"},
{file = "opentelemetry_semantic_conventions-0.45b0.tar.gz", hash = "sha256:7c84215a44ac846bc4b8e32d5e78935c5c43482e491812a0bb8aaf87e4d92118"},
]
[[package]]
name = "opentelemetry-util-http"
version = "0.44b0"
version = "0.45b0"
description = "Web util for OpenTelemetry"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_util_http-0.44b0-py3-none-any.whl", hash = "sha256:ff018ab6a2fa349537ff21adcef99a294248b599be53843c44f367aef6bccea5"},
{file = "opentelemetry_util_http-0.44b0.tar.gz", hash = "sha256:75896dffcbbeb5df5429ad4526e22307fc041a27114e0c5bfd90bb219381e68f"},
{file = "opentelemetry_util_http-0.45b0-py3-none-any.whl", hash = "sha256:6628868b501b3004e1860f976f410eeb3d3499e009719d818000f24ce17b6e33"},
{file = "opentelemetry_util_http-0.45b0.tar.gz", hash = "sha256:4ce08b6a7d52dd7c96b7705b5b4f06fdb6aa3eac1233b3b0bfef8a0cab9a92cd"},
]
[[package]]
@ -6128,6 +6118,23 @@ docs = ["sphinx (>=1.7.1)"]
redis = ["redis"]
tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)", "types-redis"]
[[package]]
name = "postgrest"
version = "0.16.2"
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
optional = false
python-versions = "<4.0,>=3.8"
files = [
{file = "postgrest-0.16.2-py3-none-any.whl", hash = "sha256:cf89106d0877ac2c7b070ad136f78350eb89dbdd998cd83d6852010e0bcdb878"},
{file = "postgrest-0.16.2.tar.gz", hash = "sha256:6c5c8e53cdcede8b6654ddfc7505e5af0c41ce56c6935f7b1d05545bb899d8b8"},
]
[package.dependencies]
deprecation = ">=2.1.0,<3.0.0"
httpx = ">=0.24,<0.28"
pydantic = ">=1.9,<3.0"
strenum = ">=0.4.9,<0.5.0"
[[package]]
name = "posthog"
version = "3.5.0"
@ -7723,6 +7730,22 @@ files = [
[package.extras]
full = ["numpy"]
[[package]]
name = "realtime"
version = "1.0.3"
description = ""
optional = false
python-versions = "<4.0,>=3.8"
files = [
{file = "realtime-1.0.3-py3-none-any.whl", hash = "sha256:809b99a1c09390a4580ca2d37d84c85dffacb1804f80c6f5a4491d312c20e6e3"},
{file = "realtime-1.0.3.tar.gz", hash = "sha256:1a39b5dcdb345b4cc7fd43bc035feb38ca915c9248962f20d264625bc8eb2c4e"},
]
[package.dependencies]
python-dateutil = ">=2.8.1,<3.0.0"
typing-extensions = ">=4.2.0,<5.0.0"
websockets = ">=11,<13"
[[package]]
name = "red-black-tree-mod"
version = "1.20"
@ -8451,6 +8474,38 @@ anyio = ">=3.4.0,<5"
[package.extras]
full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"]
[[package]]
name = "storage3"
version = "0.7.4"
description = "Supabase Storage client for Python."
optional = false
python-versions = "<4.0,>=3.8"
files = [
{file = "storage3-0.7.4-py3-none-any.whl", hash = "sha256:0b8e8839b10a64063796ce55a41462c7ffd6842e0ada74f25f5dcf37e1d1bade"},
{file = "storage3-0.7.4.tar.gz", hash = "sha256:61fcbf836f566405981722abb7d56caa57025b261e7a316e73316701abf0c040"},
]
[package.dependencies]
httpx = ">=0.24,<0.28"
python-dateutil = ">=2.8.2,<3.0.0"
typing-extensions = ">=4.2.0,<5.0.0"
[[package]]
name = "strenum"
version = "0.4.15"
description = "An Enum that inherits from str."
optional = false
python-versions = "*"
files = [
{file = "StrEnum-0.4.15-py3-none-any.whl", hash = "sha256:a30cda4af7cc6b5bf52c8055bc4bf4b2b6b14a93b574626da33df53cf7740659"},
{file = "StrEnum-0.4.15.tar.gz", hash = "sha256:878fb5ab705442070e4dd1929bb5e2249511c0bcf2b0eeacf3bcd80875c82eff"},
]
[package.extras]
docs = ["myst-parser[linkify]", "sphinx", "sphinx-rtd-theme"]
release = ["twine"]
test = ["pylint", "pytest", "pytest-black", "pytest-cov", "pytest-pylint"]
[[package]]
name = "striprtf"
version = "0.0.26"
@ -8462,6 +8517,39 @@ files = [
{file = "striprtf-0.0.26.tar.gz", hash = "sha256:fdb2bba7ac440072d1c41eab50d8d74ae88f60a8b6575c6e2c7805dc462093aa"},
]
[[package]]
name = "supabase"
version = "2.4.1"
description = "Supabase client for Python."
optional = false
python-versions = "<4.0,>=3.8"
files = [
{file = "supabase-2.4.1-py3-none-any.whl", hash = "sha256:8b95744ce4ad24245ec23c090f273dfc9c2d9a53e3a80186959903947dbe1ed6"},
{file = "supabase-2.4.1.tar.gz", hash = "sha256:a7dec0586f8931f378a45b2ffb28d8e37b3719f979c17f541b0156019144e645"},
]
[package.dependencies]
gotrue = ">=1.3,<3.0"
httpx = ">=0.24,<0.28"
postgrest = ">=0.10.8,<0.17.0"
realtime = ">=1.0.0,<2.0.0"
storage3 = ">=0.5.3,<0.8.0"
supafunc = ">=0.3.1,<0.5.0"
[[package]]
name = "supafunc"
version = "0.4.5"
description = "Library for Supabase Functions"
optional = false
python-versions = "<4.0,>=3.8"
files = [
{file = "supafunc-0.4.5-py3-none-any.whl", hash = "sha256:2208045f8f5c797924666f6a332efad75ad368f8030b2e4ceb9d2bf63f329373"},
{file = "supafunc-0.4.5.tar.gz", hash = "sha256:a6466d78bdcaa58b7f0303793643103baae8106a87acd5d01e196179a9d0d024"},
]
[package.dependencies]
httpx = ">=0.24,<0.28"
[[package]]
name = "sympy"
version = "1.12"
@ -8845,13 +8933,13 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,
[[package]]
name = "transformers"
version = "4.39.2"
version = "4.39.3"
description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow"
optional = true
python-versions = ">=3.8.0"
files = [
{file = "transformers-4.39.2-py3-none-any.whl", hash = "sha256:8388a4ae1d91ade935f5c5b36dc47aa1a352b092c30595e3337b49a5f7e71b4e"},
{file = "transformers-4.39.2.tar.gz", hash = "sha256:be0c7392cb92ab48efab2656f1cfd1cbda33b2b8a2917a18bd1196707dbebe14"},
{file = "transformers-4.39.3-py3-none-any.whl", hash = "sha256:7838034a12cca3168247f9d2d1dba6724c9de3ae0f73a108258c6b8fc5912601"},
{file = "transformers-4.39.3.tar.gz", hash = "sha256:2586e5ff4150f122716fc40f5530e92871befc051848fbe82600969c535b762d"},
]
[package.dependencies]
@ -9001,13 +9089,13 @@ files = [
[[package]]
name = "types-pyasn1"
version = "0.5.0.20240301"
version = "0.6.0.20240402"
description = "Typing stubs for pyasn1"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-pyasn1-0.5.0.20240301.tar.gz", hash = "sha256:da328f5771d54a2016863270b281047f9cc38e39f65a297ba9f987d5de3403f1"},
{file = "types_pyasn1-0.5.0.20240301-py3-none-any.whl", hash = "sha256:d9989899184bbd6e2adf6f812c8f49c48197fceea251a6fb13666dae3203f80d"},
{file = "types-pyasn1-0.6.0.20240402.tar.gz", hash = "sha256:5d54dcb33f69dd269071ca098e923ac20c5f03c814631fa7f3ed9ee035a5da3a"},
{file = "types_pyasn1-0.6.0.20240402-py3-none-any.whl", hash = "sha256:848d01e7313c200acc035a8b3d377fe7b2aecbe77f2be49eb160a7f82835aaaf"},
]
[[package]]
@ -9088,13 +9176,13 @@ types-pyOpenSSL = "*"
[[package]]
name = "types-requests"
version = "2.31.0.20240311"
version = "2.31.0.20240402"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-requests-2.31.0.20240311.tar.gz", hash = "sha256:b1c1b66abfb7fa79aae09097a811c4aa97130eb8831c60e47aee4ca344731ca5"},
{file = "types_requests-2.31.0.20240311-py3-none-any.whl", hash = "sha256:47872893d65a38e282ee9f277a4ee50d1b28bd592040df7d1fdaffdf3779937d"},
{file = "types-requests-2.31.0.20240402.tar.gz", hash = "sha256:e5c09a202f8ae79cd6ffbbba2203b6c3775a83126283bb2a6abbc129abc02a12"},
{file = "types_requests-2.31.0.20240402-py3-none-any.whl", hash = "sha256:bd7eb7102168d4b5b489f15cdd9842b63ab7fe56aa82a0589fa595b94195acf4"},
]
[package.dependencies]
@ -9452,15 +9540,26 @@ test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)"
[[package]]
name = "validators"
version = "0.24.0"
version = "0.22.0"
description = "Python Data Validation for Humans™"
optional = false
python-versions = ">=3.8"
files = [
{file = "validators-0.24.0-py3-none-any.whl", hash = "sha256:4a99eb368747e60900bae947418eb21e230ff4ff5e7b7944b9308c456d86da32"},
{file = "validators-0.24.0.tar.gz", hash = "sha256:cd23defb36de42d14e7559cf0757f761bb46b10d9de2998e6ef805f769d859e3"},
{file = "validators-0.22.0-py3-none-any.whl", hash = "sha256:61cf7d4a62bbae559f2e54aed3b000cea9ff3e2fdbe463f51179b92c58c9585a"},
{file = "validators-0.22.0.tar.gz", hash = "sha256:77b2689b172eeeb600d9605ab86194641670cdb73b60afd577142a9397873370"},
]
[package.extras]
docs-offline = ["myst-parser (>=2.0.0)", "pypandoc-binary (>=1.11)", "sphinx (>=7.1.1)"]
docs-online = ["mkdocs (>=1.5.2)", "mkdocs-git-revision-date-localized-plugin (>=1.2.0)", "mkdocs-material (>=9.2.6)", "mkdocstrings[python] (>=0.22.0)", "pyaml (>=23.7.0)"]
hooks = ["pre-commit (>=3.3.3)"]
package = ["build (>=1.0.0)", "twine (>=4.0.2)"]
runner = ["tox (>=4.11.1)"]
sast = ["bandit[toml] (>=1.7.5)"]
testing = ["pytest (>=7.4.0)"]
tooling = ["black (>=23.7.0)", "pyright (>=1.1.325)", "ruff (>=0.0.287)"]
tooling-extras = ["pyaml (>=23.7.0)", "pypandoc-binary (>=1.11)", "pytest (>=7.4.0)"]
[[package]]
name = "vine"
version = "5.1.0"
@ -9572,22 +9671,24 @@ files = [
[[package]]
name = "weaviate-client"
version = "3.26.2"
version = "4.5.4"
description = "A python native Weaviate client"
optional = false
python-versions = ">=3.8"
files = [
{file = "weaviate-client-3.26.2.tar.gz", hash = "sha256:63ec70839b64909810a64aa7b3e5b85088462e93c7e2ed3c32ebefb702f36723"},
{file = "weaviate_client-3.26.2-py3-none-any.whl", hash = "sha256:ca43bfb9c06b8ae3fd938dc9158acd93d4cbf4622192e173333e1ff63cf97164"},
{file = "weaviate-client-4.5.4.tar.gz", hash = "sha256:fc53dc73cd53df453c5e6dc758e49a6a1549212d6670ddd013392107120692f8"},
{file = "weaviate_client-4.5.4-py3-none-any.whl", hash = "sha256:f6d3a6b759e5aa0d3350067490526ea38b9274ae4043b4a3ae0064c28d56883f"},
]
[package.dependencies]
authlib = ">=1.2.1,<2.0.0"
grpcio = ">=1.57.0,<2.0.0"
grpcio-health-checking = ">=1.57.0,<2.0.0"
grpcio-tools = ">=1.57.0,<2.0.0"
httpx = "0.27.0"
pydantic = ">=2.5.0,<3.0.0"
requests = ">=2.30.0,<3.0.0"
validators = ">=0.21.2,<1.0.0"
[package.extras]
grpc = ["grpcio (>=1.57.0,<2.0.0)", "grpcio-tools (>=1.57.0,<2.0.0)"]
validators = "0.22.0"
[[package]]
name = "websocket-client"
@ -10054,21 +10155,6 @@ files = [
idna = ">=2.0"
multidict = ">=4.0"
[[package]]
name = "zep-python"
version = "2.0.0rc5"
description = "Long-Term Memory for AI Assistants. This is the Python client for the Zep service."
optional = false
python-versions = "<4,>=3.9.0"
files = [
{file = "zep_python-2.0.0rc5-py3-none-any.whl", hash = "sha256:8b1b5c22c9e1ef439c9ef3d785347abf89b1243c7149e32025dd065cc022af40"},
{file = "zep_python-2.0.0rc5.tar.gz", hash = "sha256:e6ced8089760374dead948d6b4b88fceb09a356bf9a7fe182b4ceb6e828f0bb1"},
]
[package.dependencies]
httpx = ">=0.24.0,<0.29.0"
pydantic = ">=2.0.0"
[[package]]
name = "zipp"
version = "3.18.1"
@ -10163,4 +10249,4 @@ local = ["ctransformers", "llama-cpp-python", "sentence-transformers"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.12"
content-hash = "b1b40cf39cc544faf5ca6ad04a2be009df7d8d343d86c1e9d7c02d21b2cad431"
content-hash = "3eb1181a83884c7ba52a7d1c98dcff13a307452eaf8f4a148fc0778f97499dfd"

View file

@ -28,8 +28,7 @@ enable = true
[tool.poetry.dependencies]
python = ">=3.10,<3.12"
# langflow-base = { path = "./src/backend/base", develop = true }
langflow-base = "0.0.13"
langflow-base = { path = "./src/backend/base", develop = true }
beautifulsoup4 = "^4.12.2"
google-search-results = "^2.4.1"
google-api-python-client = "^2.118.0"
@ -50,6 +49,7 @@ faiss-cpu = "^1.7.4"
types-cachetools = "^5.3.0.5"
pinecone-client = "^3.0.3"
pymongo = "^4.6.0"
supabase = "^2.3.0"
certifi = "^2023.11.17"
psycopg = "^3.1.9"
psycopg-binary = "^3.1.9"
@ -58,9 +58,8 @@ celery = { extras = ["redis"], version = "^5.3.6", optional = true }
redis = { version = "^5.0.1", optional = true }
flower = { version = "^2.0.0", optional = true }
metaphor-python = "^0.1.11"
zep-python = "*"
pywin32 = { version = "^306", markers = "sys_platform == 'win32'" }
langfuse = "*"
langfuse = "^2.9.0"
metal-sdk = "^2.5.0"
markupsafe = "^2.1.3"
extract-msg = "^0.47.0"
@ -109,7 +108,7 @@ respx = "^0.20.2"
[tool.poetry.extras]
deploy = ["langchain-serve", "celery", "redis", "flower"]
deploy = ["celery", "redis", "flower"]
local = ["llama-cpp-python", "sentence-transformers", "ctransformers"]
all = ["deploy", "local"]

View file

@ -7,6 +7,129 @@ then
exit 1
fi
# Utility function to display an error message and exit
exit_with_message() {
    # Write the message to stderr so it is not mixed into captured stdout,
    # then stop the whole script with a failure status.
    printf '%s\n' "$1" >&2
    exit 1
}
# Check if version argument is provided
if [ -z "$1" ]; then
exit_with_message "No argument supplied. Please provide the Poetry version to check."
fi
# Detect Operating System
OS="$(uname -s)"
case "$OS" in
Darwin)
OS="macOS"
;;
Linux)
OS="Linux"
;;
*)
exit_with_message "Unsupported operating system. This script supports macOS and Linux."
;;
esac
echo "Detected Operating System: $OS"
# Installation of pipx based on the detected OS
# Install pipx with the mechanism appropriate for the OS passed as $1.
#   macOS : Homebrew (must already be installed, otherwise we abort).
#   Linux : apt on Ubuntu/Debian, dnf on Fedora, per-user pip elsewhere.
# Any other value aborts via exit_with_message.
install_pipx() {
    case $1 in
        macOS)
            # macOS installation using Homebrew; bail out early if brew is missing.
            command -v brew >/dev/null 2>&1 || exit_with_message "Homebrew is not installed. Please install Homebrew first."
            echo "Installing pipx using Homebrew..."
            brew install pipx
            pipx ensurepath
            ;;
        Linux)
            # Linux installation. Distinguish distributions by probing /etc/*release.
            if grep -qEi "(ubuntu|debian)" /etc/*release; then
                echo "Installing pipx on Ubuntu/Debian..."
                sudo apt update
                sudo apt install pipx -y
            elif grep -qEi "fedora" /etc/*release; then
                echo "Installing pipx on Fedora..."
                sudo dnf install pipx -y
            else
                echo "Installing pipx using pip (other Linux distributions)..."
                python3 -m pip install --user pipx
            fi
            # Ensure pipx-managed binaries end up on PATH regardless of install route.
            pipx ensurepath
            ;;
        *)
            exit_with_message "Unsupported operating system for pipx installation."
            ;;
    esac
}
# Function to fetch the latest version of pipx from GitHub and compare with the installed version
# Compare the installed pipx version against the latest GitHub release and
# offer an interactive upgrade. Uses the global $OS set during OS detection.
check_for_pipx_update() {
    echo "Checking for updates to pipx..."
    # Fetch the latest version of pipx, ensuring only to capture the numeric
    # version without the 'v' prefix. Declared separately from the assignment
    # so a curl failure is not masked by `local` (shellcheck SC2155).
    local latest_version
    latest_version=$(curl -s https://api.github.com/repos/pypa/pipx/releases/latest | grep '"tag_name":' | sed -E 's/.*"tag_name": "v?([^"]+)".*/\1/')
    # Extract the current installed version of pipx.
    local current_version
    current_version=$(pipx --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+')
    # Guard: if the GitHub API call failed (offline, rate-limited, API change),
    # latest_version is empty; skip instead of offering an "update" to nothing.
    if [[ -z "$latest_version" ]]; then
        echo "Could not determine the latest pipx version; skipping update check."
        return 0
    fi
    if [[ "$latest_version" == "$current_version" ]]; then
        echo "You have the latest version of pipx ($current_version)."
    else
        echo "A newer version of pipx ($latest_version) is available. You have $current_version. Do you want to update? (yes/no)"
        read -r user_input
        if [[ "$user_input" == "yes" ]]; then
            echo "Updating pipx..."
            # Upgrade through the same channel the OS would have installed with.
            case "$OS" in
                macOS)
                    brew upgrade pipx
                    ;;
                Linux)
                    if grep -qEi "(ubuntu|debian)" /etc/*release; then
                        sudo apt update
                        sudo apt install --only-upgrade pipx -y
                    elif grep -qEi "fedora" /etc/*release; then
                        sudo dnf upgrade pipx -y
                    else
                        python3 -m pip install --user --upgrade pipx
                    fi
                    ;;
                *)
                    exit_with_message "Unsupported operating system for pipx update."
                    ;;
            esac
            pipx ensurepath
            echo "pipx updated to version $latest_version"
        else
            echo "Not updating pipx at this time."
        fi
    fi
}
# Ensure pipx is available: install it when missing, otherwise offer an update.
if ! command -v pipx &> /dev/null; then
    echo "Pipx is not installed. Installing..."
    install_pipx "$OS"
    echo "Pipx installed successfully."
else
    echo "Pipx is already installed."
    check_for_pipx_update
fi
echo "Checking Poetry installation..."
# Install Poetry via pipx if it is not already on PATH.
if ! command -v poetry &> /dev/null
then
    echo "Poetry is not installed. Installing..."
    # Pin Poetry's interpreter to Python 3.10; pipx downloads it when absent.
    pipx install poetry --python python3.10 --fetch-missing-python
    echo "Poetry installed successfully."
else
    echo "Poetry is already installed."
fi
echo "Checking Poetry version..."
# Check Poetry version

View file

@ -0,0 +1,42 @@
import re
import shutil
import sys
from pathlib import Path
def read_version_from_pyproject(file_path):
    """Return the project's own ``version = "..."`` value from a pyproject.toml.

    Scans the file line by line and returns the first matching version string,
    or ``None`` when no version line is present.
    """
    # Anchor the pattern to the start of the line so dependency specifiers such
    # as `celery = { extras = [...], version = "^5.3.6" }` can never be
    # mistaken for the project version line.
    version_re = re.compile(r'^version = "(.*)"')
    with open(file_path, "r", encoding="utf-8") as file:
        for line in file:
            match = version_re.match(line)
            if match:
                return match.group(1)
    return None
def update_pyproject_dependency(pyproject_path, version):
    """Pin the langflow-base dependency in pyproject.toml to a released version.

    Replaces the local path-based development dependency line with
    ``langflow-base = "^<version>"``. Warns (instead of failing silently)
    when the expected line is not found, leaving the file content unchanged.
    """
    pattern = re.compile(r'langflow-base = \{ path = "\./src/backend/base", develop = true \}')
    replacement = f'langflow-base = "^{version}"'
    with open(pyproject_path, "r", encoding="utf-8") as file:
        content = file.read()
    # subn reports how many replacements happened so a no-op can be detected.
    content, count = pattern.subn(replacement, content)
    if count == 0:
        print(f"Warning: no langflow-base path dependency found in {pyproject_path}; file left unchanged.")
    with open(pyproject_path, "w", encoding="utf-8") as file:
        file.write(content)
if __name__ == "__main__":
    # Back up pyproject.toml and poetry.lock before editing so the originals
    # can be restored if the release process goes wrong.
    pyproject_path = Path(__file__).resolve().parent / "../pyproject.toml"
    pyproject_path = pyproject_path.resolve()
    shutil.copyfile(pyproject_path, pyproject_path.with_name("pyproject.toml.bak"))
    shutil.copyfile(
        pyproject_path.with_name("poetry.lock"),
        pyproject_path.with_name("poetry.lock.bak"),
    )

    # Read the langflow-base version and pin the root project's dependency to it.
    langflow_base_path = Path(__file__).resolve().parent / "../src/backend/base/pyproject.toml"
    version = read_version_from_pyproject(langflow_base_path)
    if version:
        update_pyproject_dependency(pyproject_path, version)
    else:
        # Fail loudly: exiting 0 here would let a surrounding release pipeline
        # continue with an unpinned langflow-base dependency.
        print("Error: Version not found.")
        sys.exit(1)

View file

@ -149,13 +149,13 @@ def run(
Run the Langflow.
"""
configure(log_level=log_level, log_file=log_file)
set_var_for_macos_issue()
# override env variables with .env file
if env_file:
load_dotenv(env_file, override=True)
configure(log_level=log_level, log_file=log_file)
update_settings(
config,
dev=dev,
@ -246,10 +246,10 @@ def get_free_port(port):
def print_banner(host, port):
# console = Console()
from langflow.version import __version__
word = "Langflow"
colors = ["#3300cc"]
colors = ["#6e42f5"]
styled_word = ""
@ -259,7 +259,7 @@ def print_banner(host, port):
# Title with emojis and gradient text
title = (
f"[bold]Welcome to :chains: {styled_word} [/bold]\n\n"
f"[bold]Welcome to :chains: {styled_word} v{__version__}[/bold]\n"
f"Access [link=http://{host}:{port}]http://{host}:{port}[/link]"
)
info_text = (
@ -307,7 +307,7 @@ def run_langflow(host, port, log_level, options, app):
def superuser(
username: str = typer.Option(..., prompt=True, help="Username for the superuser."),
password: str = typer.Option(..., prompt=True, hide_input=True, help="Password for the superuser."),
log_level: str = typer.Option("critical", help="Logging level.", envvar="LANGFLOW_LOG_LEVEL"),
log_level: str = typer.Option("error", help="Logging level.", envvar="LANGFLOW_LOG_LEVEL"),
):
"""
Create a superuser.

View file

@ -1,4 +1,5 @@
import os
import warnings
from logging.config import fileConfig
from alembic import context
@ -82,11 +83,12 @@ def run_migrations_online() -> None:
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata, render_as_batch=True)
with context.begin_transaction():
context.run_migrations()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata, render_as_batch=True)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():

View file

@ -58,9 +58,7 @@ class APIRequest(CustomComponent):
data = body if body else None
payload = json.dumps(data)
try:
response = await client.request(
method, url, headers=headers, content=payload, timeout=timeout
)
response = await client.request(method, url, headers=headers, content=payload, timeout=timeout)
try:
result = response.json()
except Exception:
@ -117,10 +115,7 @@ class APIRequest(CustomComponent):
bodies += [None] * (len(urls) - len(bodies)) # type: ignore
async with httpx.AsyncClient() as client:
results = await asyncio.gather(
*[
self.make_request(client, method, u, headers, rec, timeout)
for u, rec in zip(urls, bodies)
]
*[self.make_request(client, method, u, headers, rec, timeout) for u, rec in zip(urls, bodies)]
)
self.status = results
return results

View file

@ -9,7 +9,7 @@ from langchain_core.documents import Document
from langflow.interface.custom.custom_component import CustomComponent
from langflow.schema import Record
from langflow.field_typing import Text
from langflow.utils.util import build_loader_repr_from_records, unescape_string
from langflow.utils.util import unescape_string
class SplitTextComponent(CustomComponent):
@ -54,7 +54,6 @@ class SplitTextComponent(CustomComponent):
chunk_overlap: Optional[int] = 200,
recursive: bool = False,
) -> list[Record]:
separators = [unescape_string(x) for x in separators]
# Make sure chunk_size and chunk_overlap are ints

View file

@ -5,6 +5,7 @@ from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class AnthropicLLM(LCModelComponent):

View file

@ -5,6 +5,7 @@ from langchain_openai import AzureChatOpenAI
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class AzureChatOpenAIComponent(LCModelComponent):

View file

@ -5,6 +5,7 @@ from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class QianfanChatEndpointComponent(LCModelComponent):

View file

@ -2,7 +2,7 @@ from typing import Optional
from langchain_community.chat_models.cohere import ChatCohere
from pydantic.v1 import SecretStr
from langflow.field_typing import Text
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent

View file

@ -2,7 +2,7 @@ from typing import Optional
from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic.v1 import SecretStr
from langflow.field_typing import Text, RangeSpec
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent

View file

@ -2,7 +2,7 @@ from typing import Optional
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langflow.field_typing import Text
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent

View file

@ -4,6 +4,7 @@ from langchain_core.messages.base import BaseMessage
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class ChatVertexAIComponent(LCModelComponent):

View file

@ -92,6 +92,11 @@ class AstraDBSearchComponent(LCVectorStoreComponent):
"info": "Optional dictionary defining the indexing policy for the collection.",
"advanced": True,
},
"number_of_results": {
"display_name": "Number of Results",
"info": "Number of results to return.",
"advanced": True,
},
}
def build(
@ -102,6 +107,7 @@ class AstraDBSearchComponent(LCVectorStoreComponent):
token: str,
api_endpoint: str,
search_type: str = "Similarity",
number_of_results: int = 4,
namespace: Optional[str] = None,
metric: Optional[str] = None,
batch_size: Optional[int] = None,
@ -131,4 +137,12 @@ class AstraDBSearchComponent(LCVectorStoreComponent):
metadata_indexing_exclude=metadata_indexing_exclude,
collection_indexing_policy=collection_indexing_policy,
)
return self.search_with_vector_store(input_value, search_type, vector_store)
try:
return self.search_with_vector_store(input_value, search_type, vector_store, k=number_of_results)
except KeyError as e:
if "content" in str(e):
raise ValueError(
"You should ingest data through Langflow (or LangChain) to query it in Langflow. Your collection does not contain a field name 'content'."
)
else:
raise e

View file

@ -48,6 +48,11 @@ class ChromaSearchComponent(LCVectorStoreComponent):
"display_name": "Server SSL Enabled",
"advanced": True,
},
"number_of_results": {
"display_name": "Number of Results",
"info": "Number of results to return.",
"advanced": True,
},
}
def build(
@ -57,6 +62,7 @@ class ChromaSearchComponent(LCVectorStoreComponent):
collection_name: str,
embedding: Embeddings,
chroma_server_ssl_enabled: bool,
number_of_results: int = 4,
index_directory: Optional[str] = None,
chroma_server_cors_allow_origins: Optional[str] = None,
chroma_server_host: Optional[str] = None,
@ -102,4 +108,4 @@ class ChromaSearchComponent(LCVectorStoreComponent):
client_settings=chroma_settings,
)
return self.search_with_vector_store(input_value, search_type, vector_store)
return self.search_with_vector_store(input_value, search_type, vector_store, k=number_of_results)

View file

@ -21,6 +21,11 @@ class FAISSSearchComponent(LCVectorStoreComponent):
},
"input_value": {"display_name": "Input"},
"index_name": {"display_name": "Index Name"},
"number_of_results": {
"display_name": "Number of Results",
"info": "Number of results to return.",
"advanced": True,
},
}
def build(
@ -28,6 +33,7 @@ class FAISSSearchComponent(LCVectorStoreComponent):
input_value: Text,
embedding: Embeddings,
folder_path: str,
number_of_results: int = 4,
index_name: str = "langflow_index",
) -> List[Record]:
if not folder_path:
@ -38,5 +44,5 @@ class FAISSSearchComponent(LCVectorStoreComponent):
raise ValueError("Failed to load the FAISS index.")
return self.search_with_vector_store(
vector_store=vector_store, input_value=input_value, search_type="similarity"
vector_store=vector_store, input_value=input_value, search_type="similarity", k=number_of_results
)

View file

@ -23,6 +23,11 @@ class MongoDBAtlasSearchComponent(LCVectorStoreComponent):
"index_name": {"display_name": "Index Name"},
"mongodb_atlas_cluster_uri": {"display_name": "MongoDB Atlas Cluster URI"},
"search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
"number_of_results": {
"display_name": "Number of Results",
"info": "Number of results to return.",
"advanced": True,
},
}
def build( # type: ignore[override]
@ -30,6 +35,7 @@ class MongoDBAtlasSearchComponent(LCVectorStoreComponent):
input_value: Text,
search_type: str,
embedding: Embeddings,
number_of_results: int = 4,
collection_name: str = "",
db_name: str = "",
index_name: str = "",
@ -47,5 +53,5 @@ class MongoDBAtlasSearchComponent(LCVectorStoreComponent):
if not vector_store:
raise ValueError("Failed to create MongoDB Atlas Vector Store")
return self.search_with_vector_store(
vector_store=vector_store, input_value=input_value, search_type=search_type
vector_store=vector_store, input_value=input_value, search_type=search_type, k=number_of_results
)

View file

@ -38,6 +38,11 @@ class PineconeSearchComponent(PineconeComponent, LCVectorStoreComponent):
"default": 1,
"advanced": True,
},
"number_of_results": {
"display_name": "Number of Results",
"info": "Number of results to return.",
"advanced": True,
},
}
def build( # type: ignore[override]
@ -46,6 +51,7 @@ class PineconeSearchComponent(PineconeComponent, LCVectorStoreComponent):
embedding: Embeddings,
pinecone_env: str,
text_key: str = "text",
number_of_results: int = 4,
pool_threads: int = 4,
index_name: Optional[str] = None,
pinecone_api_key: Optional[str] = None,
@ -66,5 +72,5 @@ class PineconeSearchComponent(PineconeComponent, LCVectorStoreComponent):
raise ValueError("Failed to load the Pinecone index.")
return self.search_with_vector_store(
vector_store=vector_store, input_value=input_value, search_type=search_type
vector_store=vector_store, input_value=input_value, search_type=search_type, k=number_of_results
)

View file

@ -41,6 +41,11 @@ class QdrantSearchComponent(QdrantComponent, LCVectorStoreComponent):
"search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
"timeout": {"display_name": "Timeout", "advanced": True},
"url": {"display_name": "URL", "advanced": True},
"number_of_results": {
"display_name": "Number of Results",
"info": "Number of results to return.",
"advanced": True,
},
}
def build( # type: ignore[override]
@ -48,6 +53,7 @@ class QdrantSearchComponent(QdrantComponent, LCVectorStoreComponent):
input_value: Text,
embedding: Embeddings,
collection_name: str,
number_of_results: int = 4,
search_type: str = "similarity",
api_key: Optional[str] = None,
content_payload_key: str = "page_content",
@ -88,5 +94,5 @@ class QdrantSearchComponent(QdrantComponent, LCVectorStoreComponent):
raise ValueError("Failed to load the Qdrant index.")
return self.search_with_vector_store(
vector_store=vector_store, input_value=input_value, search_type=search_type
vector_store=vector_store, input_value=input_value, search_type=search_type, k=number_of_results
)

View file

@ -39,6 +39,11 @@ class RedisSearchComponent(RedisComponent, LCVectorStoreComponent):
"advanced": False,
},
"redis_index_name": {"display_name": "Redis Index", "advanced": False},
"number_of_results": {
"display_name": "Number of Results",
"info": "Number of results to return.",
"advanced": True,
},
}
def build( # type: ignore[override]
@ -48,6 +53,7 @@ class RedisSearchComponent(RedisComponent, LCVectorStoreComponent):
embedding: Embeddings,
redis_server_url: str,
redis_index_name: str,
number_of_results: int = 4,
schema: Optional[str] = None,
) -> List[Record]:
"""
@ -72,5 +78,5 @@ class RedisSearchComponent(RedisComponent, LCVectorStoreComponent):
raise ValueError("Failed to load the Redis index.")
return self.search_with_vector_store(
input_value=input_value, search_type=search_type, vector_store=vector_store
input_value=input_value, search_type=search_type, vector_store=vector_store, k=number_of_results
)

View file

@ -26,6 +26,11 @@ class SupabaseSearchComponent(LCVectorStoreComponent):
"supabase_service_key": {"display_name": "Supabase Service Key"},
"supabase_url": {"display_name": "Supabase URL"},
"table_name": {"display_name": "Table Name", "advanced": True},
"number_of_results": {
"display_name": "Number of Results",
"info": "Number of results to return.",
"advanced": True,
},
}
def build(
@ -33,6 +38,7 @@ class SupabaseSearchComponent(LCVectorStoreComponent):
input_value: Text,
search_type: str,
embedding: Embeddings,
number_of_results: int = 4,
query_name: str = "",
supabase_service_key: str = "",
supabase_url: str = "",
@ -45,4 +51,4 @@ class SupabaseSearchComponent(LCVectorStoreComponent):
table_name=table_name,
query_name=query_name,
)
return self.search_with_vector_store(input_value, search_type, vector_store)
return self.search_with_vector_store(input_value, search_type, vector_store, k=number_of_results)

View file

@ -34,6 +34,11 @@ class VectaraSearchComponent(VectaraComponent, LCVectorStoreComponent):
"display_name": "Files Url",
"info": "Make vectara object using url of files (optional)",
},
"number_of_results": {
"display_name": "Number of Results",
"info": "Number of results to return.",
"advanced": True,
},
}
def build( # type: ignore[override]
@ -43,6 +48,7 @@ class VectaraSearchComponent(VectaraComponent, LCVectorStoreComponent):
vectara_customer_id: str,
vectara_corpus_id: str,
vectara_api_key: str,
number_of_results: int = 4,
) -> List[Record]:
source = "Langflow"
vector_store = Vectara(
@ -56,5 +62,5 @@ class VectaraSearchComponent(VectaraComponent, LCVectorStoreComponent):
raise ValueError("Failed to create Vectara Vector Store")
return self.search_with_vector_store(
vector_store=vector_store, input_value=input_value, search_type=search_type
vector_store=vector_store, input_value=input_value, search_type=search_type, k=number_of_results
)

View file

@ -49,7 +49,11 @@ class WeaviateSearchVectorStore(WeaviateVectorStoreComponent, LCVectorStoreCompo
"field_type": "bool",
"advanced": True,
},
"code": {"show": False},
"number_of_results": {
"display_name": "Number of Results",
"info": "Number of results to return.",
"advanced": True,
},
}
def build( # type: ignore[override]
@ -57,6 +61,7 @@ class WeaviateSearchVectorStore(WeaviateVectorStoreComponent, LCVectorStoreCompo
input_value: Text,
search_type: str,
url: str,
number_of_results: int = 4,
search_by_text: bool = False,
api_key: Optional[str] = None,
index_name: Optional[str] = None,
@ -77,5 +82,5 @@ class WeaviateSearchVectorStore(WeaviateVectorStoreComponent, LCVectorStoreCompo
raise ValueError("Failed to load the Weaviate index.")
return self.search_with_vector_store(
vector_store=vector_store, input_value=input_value, search_type=search_type
vector_store=vector_store, input_value=input_value, search_type=search_type, k=number_of_results
)

View file

@ -33,6 +33,11 @@ class PGVectorSearchComponent(PGVectorComponent, LCVectorStoreComponent):
},
"collection_name": {"display_name": "Table", "advanced": False},
"input_value": {"display_name": "Input"},
"number_of_results": {
"display_name": "Number of Results",
"info": "Number of results to return.",
"advanced": True,
},
}
def build( # type: ignore[override]
@ -42,6 +47,7 @@ class PGVectorSearchComponent(PGVectorComponent, LCVectorStoreComponent):
search_type: str,
pg_server_url: str,
collection_name: str,
number_of_results: int = 4,
) -> List[Record]:
"""
Builds the Vector Store or BaseRetriever object.
@ -64,5 +70,5 @@ class PGVectorSearchComponent(PGVectorComponent, LCVectorStoreComponent):
except Exception as e:
raise RuntimeError(f"Failed to build PGVector: {e}")
return self.search_with_vector_store(
input_value=input_value, search_type=search_type, vector_store=vector_store
input_value=input_value, search_type=search_type, vector_store=vector_store, k=number_of_results
)

View file

@ -19,6 +19,8 @@ class LCVectorStoreComponent(CustomComponent):
input_value: Text,
search_type: str,
vector_store: Union[VectorStore, BaseRetriever],
k=10,
**kwargs,
) -> List[Record]:
"""
Search for records in the vector store based on the input value and search type.
@ -37,7 +39,7 @@ class LCVectorStoreComponent(CustomComponent):
docs: List[Document] = []
if input_value and isinstance(input_value, str) and hasattr(vector_store, "search"):
docs = vector_store.search(query=input_value, search_type=search_type.lower())
docs = vector_store.search(query=input_value, search_type=search_type.lower(), k=k, **kwargs)
else:
raise ValueError("Invalid inputs provided.")
records = docs_to_records(docs)

View file

@ -1,7 +1,7 @@
import asyncio
from collections import defaultdict, deque
from itertools import chain
from typing import TYPE_CHECKING, Callable, Coroutine, Dict, Generator, List, Literal, Optional, Type, Union
from typing import TYPE_CHECKING, Callable, Coroutine, Dict, Generator, List, Optional, Type, Union
from loguru import logger

View file

@ -192,6 +192,7 @@ def delete_start_projects(session):
).all()
for flow in flows:
session.delete(flow)
session.commit()
def create_or_update_starter_projects():

View file

@ -982,7 +982,7 @@
"zoom": 0.47344308394045925
}
},
"description": "",
"description": "This flow can be used to create a blog post following instructions from the user, using two other blogs as reference.",
"name": "Blog Writer",
"last_tested_version": "1.0.0a0",
"is_component": false

View file

@ -3,7 +3,7 @@
"data": {
"nodes": [
{
"id": "Prompt-6qkyO",
"id": "Prompt-amqBu",
"type": "genericNode",
"position": {
"x": 2191.5837146441663,
@ -109,7 +109,7 @@
"beta": false,
"error": null
},
"id": "Prompt-6qkyO",
"id": "Prompt-amqBu",
"description": "Create a prompt template with dynamic variables.",
"display_name": "Prompt"
},
@ -123,7 +123,7 @@
"dragging": false
},
{
"id": "Prompt-8M7lZ",
"id": "Prompt-gTNiz",
"type": "genericNode",
"position": {
"x": 3731.0813766902447,
@ -229,16 +229,17 @@
"beta": false,
"error": null
},
"id": "Prompt-8M7lZ",
"id": "Prompt-gTNiz",
"description": "Create a prompt template with dynamic variables.",
"display_name": "Prompt"
},
"selected": false,
"width": 384,
"height": 385
"height": 385,
"dragging": false
},
{
"id": "ChatOutput-TKkOi",
"id": "ChatOutput-EJkG3",
"type": "genericNode",
"position": {
"x": 3722.1747844849388,
@ -426,7 +427,7 @@
"field_order": [],
"beta": false
},
"id": "ChatOutput-TKkOi"
"id": "ChatOutput-EJkG3"
},
"selected": false,
"width": 384,
@ -434,7 +435,7 @@
"dragging": false
},
{
"id": "ChatOutput-WOZJj",
"id": "ChatOutput-DNmvg",
"type": "genericNode",
"position": {
"x": 5077.71285886074,
@ -622,14 +623,14 @@
"field_order": [],
"beta": false
},
"id": "ChatOutput-WOZJj"
"id": "ChatOutput-DNmvg"
},
"selected": false,
"width": 384,
"height": 385
},
{
"id": "TextInput-fd38z",
"id": "TextInput-sptaH",
"type": "genericNode",
"position": {
"x": 1700.5624822024752,
@ -725,9 +726,9 @@
"field_order": [],
"beta": false
},
"id": "TextInput-fd38z"
"id": "TextInput-sptaH"
},
"selected": true,
"selected": false,
"width": 384,
"height": 290,
"positionAbsolute": {
@ -737,11 +738,11 @@
"dragging": false
},
{
"id": "TextOutput-LH9pW",
"id": "TextOutput-2MS4a",
"type": "genericNode",
"position": {
"x": 2429.4897030584134,
"y": 552.2482060219679
"x": 2917.216113690115,
"y": 513.0058511435552
},
"data": {
"type": "TextOutput",
@ -833,19 +834,19 @@
"field_order": [],
"beta": false
},
"id": "TextOutput-LH9pW"
"id": "TextOutput-2MS4a"
},
"selected": false,
"width": 384,
"height": 290,
"positionAbsolute": {
"x": 2429.4897030584134,
"y": 552.2482060219679
"x": 2917.216113690115,
"y": 513.0058511435552
},
"dragging": false
},
{
"id": "OpenAIModel-WAiN2",
"id": "OpenAIModel-uYXZJ",
"type": "genericNode",
"position": {
"x": 2925.784767523062,
@ -1111,7 +1112,7 @@
],
"beta": false
},
"id": "OpenAIModel-WAiN2"
"id": "OpenAIModel-uYXZJ"
},
"selected": false,
"width": 384,
@ -1123,7 +1124,7 @@
"dragging": false
},
{
"id": "TextOutput-ZUN0s",
"id": "TextOutput-MUDOR",
"type": "genericNode",
"position": {
"x": 4446.064323520379,
@ -1219,7 +1220,7 @@
"field_order": [],
"beta": false
},
"id": "TextOutput-ZUN0s"
"id": "TextOutput-MUDOR"
},
"selected": false,
"width": 384,
@ -1231,7 +1232,7 @@
}
},
{
"id": "OpenAIModel-4b8eN",
"id": "OpenAIModel-XawYB",
"type": "genericNode",
"position": {
"x": 4500.152018344182,
@ -1497,7 +1498,7 @@
],
"beta": false
},
"id": "OpenAIModel-4b8eN"
"id": "OpenAIModel-XawYB"
},
"selected": false,
"width": 384,
@ -1511,14 +1512,14 @@
],
"edges": [
{
"source": "TextInput-fd38z",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-fd38zœ}",
"target": "Prompt-6qkyO",
"targetHandle": "{œfieldNameœ:œdocumentœ,œidœ:œPrompt-6qkyOœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"source": "TextInput-sptaH",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-sptaHœ}",
"target": "Prompt-amqBu",
"targetHandle": "{œfieldNameœ:œdocumentœ,œidœ:œPrompt-amqBuœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "document",
"id": "Prompt-6qkyO",
"id": "Prompt-amqBu",
"inputTypes": [
"Document",
"BaseOutputParser",
@ -1534,24 +1535,24 @@
"object"
],
"dataType": "TextInput",
"id": "TextInput-fd38z"
"id": "TextInput-sptaH"
}
},
"style": {
"stroke": "#555"
},
"className": "stroke-foreground stroke-connection",
"id": "reactflow__edge-TextInput-fd38z{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-fd38zœ}-Prompt-6qkyO{œfieldNameœ:œdocumentœ,œidœ:œPrompt-6qkyOœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}"
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-TextInput-sptaH{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œTextInputœ,œidœ:œTextInput-sptaHœ}-Prompt-amqBu{œfieldNameœ:œdocumentœ,œidœ:œPrompt-amqBuœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}"
},
{
"source": "Prompt-6qkyO",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-6qkyOœ}",
"target": "TextOutput-LH9pW",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-LH9pWœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}",
"source": "Prompt-amqBu",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}",
"target": "TextOutput-2MS4a",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-2MS4aœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "TextOutput-LH9pW",
"id": "TextOutput-2MS4a",
"inputTypes": [
"Record",
"Text"
@ -1565,24 +1566,24 @@
"Text"
],
"dataType": "Prompt",
"id": "Prompt-6qkyO"
"id": "Prompt-amqBu"
}
},
"style": {
"stroke": "#555"
},
"className": "stroke-foreground stroke-connection",
"id": "reactflow__edge-Prompt-6qkyO{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-6qkyOœ}-TextOutput-LH9pW{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-LH9pWœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}"
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-Prompt-amqBu{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}-TextOutput-2MS4a{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-2MS4aœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}"
},
{
"source": "Prompt-6qkyO",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-6qkyOœ}",
"target": "OpenAIModel-WAiN2",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-WAiN2œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"source": "Prompt-amqBu",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}",
"target": "OpenAIModel-uYXZJ",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-uYXZJœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "OpenAIModel-WAiN2",
"id": "OpenAIModel-uYXZJ",
"inputTypes": [
"Text"
],
@ -1595,24 +1596,24 @@
"Text"
],
"dataType": "Prompt",
"id": "Prompt-6qkyO"
"id": "Prompt-amqBu"
}
},
"style": {
"stroke": "#555"
},
"className": "stroke-foreground stroke-connection",
"id": "reactflow__edge-Prompt-6qkyO{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-6qkyOœ}-OpenAIModel-WAiN2{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-WAiN2œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-Prompt-amqBu{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-amqBuœ}-OpenAIModel-uYXZJ{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-uYXZJœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
},
{
"source": "OpenAIModel-WAiN2",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-WAiN2œ}",
"target": "Prompt-8M7lZ",
"targetHandle": "{œfieldNameœ:œsummaryœ,œidœ:œPrompt-8M7lZœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"source": "OpenAIModel-uYXZJ",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}",
"target": "Prompt-gTNiz",
"targetHandle": "{œfieldNameœ:œsummaryœ,œidœ:œPrompt-gTNizœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "summary",
"id": "Prompt-8M7lZ",
"id": "Prompt-gTNiz",
"inputTypes": [
"Document",
"BaseOutputParser",
@ -1628,24 +1629,24 @@
"object"
],
"dataType": "OpenAIModel",
"id": "OpenAIModel-WAiN2"
"id": "OpenAIModel-uYXZJ"
}
},
"style": {
"stroke": "#555"
},
"className": "stroke-foreground stroke-connection",
"id": "reactflow__edge-OpenAIModel-WAiN2{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-WAiN2œ}-Prompt-8M7lZ{œfieldNameœ:œsummaryœ,œidœ:œPrompt-8M7lZœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}"
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-OpenAIModel-uYXZJ{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}-Prompt-gTNiz{œfieldNameœ:œsummaryœ,œidœ:œPrompt-gTNizœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}"
},
{
"source": "OpenAIModel-WAiN2",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-WAiN2œ}",
"target": "ChatOutput-TKkOi",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-TKkOiœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"source": "OpenAIModel-uYXZJ",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}",
"target": "ChatOutput-EJkG3",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-EJkG3œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-TKkOi",
"id": "ChatOutput-EJkG3",
"inputTypes": [
"Text"
],
@ -1658,24 +1659,24 @@
"object"
],
"dataType": "OpenAIModel",
"id": "OpenAIModel-WAiN2"
"id": "OpenAIModel-uYXZJ"
}
},
"style": {
"stroke": "#555"
},
"className": "stroke-foreground stroke-connection",
"id": "reactflow__edge-OpenAIModel-WAiN2{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-WAiN2œ}-ChatOutput-TKkOi{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-TKkOiœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-OpenAIModel-uYXZJ{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}-ChatOutput-EJkG3{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-EJkG3œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
},
{
"source": "Prompt-8M7lZ",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-8M7lZœ}",
"target": "TextOutput-ZUN0s",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-ZUN0sœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}",
"source": "Prompt-gTNiz",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}",
"target": "TextOutput-MUDOR",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-MUDORœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "TextOutput-ZUN0s",
"id": "TextOutput-MUDOR",
"inputTypes": [
"Record",
"Text"
@ -1689,24 +1690,24 @@
"Text"
],
"dataType": "Prompt",
"id": "Prompt-8M7lZ"
"id": "Prompt-gTNiz"
}
},
"style": {
"stroke": "#555"
},
"className": "stroke-foreground stroke-connection",
"id": "reactflow__edge-Prompt-8M7lZ{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-8M7lZœ}-TextOutput-ZUN0s{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-ZUN0sœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}"
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-Prompt-gTNiz{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}-TextOutput-MUDOR{œfieldNameœ:œinput_valueœ,œidœ:œTextOutput-MUDORœ,œinputTypesœ:[œRecordœ,œTextœ],œtypeœ:œstrœ}"
},
{
"source": "Prompt-8M7lZ",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-8M7lZœ}",
"target": "OpenAIModel-4b8eN",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-4b8eNœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"source": "Prompt-gTNiz",
"sourceHandle": "{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}",
"target": "OpenAIModel-XawYB",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-XawYBœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "OpenAIModel-4b8eN",
"id": "OpenAIModel-XawYB",
"inputTypes": [
"Text"
],
@ -1719,24 +1720,24 @@
"Text"
],
"dataType": "Prompt",
"id": "Prompt-8M7lZ"
"id": "Prompt-gTNiz"
}
},
"style": {
"stroke": "#555"
},
"className": "stroke-foreground stroke-connection",
"id": "reactflow__edge-Prompt-8M7lZ{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-8M7lZœ}-OpenAIModel-4b8eN{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-4b8eNœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-Prompt-gTNiz{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œPromptœ,œidœ:œPrompt-gTNizœ}-OpenAIModel-XawYB{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-XawYBœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
},
{
"source": "OpenAIModel-4b8eN",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-4b8eNœ}",
"target": "ChatOutput-WOZJj",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-WOZJjœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"source": "OpenAIModel-XawYB",
"sourceHandle": "{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-XawYBœ}",
"target": "ChatOutput-DNmvg",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-DNmvgœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"data": {
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-WOZJj",
"id": "ChatOutput-DNmvg",
"inputTypes": [
"Text"
],
@ -1749,20 +1750,20 @@
"object"
],
"dataType": "OpenAIModel",
"id": "OpenAIModel-4b8eN"
"id": "OpenAIModel-XawYB"
}
},
"style": {
"stroke": "#555"
},
"className": "stroke-foreground stroke-connection",
"id": "reactflow__edge-OpenAIModel-4b8eN{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-4b8eNœ}-ChatOutput-WOZJj{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-WOZJjœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
"className": "stroke-gray-900 stroke-connection",
"id": "reactflow__edge-OpenAIModel-XawYB{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-XawYBœ}-ChatOutput-DNmvg{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-DNmvgœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}"
}
],
"viewport": {
"x": -269.16751160112597,
"y": 42.28236292358156,
"zoom": 0.30778611191960875
"x": -383.7251879618552,
"y": 69.19813933800037,
"zoom": 0.3105753483695743
}
},
"description": "The Prompt Chaining flow chains prompts with LLMs, refining outputs through iterative stages.",

View file

@ -1,3 +1,4 @@
import warnings
from typing import Callable, Dict, List, Optional
from langchain.agents import agent_toolkits
@ -29,13 +30,15 @@ class ToolkitCreator(LangChainTypeCreator):
@property
def type_to_loader_dict(self) -> Dict:
if self.type_dict is None:
settings_service = get_settings_service()
self.type_dict = {
toolkit_name: import_class(f"langchain.agents.agent_toolkits.{toolkit_name}")
# if toolkit_name is not lower case it is a class
for toolkit_name in agent_toolkits.__all__
if not toolkit_name.islower() and toolkit_name in settings_service.settings.TOOLKITS
}
with warnings.catch_warnings():
warnings.simplefilter("ignore")
settings_service = get_settings_service()
self.type_dict = {
toolkit_name: import_class(f"langchain.agents.agent_toolkits.{toolkit_name}")
# if toolkit_name is not lower case it is a class
for toolkit_name in agent_toolkits.__all__
if not toolkit_name.islower() and toolkit_name in settings_service.settings.TOOLKITS
}
return self.type_dict

View file

@ -1,6 +1,7 @@
from typing import Dict, List, Optional
from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.tools.constants import ALL_TOOLS_NAMES, CUSTOM_TOOLS, FILE_TOOLS, OTHER_TOOLS
from langflow.interface.tools.util import get_tool_params

View file

@ -9,6 +9,7 @@ from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from loguru import logger
from langflow.api import router
from langflow.initial_setup.setup import create_or_update_starter_projects
@ -106,6 +107,7 @@ def get_static_files_dir():
def setup_app(static_files_dir: Optional[Path] = None, backend_only: bool = False) -> FastAPI:
"""Setup the FastAPI app."""
# get the directory of the current file
logger.info(f"Setting up app with static files directory {static_files_dir}")
if not static_files_dir:
static_files_dir = get_static_files_dir()
@ -128,7 +130,7 @@ if __name__ == "__main__":
host="127.0.0.1",
port=7860,
workers=get_number_of_workers(),
log_level="debug",
log_level="error",
reload=True,
loop="asyncio",
)

View file

@ -1,3 +1,5 @@
import os
from gunicorn.app.base import BaseApplication # type: ignore
from uvicorn.workers import UvicornWorker
@ -11,6 +13,7 @@ class LangflowApplication(BaseApplication):
self.options = options or {}
self.options["worker_class"] = "langflow.server.LangflowUvicornWorker"
self.options["loglevel"] = os.getenv("LANGFLOW_LOG_LEVEL", "error").lower()
self.application = app
super().__init__()

View file

@ -1,3 +1,4 @@
from datetime import datetime
import time
from pathlib import Path
from typing import TYPE_CHECKING
@ -124,10 +125,16 @@ class DatabaseService(Service):
# if not self.script_location.exists(): # this is not the correct way to check if alembic has been initialized
# We need to check if the alembic_version table exists
# if not, we need to initialize alembic
alembic_cfg = Config()
# stdout should be something like sys.stdout
# which is a buffer
# I don't want to output anything
# subprocess.DEVNULL is an int
buffer = open(self.script_location / "alembic.log", "w")
alembic_cfg = Config(stdout=buffer)
# alembic_cfg.attributes["connection"] = session
alembic_cfg.set_main_option("script_location", str(self.script_location))
alembic_cfg.set_main_option("sqlalchemy.url", self.database_url)
should_initialize_alembic = False
with Session(self.engine) as session:
# If the table does not exist it throws an error
@ -150,6 +157,7 @@ class DatabaseService(Service):
logger.info(f"Running DB migrations in {self.script_location}")
try:
buffer.write(f"{datetime.now().isoformat()}: Checking migrations\n")
command.check(alembic_cfg)
except Exception as exc:
if isinstance(exc, (util.exc.CommandError, util.exc.AutogenerateDiffsDetected)):
@ -157,6 +165,7 @@ class DatabaseService(Service):
time.sleep(3)
try:
buffer.write(f"{datetime.now().isoformat()}: Checking migrations\n")
command.check(alembic_cfg)
except util.exc.AutogenerateDiffsDetected as exc:
logger.error(f"AutogenerateDiffsDetected: {exc}")

View file

@ -40,9 +40,9 @@ class AuthSettings(BaseSettings):
"""The Secure attribute of the refresh token cookie."""
REFRESH_HTTPONLY: bool = True
"""The HttpOnly attribute of the refresh token cookie."""
ACCESS_SAME_SITE: Literal["lax", "strict", "none"] = "none"
ACCESS_SAME_SITE: Literal["lax", "strict", "none"] = "lax"
"""The SameSite attribute of the access token cookie."""
ACCESS_SECURE: bool = True
ACCESS_SECURE: bool = False
"""The Secure attribute of the access token cookie."""
ACCESS_HTTPONLY: bool = False
"""The HttpOnly attribute of the access token cookie."""

View file

@ -94,7 +94,8 @@ class FrontendNode(BaseModel):
def process_base_classes(self, base_classes: List[str]) -> List[str]:
"""Removes unwanted base classes from the list of base classes."""
return list(set(base_classes))
sorted_base_classes = sorted(list(set(base_classes)), key=lambda x: x.lower())
return sorted_base_classes
@field_serializer("display_name")
def process_display_name(self, display_name: str) -> str:

View file

@ -28,7 +28,7 @@ def configure(log_level: Optional[str] = None, log_file: Optional[Path] = None):
if os.getenv("LANGFLOW_LOG_LEVEL", "").upper() in VALID_LOG_LEVELS and log_level is None:
log_level = os.getenv("LANGFLOW_LOG_LEVEL")
if log_level is None:
log_level = "INFO"
log_level = "ERROR"
# Human-readable
log_format = (
"<green>{time:YYYY-MM-DD HH:mm:ss}</green> - <level>"

View file

@ -2050,13 +2050,13 @@ files = [
[[package]]
name = "importlib-metadata"
version = "6.11.0"
version = "7.0.0"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"},
{file = "importlib_metadata-6.11.0.tar.gz", hash = "sha256:1231cf92d825c9e03cfc4da076a16de6422c863558229ea0b22b675657463443"},
{file = "importlib_metadata-7.0.0-py3-none-any.whl", hash = "sha256:d97503976bb81f40a193d41ee6570868479c69d5068651eb039c40d850c59d67"},
{file = "importlib_metadata-7.0.0.tar.gz", hash = "sha256:7fc841f8b8332803464e5dc1c63a2e59121f46ca186c0e2e182e80bf8c1319f7"},
]
[package.dependencies]
@ -2487,13 +2487,13 @@ extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.
[[package]]
name = "langchain-core"
version = "0.1.37"
version = "0.1.38"
description = "Building applications with LLMs through composability"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langchain_core-0.1.37-py3-none-any.whl", hash = "sha256:63c6aecb0f2eb1a21f8e944da622748cfeafa2cc3d94c0182ffa8038bd00fe0b"},
{file = "langchain_core-0.1.37.tar.gz", hash = "sha256:3db7008796e25aea90f98c2159dbf29bf1fd296bdcb78dc2d8183a92fdde4433"},
{file = "langchain_core-0.1.38-py3-none-any.whl", hash = "sha256:d881b2754254cb4bdb0d5bb56e5c138d032b6e75e5cb21f151b01224b322e02b"},
{file = "langchain_core-0.1.38.tar.gz", hash = "sha256:ee8da6d061c06cce7dc22fec224b6ecbc3a8de106d6dd9f409c7fe448ea41861"},
]
[package.dependencies]
@ -3071,7 +3071,6 @@ files = [
{file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fbb160554e319f7b22ecf530a80a3ff496d38e8e07ae763b9e82fadfe96f273"},
{file = "msgpack-1.0.8-cp39-cp39-win32.whl", hash = "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d"},
{file = "msgpack-1.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011"},
{file = "msgpack-1.0.8-py3-none-any.whl", hash = "sha256:24f727df1e20b9876fa6e95f840a2a2651e34c0ad147676356f4bf5fbb0206ca"},
{file = "msgpack-1.0.8.tar.gz", hash = "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3"},
]
@ -3372,57 +3371,42 @@ sympy = "*"
[[package]]
name = "opentelemetry-api"
version = "1.23.0"
version = "1.24.0"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_api-1.23.0-py3-none-any.whl", hash = "sha256:cc03ea4025353048aadb9c64919099663664672ea1c6be6ddd8fee8e4cd5e774"},
{file = "opentelemetry_api-1.23.0.tar.gz", hash = "sha256:14a766548c8dd2eb4dfc349739eb4c3893712a0daa996e5dbf945f9da665da9d"},
{file = "opentelemetry_api-1.24.0-py3-none-any.whl", hash = "sha256:0f2c363d98d10d1ce93330015ca7fd3a65f60be64e05e30f557c61de52c80ca2"},
{file = "opentelemetry_api-1.24.0.tar.gz", hash = "sha256:42719f10ce7b5a9a73b10a4baf620574fb8ad495a9cbe5c18d76b75d8689c67e"},
]
[package.dependencies]
deprecated = ">=1.2.6"
importlib-metadata = ">=6.0,<7.0"
[[package]]
name = "opentelemetry-exporter-otlp"
version = "1.23.0"
description = "OpenTelemetry Collector Exporters"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_exporter_otlp-1.23.0-py3-none-any.whl", hash = "sha256:92371fdc8d7803465a45801fe30cd8c522ef355a385b0a1d5346d32f77511ea2"},
{file = "opentelemetry_exporter_otlp-1.23.0.tar.gz", hash = "sha256:4af8798f9bc3bddb92fcbb5b4aa9d0e955d962aa1d9bceaab08891c355a9f907"},
]
[package.dependencies]
opentelemetry-exporter-otlp-proto-grpc = "1.23.0"
opentelemetry-exporter-otlp-proto-http = "1.23.0"
importlib-metadata = ">=6.0,<=7.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-common"
version = "1.23.0"
version = "1.24.0"
description = "OpenTelemetry Protobuf encoding"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_exporter_otlp_proto_common-1.23.0-py3-none-any.whl", hash = "sha256:2a9e7e9d5a8b026b572684b6b24dcdefcaa58613d5ce3d644130b0c373c056c1"},
{file = "opentelemetry_exporter_otlp_proto_common-1.23.0.tar.gz", hash = "sha256:35e4ea909e7a0b24235bd0aaf17fba49676527feb1823b46565ff246d5a1ab18"},
{file = "opentelemetry_exporter_otlp_proto_common-1.24.0-py3-none-any.whl", hash = "sha256:e51f2c9735054d598ad2df5d3eca830fecfb5b0bda0a2fa742c9c7718e12f641"},
{file = "opentelemetry_exporter_otlp_proto_common-1.24.0.tar.gz", hash = "sha256:5d31fa1ff976cacc38be1ec4e3279a3f88435c75b38b1f7a099a1faffc302461"},
]
[package.dependencies]
opentelemetry-proto = "1.23.0"
opentelemetry-proto = "1.24.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-grpc"
version = "1.23.0"
version = "1.24.0"
description = "OpenTelemetry Collector Protobuf over gRPC Exporter"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_exporter_otlp_proto_grpc-1.23.0-py3-none-any.whl", hash = "sha256:40f9e3e7761eb34f2a1001f4543028783ac26e2db27e420d5374f2cca0182dad"},
{file = "opentelemetry_exporter_otlp_proto_grpc-1.23.0.tar.gz", hash = "sha256:aa1a012eea5342bfef51fcf3f7f22601dcb0f0984a07ffe6025b2fbb6d91a2a9"},
{file = "opentelemetry_exporter_otlp_proto_grpc-1.24.0-py3-none-any.whl", hash = "sha256:f40d62aa30a0a43cc1657428e59fcf82ad5f7ea8fff75de0f9d9cb6f739e0a3b"},
{file = "opentelemetry_exporter_otlp_proto_grpc-1.24.0.tar.gz", hash = "sha256:217c6e30634f2c9797999ea9da29f7300479a94a610139b9df17433f915e7baa"},
]
[package.dependencies]
@ -3430,45 +3414,22 @@ deprecated = ">=1.2.6"
googleapis-common-protos = ">=1.52,<2.0"
grpcio = ">=1.0.0,<2.0.0"
opentelemetry-api = ">=1.15,<2.0"
opentelemetry-exporter-otlp-proto-common = "1.23.0"
opentelemetry-proto = "1.23.0"
opentelemetry-sdk = ">=1.23.0,<1.24.0"
opentelemetry-exporter-otlp-proto-common = "1.24.0"
opentelemetry-proto = "1.24.0"
opentelemetry-sdk = ">=1.24.0,<1.25.0"
[package.extras]
test = ["pytest-grpc"]
[[package]]
name = "opentelemetry-exporter-otlp-proto-http"
version = "1.23.0"
description = "OpenTelemetry Collector Protobuf over HTTP Exporter"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_exporter_otlp_proto_http-1.23.0-py3-none-any.whl", hash = "sha256:ad853b58681df8efcb2cfc93be2b5fd86351c99ff4ab47dc917da384b8650d91"},
{file = "opentelemetry_exporter_otlp_proto_http-1.23.0.tar.gz", hash = "sha256:088eac2320f4a604e2d9ff71aced71fdae601ac6457005fb0303d6bbbf44e6ca"},
]
[package.dependencies]
deprecated = ">=1.2.6"
googleapis-common-protos = ">=1.52,<2.0"
opentelemetry-api = ">=1.15,<2.0"
opentelemetry-exporter-otlp-proto-common = "1.23.0"
opentelemetry-proto = "1.23.0"
opentelemetry-sdk = ">=1.23.0,<1.24.0"
requests = ">=2.7,<3.0"
[package.extras]
test = ["responses (>=0.22.0,<0.25)"]
[[package]]
name = "opentelemetry-instrumentation"
version = "0.44b0"
version = "0.45b0"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_instrumentation-0.44b0-py3-none-any.whl", hash = "sha256:79560f386425176bcc60c59190064597096114c4a8e5154f1cb281bb4e47d2fc"},
{file = "opentelemetry_instrumentation-0.44b0.tar.gz", hash = "sha256:8213d02d8c0987b9b26386ae3e091e0477d6331673123df736479322e1a50b48"},
{file = "opentelemetry_instrumentation-0.45b0-py3-none-any.whl", hash = "sha256:06c02e2c952c1b076e8eaedf1b82f715e2937ba7eeacab55913dd434fbcec258"},
{file = "opentelemetry_instrumentation-0.45b0.tar.gz", hash = "sha256:6c47120a7970bbeb458e6a73686ee9ba84b106329a79e4a4a66761f933709c7e"},
]
[package.dependencies]
@ -3478,78 +3439,55 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-asgi"
version = "0.44b0"
version = "0.45b0"
description = "ASGI instrumentation for OpenTelemetry"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_instrumentation_asgi-0.44b0-py3-none-any.whl", hash = "sha256:0d95c84a8991008c8a8ac35e15d43cc7768a5bb46f95f129e802ad2990d7c366"},
{file = "opentelemetry_instrumentation_asgi-0.44b0.tar.gz", hash = "sha256:72d4d28ec7ccd551eac11edc5ae8cac3586c0a228467d6a95fad7b6d4edd597a"},
{file = "opentelemetry_instrumentation_asgi-0.45b0-py3-none-any.whl", hash = "sha256:8be1157ed62f0db24e45fdf7933c530c4338bd025c5d4af7830e903c0756021b"},
{file = "opentelemetry_instrumentation_asgi-0.45b0.tar.gz", hash = "sha256:97f55620f163fd3d20323e9fd8dc3aacc826c03397213ff36b877e0f4b6b08a6"},
]
[package.dependencies]
asgiref = ">=3.0,<4.0"
opentelemetry-api = ">=1.12,<2.0"
opentelemetry-instrumentation = "0.44b0"
opentelemetry-semantic-conventions = "0.44b0"
opentelemetry-util-http = "0.44b0"
opentelemetry-instrumentation = "0.45b0"
opentelemetry-semantic-conventions = "0.45b0"
opentelemetry-util-http = "0.45b0"
[package.extras]
instruments = ["asgiref (>=3.0,<4.0)"]
test = ["opentelemetry-instrumentation-asgi[instruments]", "opentelemetry-test-utils (==0.44b0)"]
[[package]]
name = "opentelemetry-instrumentation-fastapi"
version = "0.44b0"
version = "0.45b0"
description = "OpenTelemetry FastAPI Instrumentation"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_instrumentation_fastapi-0.44b0-py3-none-any.whl", hash = "sha256:4441482944bea6676816668d56deb94af990e8c6e9582c581047e5d84c91d3c9"},
{file = "opentelemetry_instrumentation_fastapi-0.44b0.tar.gz", hash = "sha256:67ed10b93ad9d35238ae0be73cf8acbbb65a4a61fb7444d0aee5b0c492e294db"},
{file = "opentelemetry_instrumentation_fastapi-0.45b0-py3-none-any.whl", hash = "sha256:77d9c123a363129148f5f66d44094f3d67aaaa2b201396d94782b4a7f9ce4314"},
{file = "opentelemetry_instrumentation_fastapi-0.45b0.tar.gz", hash = "sha256:5a6b91e1c08a01601845fcfcfdefd0a2aecdb3c356d4a436a3210cb58c21487e"},
]
[package.dependencies]
opentelemetry-api = ">=1.12,<2.0"
opentelemetry-instrumentation = "0.44b0"
opentelemetry-instrumentation-asgi = "0.44b0"
opentelemetry-semantic-conventions = "0.44b0"
opentelemetry-util-http = "0.44b0"
opentelemetry-instrumentation = "0.45b0"
opentelemetry-instrumentation-asgi = "0.45b0"
opentelemetry-semantic-conventions = "0.45b0"
opentelemetry-util-http = "0.45b0"
[package.extras]
instruments = ["fastapi (>=0.58,<1.0)"]
test = ["httpx (>=0.22,<1.0)", "opentelemetry-instrumentation-fastapi[instruments]", "opentelemetry-test-utils (==0.44b0)", "requests (>=2.23,<3.0)"]
[[package]]
name = "opentelemetry-instrumentation-httpx"
version = "0.44b0"
description = "OpenTelemetry HTTPX Instrumentation"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_instrumentation_httpx-0.44b0-py3-none-any.whl", hash = "sha256:a4f1121b6212b018e719ef6a9a2f8317c329edd01a61452b7250f574f7d95a91"},
{file = "opentelemetry_instrumentation_httpx-0.44b0.tar.gz", hash = "sha256:6cc81c4182f54dfb0d15774e3e48bb90d3ed44e9ad8bf5eef2a64a7197f945d8"},
]
[package.dependencies]
opentelemetry-api = ">=1.12,<2.0"
opentelemetry-instrumentation = "0.44b0"
opentelemetry-semantic-conventions = "0.44b0"
opentelemetry-util-http = "0.44b0"
[package.extras]
instruments = ["httpx (>=0.18.0)"]
test = ["opentelemetry-instrumentation-httpx[instruments]", "opentelemetry-sdk (>=1.12,<2.0)", "opentelemetry-test-utils (==0.44b0)"]
[[package]]
name = "opentelemetry-proto"
version = "1.23.0"
version = "1.24.0"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_proto-1.23.0-py3-none-any.whl", hash = "sha256:4c017deca052cb287a6003b7c989ed8b47af65baeb5d57ebf93dde0793f78509"},
{file = "opentelemetry_proto-1.23.0.tar.gz", hash = "sha256:e6aaf8b7ace8d021942d546161401b83eed90f9f2cc6f13275008cea730e4651"},
{file = "opentelemetry_proto-1.24.0-py3-none-any.whl", hash = "sha256:bcb80e1e78a003040db71ccf83f2ad2019273d1e0828089d183b18a1476527ce"},
{file = "opentelemetry_proto-1.24.0.tar.gz", hash = "sha256:ff551b8ad63c6cabb1845ce217a6709358dfaba0f75ea1fa21a61ceddc78cab8"},
]
[package.dependencies]
@ -3557,40 +3495,40 @@ protobuf = ">=3.19,<5.0"
[[package]]
name = "opentelemetry-sdk"
version = "1.23.0"
version = "1.24.0"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_sdk-1.23.0-py3-none-any.whl", hash = "sha256:a93c96990ac0f07c6d679e2f1015864ff7a4f5587122dd5af968034436efb1fd"},
{file = "opentelemetry_sdk-1.23.0.tar.gz", hash = "sha256:9ddf60195837b59e72fd2033d6a47e2b59a0f74f0ec37d89387d89e3da8cab7f"},
{file = "opentelemetry_sdk-1.24.0-py3-none-any.whl", hash = "sha256:fa731e24efe832e98bcd90902085b359dcfef7d9c9c00eb5b9a18587dae3eb59"},
{file = "opentelemetry_sdk-1.24.0.tar.gz", hash = "sha256:75bc0563affffa827700e0f4f4a68e1e257db0df13372344aebc6f8a64cde2e5"},
]
[package.dependencies]
opentelemetry-api = "1.23.0"
opentelemetry-semantic-conventions = "0.44b0"
opentelemetry-api = "1.24.0"
opentelemetry-semantic-conventions = "0.45b0"
typing-extensions = ">=3.7.4"
[[package]]
name = "opentelemetry-semantic-conventions"
version = "0.44b0"
version = "0.45b0"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_semantic_conventions-0.44b0-py3-none-any.whl", hash = "sha256:7c434546c9cbd797ab980cc88bf9ff3f4a5a28f941117cad21694e43d5d92019"},
{file = "opentelemetry_semantic_conventions-0.44b0.tar.gz", hash = "sha256:2e997cb28cd4ca81a25a9a43365f593d0c2b76be0685015349a89abdf1aa4ffa"},
{file = "opentelemetry_semantic_conventions-0.45b0-py3-none-any.whl", hash = "sha256:a4a6fb9a7bacd9167c082aa4681009e9acdbfa28ffb2387af50c2fef3d30c864"},
{file = "opentelemetry_semantic_conventions-0.45b0.tar.gz", hash = "sha256:7c84215a44ac846bc4b8e32d5e78935c5c43482e491812a0bb8aaf87e4d92118"},
]
[[package]]
name = "opentelemetry-util-http"
version = "0.44b0"
version = "0.45b0"
description = "Web util for OpenTelemetry"
optional = false
python-versions = ">=3.8"
files = [
{file = "opentelemetry_util_http-0.44b0-py3-none-any.whl", hash = "sha256:ff018ab6a2fa349537ff21adcef99a294248b599be53843c44f367aef6bccea5"},
{file = "opentelemetry_util_http-0.44b0.tar.gz", hash = "sha256:75896dffcbbeb5df5429ad4526e22307fc041a27114e0c5bfd90bb219381e68f"},
{file = "opentelemetry_util_http-0.45b0-py3-none-any.whl", hash = "sha256:6628868b501b3004e1860f976f410eeb3d3499e009719d818000f24ce17b6e33"},
{file = "opentelemetry_util_http-0.45b0.tar.gz", hash = "sha256:4ce08b6a7d52dd7c96b7705b5b4f06fdb6aa3eac1233b3b0bfef8a0cab9a92cd"},
]
[[package]]
@ -3892,6 +3830,41 @@ tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "pa
typing = ["typing-extensions"]
xmp = ["defusedxml"]
[[package]]
name = "pip"
version = "24.0"
description = "The PyPA recommended tool for installing Python packages."
optional = false
python-versions = ">=3.7"
files = [
{file = "pip-24.0-py3-none-any.whl", hash = "sha256:ba0d021a166865d2265246961bec0152ff124de910c5cc39f1156ce3fa7c69dc"},
{file = "pip-24.0.tar.gz", hash = "sha256:ea9bd1a847e8c5774a5777bb398c19e80bcd4e2aa16a4b301b718fe6f593aba2"},
]
[[package]]
name = "pip-tools"
version = "7.4.1"
description = "pip-tools keeps your pinned dependencies fresh."
optional = false
python-versions = ">=3.8"
files = [
{file = "pip-tools-7.4.1.tar.gz", hash = "sha256:864826f5073864450e24dbeeb85ce3920cdfb09848a3d69ebf537b521f14bcc9"},
{file = "pip_tools-7.4.1-py3-none-any.whl", hash = "sha256:4c690e5fbae2f21e87843e89c26191f0d9454f362d8acdbd695716493ec8b3a9"},
]
[package.dependencies]
build = ">=1.0.0"
click = ">=8"
pip = ">=22.2"
pyproject_hooks = "*"
setuptools = "*"
tomli = {version = "*", markers = "python_version < \"3.11\""}
wheel = "*"
[package.extras]
coverage = ["covdefaults", "pytest-cov"]
testing = ["flit_core (>=2,<4)", "poetry_core (>=1.0.0)", "pytest (>=7.2.0)", "pytest-rerunfailures", "pytest-xdist", "tomli-w"]
[[package]]
name = "platformdirs"
version = "4.2.0"
@ -5344,13 +5317,13 @@ files = [
[[package]]
name = "types-pyasn1"
version = "0.5.0.20240301"
version = "0.6.0.20240402"
description = "Typing stubs for pyasn1"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-pyasn1-0.5.0.20240301.tar.gz", hash = "sha256:da328f5771d54a2016863270b281047f9cc38e39f65a297ba9f987d5de3403f1"},
{file = "types_pyasn1-0.5.0.20240301-py3-none-any.whl", hash = "sha256:d9989899184bbd6e2adf6f812c8f49c48197fceea251a6fb13666dae3203f80d"},
{file = "types-pyasn1-0.6.0.20240402.tar.gz", hash = "sha256:5d54dcb33f69dd269071ca098e923ac20c5f03c814631fa7f3ed9ee035a5da3a"},
{file = "types_pyasn1-0.6.0.20240402-py3-none-any.whl", hash = "sha256:848d01e7313c200acc035a8b3d377fe7b2aecbe77f2be49eb160a7f82835aaaf"},
]
[[package]]
@ -5431,13 +5404,13 @@ types-pyOpenSSL = "*"
[[package]]
name = "types-requests"
version = "2.31.0.20240311"
version = "2.31.0.20240402"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-requests-2.31.0.20240311.tar.gz", hash = "sha256:b1c1b66abfb7fa79aae09097a811c4aa97130eb8831c60e47aee4ca344731ca5"},
{file = "types_requests-2.31.0.20240311-py3-none-any.whl", hash = "sha256:47872893d65a38e282ee9f277a4ee50d1b28bd592040df7d1fdaffdf3779937d"},
{file = "types-requests-2.31.0.20240402.tar.gz", hash = "sha256:e5c09a202f8ae79cd6ffbbba2203b6c3775a83126283bb2a6abbc129abc02a12"},
{file = "types_requests-2.31.0.20240402-py3-none-any.whl", hash = "sha256:bd7eb7102168d4b5b489f15cdd9842b63ab7fe56aa82a0589fa595b94195acf4"},
]
[package.dependencies]
@ -5779,6 +5752,20 @@ MarkupSafe = ">=2.1.1"
[package.extras]
watchdog = ["watchdog (>=2.3)"]
[[package]]
name = "wheel"
version = "0.43.0"
description = "A built-package format for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "wheel-0.43.0-py3-none-any.whl", hash = "sha256:55c570405f142630c6b9f72fe09d9b67cf1477fcf543ae5b8dcb1f5b7377da81"},
{file = "wheel-0.43.0.tar.gz", hash = "sha256:465ef92c69fa5c5da2d1cf8ac40559a8c940886afcef87dcf14b9470862f1d85"},
]
[package.extras]
test = ["pytest (>=6.0.0)", "setuptools (>=65)"]
[[package]]
name = "win32-setctime"
version = "1.1.0"
@ -6083,4 +6070,4 @@ local = []
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.12"
content-hash = "5a06c86bfbf2cc209bcb3f202bd749af1c8146aa08c10fd69152c170f890866d"
content-hash = "455e5f44f2e5dcbc3e0359658d7c4ef9f93e40c99841c9de99311a0ecad483c2"

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow-base"
version = "0.0.13"
version = "0.0.16"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [
@ -52,12 +52,6 @@ docstring-parser = "^0.15"
python-jose = "^3.3.0"
pandas = "2.2.0"
multiprocess = "^0.70.14"
opentelemetry-api = "^1.23.0"
opentelemetry-sdk = "^1.23.0"
opentelemetry-exporter-otlp = "^1.23.0"
opentelemetry-instrumentation-fastapi = "^0.44b0"
opentelemetry-instrumentation-httpx = "^0.44b0"
opentelemetry-instrumentation-asgi = "^0.44b0"
duckdb = "^0.9.2"
python-socketio = "^5.11.0"
python-docx = "^1.1.0"
@ -93,7 +87,7 @@ pytest-sugar = "^0.9.7"
[tool.poetry.extras]
deploy = ["langchain-serve", "celery", "redis", "flower"]
deploy = ["celery", "redis", "flower"]
local = ["llama-cpp-python", "sentence-transformers", "ctransformers"]
all = ["deploy", "local"]

View file

@ -1,70 +0,0 @@
from typing import List, Union
from langchain.agents import AgentExecutor, BaseMultiActionAgent, BaseSingleActionAgent
from langflow import CustomComponent
from langflow.field_typing import BaseMemory, Text, Tool
class LCAgentComponent(CustomComponent):
def build_config(self):
return {
"lc": {
"display_name": "LangChain",
"info": "The LangChain to interact with.",
},
"handle_parsing_errors": {
"display_name": "Handle Parsing Errors",
"info": "If True, the agent will handle parsing errors. If False, the agent will raise an error.",
"advanced": True,
},
"output_key": {
"display_name": "Output Key",
"info": "The key to use to get the output from the agent.",
"advanced": True,
},
"memory": {
"display_name": "Memory",
"info": "Memory to use for the agent.",
},
"tools": {
"display_name": "Tools",
"info": "Tools the agent can use.",
},
"input_value": {
"display_name": "Input",
"info": "Input text to pass to the agent.",
},
}
async def run_agent(
self,
agent: Union[BaseSingleActionAgent, BaseMultiActionAgent, AgentExecutor],
inputs: str,
input_variables: list[str],
tools: List[Tool],
memory: BaseMemory = None,
handle_parsing_errors: bool = True,
output_key: str = "output",
) -> Text:
if isinstance(agent, AgentExecutor):
runnable = agent
else:
runnable = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True, memory=memory, handle_parsing_errors=handle_parsing_errors
)
input_dict = {"input": inputs}
for var in input_variables:
if var not in ["agent_scratchpad", "input"]:
input_dict[var] = ""
result = await runnable.ainvoke(input_dict)
self.status = result
if output_key in result:
return result.get(output_key)
elif "output" not in result:
if output_key != "output":
raise ValueError(f"Output key not found in result. Tried '{output_key}' and 'output'.")
else:
raise ValueError("Output key not found in result. Tried 'output'.")
return result.get("output")

View file

@ -1,3 +0,0 @@
from .model import LCModelComponent
__all__ = ["LCModelComponent"]

View file

@ -1,48 +0,0 @@
from typing import Optional
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.llms import LLM
from langchain_core.messages import HumanMessage, SystemMessage
from langflow import CustomComponent
class LCModelComponent(CustomComponent):
display_name: str = "Model Name"
description: str = "Model Description"
def get_result(self, runnable: LLM, stream: bool, input_value: str):
"""
Retrieves the result from the output of a Runnable object.
Args:
output (Runnable): The output object to retrieve the result from.
stream (bool): Indicates whether to use streaming or invocation mode.
input_value (str): The input value to pass to the output object.
Returns:
The result obtained from the output object.
"""
if stream:
result = runnable.stream(input_value)
else:
message = runnable.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
def get_chat_result(
self, runnable: BaseChatModel, stream: bool, input_value: str, system_message: Optional[str] = None
):
messages = []
if input_value:
messages.append(HumanMessage(input_value))
if system_message:
messages.append(SystemMessage(system_message))
if stream:
result = runnable.stream(messages)
else:
message = runnable.invoke(messages)
result = message.content
self.status = result
return result

View file

@ -1,37 +0,0 @@
from langchain_community.tools.searchapi import SearchAPIRun
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
from langflow import CustomComponent
from langflow.field_typing import Tool
class SearchApiToolComponent(CustomComponent):
display_name: str = "SearchApi Tool"
description: str = "Real-time search engine results API."
documentation: str = "https://www.searchapi.io/docs/google"
field_config = {
"engine": {
"display_name": "Engine",
"field_type": "str",
"info": "The search engine to use.",
},
"api_key": {
"display_name": "API Key",
"field_type": "str",
"required": True,
"password": True,
"info": "The API key to use SearchApi.",
},
}
def build(
self,
engine: str,
api_key: str,
) -> Tool:
search_api_wrapper = SearchApiAPIWrapper(engine=engine, searchapi_api_key=api_key)
tool = SearchAPIRun(api_wrapper=search_api_wrapper)
self.status = tool
return tool

View file

@ -1,103 +0,0 @@
from contextlib import contextmanager
from typing import TYPE_CHECKING, Generator
from langflow.services import ServiceType, service_manager
if TYPE_CHECKING:
from sqlmodel import Session
from langflow.services.cache.service import CacheService
from langflow.services.chat.service import ChatService
from langflow.services.credentials.service import CredentialService
from langflow.services.database.service import DatabaseService
from langflow.services.monitor.service import MonitorService
from langflow.services.plugins.service import PluginService
from langflow.services.session.service import SessionService
from langflow.services.settings.service import SettingsService
from langflow.services.socket.service import SocketIOService
from langflow.services.storage.service import StorageService
from langflow.services.store.service import StoreService
from langflow.services.task.service import TaskService
def get_socket_service() -> "SocketIOService":
return service_manager.get(ServiceType.SOCKETIO_SERVICE) # type: ignore
def get_storage_service() -> "StorageService":
return service_manager.get(ServiceType.STORAGE_SERVICE) # type: ignore
def get_credential_service() -> "CredentialService":
return service_manager.get(ServiceType.CREDENTIAL_SERVICE) # type: ignore
def get_plugins_service() -> "PluginService":
return service_manager.get(ServiceType.PLUGIN_SERVICE) # type: ignore
def get_settings_service() -> "SettingsService":
try:
return service_manager.get(ServiceType.SETTINGS_SERVICE) # type: ignore
except ValueError:
# initialize settings service
from langflow.services.manager import initialize_settings_service
initialize_settings_service()
return service_manager.get(ServiceType.SETTINGS_SERVICE) # type: ignore
def get_db_service() -> "DatabaseService":
return service_manager.get(ServiceType.DATABASE_SERVICE) # type: ignore
def get_session() -> Generator["Session", None, None]:
db_service = get_db_service()
yield from db_service.get_session()
@contextmanager
def session_scope():
"""
Context manager for managing a session scope.
Yields:
session: The session object.
Raises:
Exception: If an error occurs during the session scope.
"""
session = next(get_session())
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
def get_cache_service() -> "CacheService":
return service_manager.get(ServiceType.CACHE_SERVICE) # type: ignore
def get_session_service() -> "SessionService":
return service_manager.get(ServiceType.SESSION_SERVICE) # type: ignore
def get_monitor_service() -> "MonitorService":
return service_manager.get(ServiceType.MONITOR_SERVICE) # type: ignore
def get_task_service() -> "TaskService":
return service_manager.get(ServiceType.TASK_SERVICE) # type: ignore
def get_chat_service() -> "ChatService":
return service_manager.get(ServiceType.CHAT_SERVICE) # type: ignore
def get_store_service() -> "StoreService":
return service_manager.get(ServiceType.STORE_SERVICE) # type: ignore

View file

@ -1,83 +0,0 @@
import importlib
import inspect
from typing import TYPE_CHECKING, Type, get_type_hints
from cachetools import LRUCache, cached
from loguru import logger
from langflow.services.schema import ServiceType
if TYPE_CHECKING:
from langflow.services.base import Service
class ServiceFactory:
def __init__(
self,
service_class,
):
self.service_class = service_class
self.dependencies = infer_service_types(self, import_all_services_into_a_dict())
def create(self, *args, **kwargs) -> "Service":
raise self.service_class(*args, **kwargs)
def hash_factory(factory: ServiceFactory) -> str:
return factory.service_class.__name__
def hash_dict(d: dict) -> str:
return str(d)
def hash_infer_service_types_args(factory_class: Type[ServiceFactory], available_services=None) -> str:
factory_hash = hash_factory(factory_class)
services_hash = hash_dict(available_services)
return f"{factory_hash}_{services_hash}"
@cached(cache=LRUCache(maxsize=10), key=hash_infer_service_types_args)
def infer_service_types(factory_class: Type[ServiceFactory], available_services=None) -> "ServiceType":
create_method = factory_class.create
type_hints = get_type_hints(create_method, globalns=available_services)
service_types = []
for param_name, param_type in type_hints.items():
# Skip the return type if it's included in type hints
if param_name == "return":
continue
# Convert the type to the expected enum format directly without appending "_SERVICE"
type_name = param_type.__name__.upper().replace("SERVICE", "_SERVICE")
try:
# Attempt to find a matching enum value
service_type = ServiceType[type_name]
service_types.append(service_type)
except KeyError:
raise ValueError(f"No matching ServiceType for parameter type: {param_type.__name__}")
return service_types
@cached(cache=LRUCache(maxsize=1))
def import_all_services_into_a_dict():
    """Import every known Service subclass and return ``{name: class}``.

    Each service lives at ``langflow.services.<name>.service``; the resulting
    dict is later used as globals when resolving string type hints. Result is
    cached, so the import scan runs at most once per process.

    Raises:
        RuntimeError: if any service module fails to import or scan.
    """
    from langflow.services.base import Service

    services = {}
    for service_type in ServiceType:
        try:
            # "foo_service" -> module langflow.services.foo.service
            module_path = "langflow.services.{}.service".format(service_type.value.replace("_service", ""))
            module = importlib.import_module(module_path)
            for member_name, member in inspect.getmembers(module, inspect.isclass):
                # Keep only the first concrete Service subclass per module.
                if issubclass(member, Service) and member is not Service:
                    services[member_name] = member
                    break
        except Exception as exc:
            logger.exception(exc)
            raise RuntimeError("Could not initialize services. Please check your settings.") from exc
    return services

View file

@ -1,16 +1,19 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<head>
<meta charset="UTF-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link rel="icon" href="/favicon.ico" />
<script src="/node_modules/ace-builds/src-min-noconflict/ace.js" type="text/javascript"></script>
<script
src="/node_modules/ace-builds/src-min-noconflict/ace.js"
type="module"
></script>
<title>Langflow</title>
</head>
<body id='body' style="width: 100%; height:100%">
</head>
<body id="body" style="width: 100%; height: 100%">
<noscript>You need to enable JavaScript to run this app.</noscript>
<div style="width: 100vw; height:100vh" id='root'></div>
<div style="width: 100vw; height: 100vh" id="root"></div>
<script type="module" src="/src/index.tsx"></script>
</body>
</body>
</html>

View file

@ -969,9 +969,9 @@
}
},
"node_modules/@humanwhocodes/object-schema": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.2.tgz",
"integrity": "sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw==",
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz",
"integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==",
"dev": true
},
"node_modules/@isaacs/cliui": {
@ -4888,9 +4888,9 @@
}
},
"node_modules/caniuse-lite": {
"version": "1.0.30001600",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001600.tgz",
"integrity": "sha512-+2S9/2JFhYmYaDpZvo0lKkfvuKIglrx68MwOBqMGHhQsNkLjB5xtc/TGoEPs+MxjSyN/72qer2g97nzR641mOQ==",
"version": "1.0.30001603",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001603.tgz",
"integrity": "sha512-iL2iSS0eDILMb9n5yKQoTBim9jMZ0Yrk8g0N9K7UzYyWnfIKzXBZD5ngpM37ZcL/cv0Mli8XtVMRYMQAfFpi5Q==",
"funding": [
{
"type": "opencollective",
@ -5677,9 +5677,9 @@
"integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="
},
"node_modules/electron-to-chromium": {
"version": "1.4.722",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.722.tgz",
"integrity": "sha512-5nLE0TWFFpZ80Crhtp4pIp8LXCztjYX41yUcV6b+bKR2PqzjskTMOOlBi1VjBHlvHwS+4gar7kNKOrsbsewEZQ=="
"version": "1.4.723",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.723.tgz",
"integrity": "sha512-rxFVtrMGMFROr4qqU6n95rUi9IlfIm+lIAt+hOToy/9r6CDv0XiEcQdC3VP71y1pE5CFTzKV0RvxOGYCPWWHPw=="
},
"node_modules/emoji-regex": {
"version": "9.2.2",

View file

@ -205,7 +205,7 @@ export default function CodeTabsComponent({
<div className="api-modal-according-display">
<div
className={classNames(
"h-[70vh] w-full rounded-lg bg-muted overflow-y-auto overflow-x-hidden custom-scroll"
"h-[70vh] w-full overflow-y-auto overflow-x-hidden rounded-lg bg-muted custom-scroll"
)}
>
{data?.map((node: any, i) => (

View file

@ -1,10 +1,28 @@
const SvgGoogleGenerativeAI = (props) => (<svg fill="none" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" {...props}>
<path d="M16 8.016A8.522 8.522 0 008.016 16h-.032A8.521 8.521 0 000 8.016v-.032A8.521 8.521 0 007.984 0h.032A8.522 8.522 0 0016 7.984v.032z" fill="url(#prefix__paint0_radial_980_20147)"/>
<defs>
<radialGradient id="prefix__paint0_radial_980_20147" cx="0" cy="0" r="1" gradientUnits="userSpaceOnUse" gradientTransform="matrix(16.1326 5.4553 -43.70045 129.2322 1.588 6.503)">
<stop offset=".067" stop-color="#9168C0"/><stop offset=".343" stop-color="#5684D1"/><stop offset=".672" stop-color="#1BA1E3"/>
const SvgGoogleGenerativeAI = (props) => (
<svg
fill="none"
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
{...props}
>
<path
d="M16 8.016A8.522 8.522 0 008.016 16h-.032A8.521 8.521 0 000 8.016v-.032A8.521 8.521 0 007.984 0h.032A8.522 8.522 0 0016 7.984v.032z"
fill="url(#prefix__paint0_radial_980_20147)"
/>
<defs>
<radialGradient
id="prefix__paint0_radial_980_20147"
cx="0"
cy="0"
r="1"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(16.1326 5.4553 -43.70045 129.2322 1.588 6.503)"
>
<stop offset=".067" stop-color="#9168C0" />
<stop offset=".343" stop-color="#5684D1" />
<stop offset=".672" stop-color="#1BA1E3" />
</radialGradient>
</defs>
</defs>
</svg>
);
export default SvgGoogleGenerativeAI;

View file

@ -28,9 +28,7 @@ import {
LANGFLOW_SUPPORTED_TYPES,
limitScrollFieldsModal,
} from "../../constants/constants";
import useAlertStore from "../../stores/alertStore";
import useFlowStore from "../../stores/flowStore";
import { useGlobalVariablesStore } from "../../stores/globalVariables";
import { NodeDataType } from "../../types/flow";
import {
convertObjToArray,

View file

@ -23,7 +23,9 @@ export default function IOFieldView({
case "TextInput":
return (
<Textarea
className={`w-full custom-scroll ${left ? " min-h-32" : " h-full"}`}
className={`w-full custom-scroll ${
left ? " min-h-32" : " h-full"
}`}
placeholder={"Enter text..."}
value={node.data.node!.template["input_value"].value}
onChange={(e) => {
@ -54,7 +56,9 @@ export default function IOFieldView({
default:
return (
<Textarea
className={`w-full custom-scroll ${left ? " min-h-32" : " h-full"}`}
className={`w-full custom-scroll ${
left ? " min-h-32" : " h-full"
}`}
placeholder={"Enter text..."}
value={node.data.node!.template["input_value"]}
onChange={(e) => {
@ -74,7 +78,9 @@ export default function IOFieldView({
case "TextOutput":
return (
<Textarea
className={`w-full custom-scroll ${left ? " min-h-32" : " h-full"}`}
className={`w-full custom-scroll ${
left ? " min-h-32" : " h-full"
}`}
placeholder={"Empty"}
// update to real value on flowPool
value={
@ -89,7 +95,9 @@ export default function IOFieldView({
default:
return (
<Textarea
className={`w-full custom-scroll ${left ? " min-h-32" : " h-full"}`}
className={`w-full custom-scroll ${
left ? " min-h-32" : " h-full"
}`}
placeholder={"Empty"}
// update to real value on flowPool
value={

View file

@ -9,6 +9,7 @@ import Robot from "../../../../../assets/robot.png";
import SanitizedHTMLWrapper from "../../../../../components/SanitizedHTMLWrapper";
import CodeTabsComponent from "../../../../../components/codeTabsComponent";
import IconComponent from "../../../../../components/genericIconComponent";
import useAlertStore from "../../../../../stores/alertStore";
import useFlowStore from "../../../../../stores/flowStore";
import { chatMessagePropsType } from "../../../../../types/components";
import { classNames, cn } from "../../../../../utils/utils";
@ -33,6 +34,7 @@ export default function ChatMessage({
const [isStreaming, setIsStreaming] = useState(false);
const eventSource = useRef<EventSource | undefined>(undefined);
const updateFlowPool = useFlowStore((state) => state.updateFlowPool);
const setErrorData = useAlertStore((state) => state.setErrorData);
const chatMessageRef = useRef(chatMessage);
// Sync ref with state
@ -53,10 +55,17 @@ export default function ChatMessage({
setChatMessage((prev) => prev + parsedData.chunk);
}
};
eventSource.current.onerror = (event) => {
eventSource.current.onerror = (event: any) => {
setIsStreaming(false);
eventSource.current?.close();
setStreamUrl(undefined);
if (JSON.parse(event.data)?.error) {
setErrorData({
title: "Error on Streaming",
list: [JSON.parse(event.data)?.error],
});
}
updateChat(chat, chatMessageRef.current);
reject(new Error("Streaming failed"));
};
eventSource.current.addEventListener("close", (event) => {

View file

@ -1,6 +1,5 @@
import { useEffect, useRef, useState } from "react";
import IconComponent from "../../../../components/genericIconComponent";
import { NOCHATOUTPUT_NOTICE_ALERT } from "../../../../constants/alerts_constants";
import {
CHAT_FIRST_INITIAL_TEXT,
CHAT_SECOND_INITIAL_TEXT,
@ -124,7 +123,7 @@ export default function ChatView({
message: string,
stream_url?: string
) {
if (message === "") return;
// if (message === "") return;
chat.message = message;
// chat is one of the chatHistory
updateFlowPool(chat.componentId, {

View file

@ -10,6 +10,12 @@ export default function NewFlowModal({
}: newFlowModalPropsType): JSX.Element {
const examples = useFlowsManagerStore((state) => state.examples);
examples.forEach((example) => {
if (example.name === "Blog Writter") {
example.name = "Blog Writer";
}
});
return (
<BaseModal size="three-cards" open={open} setOpen={setOpen}>
<BaseModal.Header description={"Select a template below"}>

View file

@ -143,7 +143,6 @@ export default function ShareModal({
});
});
};
console.log("ShareModal");
const handleUpdateComponent = () => {
handleShareComponent(true);

View file

@ -477,8 +477,8 @@ export default function Page({
<Background className="" />
{!view && (
<Controls
className="[&>button]:bg-muted fill-foreground stroke-foreground text-primary
[&>button]:border-b-border hover:[&>button]:bg-border"
className="fill-foreground stroke-foreground text-primary [&>button]:border-b-border
[&>button]:bg-muted hover:[&>button]:bg-border"
></Controls>
)}
<SelectionMenu

View file

@ -378,7 +378,7 @@ export default function ExtraSidebar(): JSX.Element {
Object.keys(dataFilter[SBSectionName]).length > 0 ? (
<>
<DisclosureComponent
isChild={false}
isChild={false}
openDisc={
getFilterEdge.length !== 0 || search.length !== 0
? true

View file

@ -664,6 +664,9 @@ export function reconnectEdges(groupNode: NodeType, excludedEdges: Edge[]) {
let newEdges = cloneDeep(excludedEdges);
const { nodes, edges } = groupNode.data.node!.flow!.data!;
const lastNode = findLastNode(groupNode.data.node!.flow!.data!);
newEdges = newEdges.filter(
(e) => !(nodes.some((n) => n.id === e.source) && e.source !== lastNode?.id)
);
newEdges.forEach((edge) => {
if (lastNode && edge.source === lastNode.id) {
edge.source = groupNode.id;

Some files were not shown because too many files have changed in this diff Show more