merge fix zustand

This commit is contained in:
cristhianzl 2024-04-01 16:38:01 -03:00
commit 6a8ca8db47
160 changed files with 9418 additions and 6208 deletions

4
.vscode/launch.json vendored
View file

@ -13,7 +13,9 @@
"7860",
"--reload",
"--log-level",
"debug"
"debug",
"--loop",
"asyncio"
],
"jinja": true,
"justMyCode": false,

View file

@ -1,6 +1,11 @@
.PHONY: all init format lint build build_frontend install_frontend run_frontend run_backend dev help tests coverage
all: help
log_level ?= debug
host ?= 0.0.0.0
port ?= 7860
env ?= .env
open_browser ?= true
setup_poetry:
pipx install poetry
@ -69,17 +74,16 @@ endif
run_cli:
@echo 'Running the CLI'
@make install_frontend > /dev/null
@echo 'Building the frontend'
@make build_frontend > /dev/null
@echo 'Install backend dependencies'
@make install_backend > /dev/null
@echo 'Building the frontend'
@make build_frontend > /dev/null
ifdef env
poetry run langflow run --path src/frontend/build --host $(host) --port $(port) --env-file $(env)
@make start env=$(env) host=$(host) port=$(port) log_level=$(log_level)
else
poetry run langflow run --path src/frontend/build --host $(host) --port $(port) --env-file .env
@make start host=$(host) port=$(port) log_level=$(log_level)
endif
run_cli_debug:
@echo 'Running the CLI in debug mode'
@make install_frontend > /dev/null
@ -88,11 +92,21 @@ run_cli_debug:
@echo 'Install backend dependencies'
@make install_backend > /dev/null
ifdef env
poetry run langflow run --path src/frontend/build --log-level debug --host $(host) --port $(port) --env-file $(env)
@make start env=$(env) host=$(host) port=$(port) log_level=debug
else
poetry run langflow run --path src/frontend/build --log-level debug --host $(host) --port $(port) --env-file .env
@make start host=$(host) port=$(port) log_level=debug
endif
start:
@echo 'Running the CLI'
ifeq ($(open_browser),false)
poetry run langflow run --path src/frontend/build --log-level $(log_level) --host $(host) --port $(port) --env-file $(env) --no-open-browser
else
poetry run langflow run --path src/frontend/build --log-level $(log_level) --host $(host) --port $(port) --env-file $(env)
endif
setup_devcontainer:
make init
make build_frontend
@ -120,7 +134,7 @@ backend:
@-kill -9 `lsof -t -i:7860`
ifeq ($(login),1)
@echo "Running backend without autologin";
poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env
poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env --loop asyncio
else
@echo "Running backend with autologin";
LANGFLOW_AUTO_LOGIN=True poetry run uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --env-file .env

View file

@ -15,7 +15,7 @@
[![GitHub fork](https://img.shields.io/github/forks/logspace-ai/langflow?style=social)](https://github.com/logspace-ai/langflow/fork)
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langflow_ai.svg?style=social&label=Follow%20%40langflow_ai)](https://twitter.com/langflow_ai)
[![](https://dcbadge.vercel.app/api/server/EqksyE2EX9?compact=true&style=flat)](https://discord.com/invite/EqksyE2EX9)
[![HuggingFace Spaces](https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg)](https://huggingface.co/spaces/Logspace/Langflow)
[![HuggingFace Spaces](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-md.svg)](https://huggingface.co/spaces/Logspace/Langflow?duplicate=true)
[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/logspace-ai/langflow)
The easiest way to create and customize your flow
@ -27,6 +27,8 @@ The easiest way to create and customize your flow
### <b>Locally</b>
Make sure you have Python 3.10 installed on your system.
You can install Langflow from pip:
```shell
@ -62,7 +64,7 @@ langflow run # or langflow --help
### HuggingFace Spaces
You can also check it out on [HuggingFace Spaces](https://huggingface.co/spaces/Logspace/Langflow) and run it in your browser! You can even clone it and have your own copy of Langflow to play with.
You can also check it out on HuggingFace Spaces and run it in your browser for free! [Click here to duplicate the Space](https://huggingface.co/spaces/Logspace/Langflow?duplicate=true)
# 🖥️ Command Line Interface (CLI)

View file

@ -70,7 +70,6 @@ The CustomComponent class serves as the foundation for creating custom component
| Key | Description |
| -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| _`field_type: str`_ | The type of the field (can be any of the types supported by the _`build`_ method). |
| _`is_list: bool`_ | If the field can be a list of values, meaning that the user can manually add more inputs to the same field. |
| _`options: List[str]`_ | When defined, the field becomes a dropdown menu where a list of strings defines the options to be displayed. If the _`value`_ attribute is set to one of the options, that option becomes default. For this parameter to work, _`field_type`_ should invariably be _`str`_. |
| _`multiline: bool`_ | Defines if a string field opens a text editor. Useful for longer texts. |
@ -78,13 +77,14 @@ The CustomComponent class serves as the foundation for creating custom component
| _`display_name: str`_ | Defines the name of the field. |
| _`advanced: bool`_ | Hide the field in the canvas view (displayed component settings only). Useful when a field is for advanced users. |
| _`password: bool`_ | To mask the input text. Useful to hide sensitive text (e.g. API keys). |
| _`required: bool`_ | Makes the field required. |
| _`required: bool`_ | This is determined automatically but can be used to override the default behavior. |
| _`info: str`_ | Adds a tooltip to the field. |
| _`file_types: List[str]`_ | This is a requirement if the _`field_type`_ is _file_. Defines which file types will be accepted. For example, _json_, _yaml_ or _yml_. |
| _`range_spec: langflow.field_typing.RangeSpec`_ | This is a requirement if the _`field_type`_ is _`float`_. Defines the range of values accepted and the step size. If none is defined, the default is _`[-1, 1, 0.1]`_. |
| _`title_case: bool`_ | Formats the name of the field when _`display_name`_ is not defined. Set it to False to keep the name as you set it in the _`build`_ method. |
| _`refresh_button: bool`_ | If set to True a button will appear to the right of the field, and when clicked, it will call the _`update_build_config`_ method which takes in the _`build_config`_, the name of the field (_`field_name`_) and the latest value of the field (_`field_value`_). This is useful when you want to update the _`build_config`_ based on the value of the field. |
| _`real_time_refresh: bool`_ | If set to True, the _`update_build_config`_ method will be called every time the field value changes. |
| _`field_type: str`_ | You should never define this key. It is automatically set based on the type hint of the _`build`_ method. |
<Admonition type="info" label="Tip">

View file

@ -283,7 +283,7 @@ The return type is _`Document`_.
The _`build_config`_ method is here defined to customize the component fields.
- _`options`_ determines that the field will be a dropdown menu. The list values and field type must be _`str`_.
- _`value`_ is the default option of the dropdown menu.
- _`value`_ is the default value of the field.
- _`display_name`_ is the name of the field to be displayed.
```python
@ -408,4 +408,6 @@ Once your custom components have been loaded successfully, they will appear in L
Remember, creating custom components allows you to extend the functionality of Langflow to better suit your unique needs. Happy coding!

import ZoomableImage from "/src/theme/ZoomableImage.js";
import Admonition from "@theme/Admonition";

4
poetry.lock generated
View file

@ -4846,7 +4846,6 @@ files = [
{file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fbb160554e319f7b22ecf530a80a3ff496d38e8e07ae763b9e82fadfe96f273"},
{file = "msgpack-1.0.8-cp39-cp39-win32.whl", hash = "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d"},
{file = "msgpack-1.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011"},
{file = "msgpack-1.0.8-py3-none-any.whl", hash = "sha256:24f727df1e20b9876fa6e95f840a2a2651e34c0ad147676356f4bf5fbb0206ca"},
{file = "msgpack-1.0.8.tar.gz", hash = "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3"},
]
@ -5316,6 +5315,7 @@ description = "Nvidia JIT LTO Library"
optional = true
python-versions = ">=3"
files = [
{file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_aarch64.whl", hash = "sha256:75d6498c96d9adb9435f2bbdbddb479805ddfb97b5c1b32395c694185c20ca57"},
{file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c6428836d20fe7e327191c175791d38570e10762edc588fb46749217cd444c74"},
{file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-win_amd64.whl", hash = "sha256:991905ffa2144cb603d8ca7962d75c35334ae82bf92820b6ba78157277da1ad2"},
]
@ -10240,4 +10240,4 @@ local = ["ctransformers", "llama-cpp-python", "sentence-transformers"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.12"
content-hash = "b66acb0ed04e62c9f311828307ac1503bc7a19912753c217d4ea6237f474543a"
content-hash = "014454e08274cc189a4b8b74c6577a8731bb0d9693f40f6f98b4c20bdf70b58d"

View file

@ -78,6 +78,12 @@ pytube = "^15.0.0"
llama-index = "^0.10.13"
langchain-openai = "^0.0.5"
unstructured = { extras = ["md"], version = "^0.12.4" }
opentelemetry-api = "^1.23.0"
opentelemetry-sdk = "^1.23.0"
opentelemetry-exporter-otlp = "^1.23.0"
opentelemetry-instrumentation-fastapi = "^0.44b0"
opentelemetry-instrumentation-httpx = "^0.44b0"
opentelemetry-instrumentation-asgi = "^0.44b0"
dspy-ai = "^2.4.0"
crewai = "^0.22.5"
html2text = "^2024.2.26"

View file

@ -290,6 +290,7 @@ def run_langflow(host, port, log_level, options, app):
host=host,
port=port,
log_level=log_level.lower(),
loop="asyncio",
)
else:
from langflow.server import LangflowApplication

View file

@ -223,7 +223,8 @@ async def build_vertex(
except Exception as exc:
logger.error(f"Error building vertex: {exc}")
logger.exception(exc)
raise HTTPException(status_code=500, detail=str(exc)) from exc
message = parse_exception(exc)
raise HTTPException(status_code=500, detail=message) from exc
@router.get("/build/{flow_id}/{vertex_id}/stream", response_class=StreamingResponse)

View file

@ -12,6 +12,7 @@ from langflow.api.v1.schemas import (
InputValueRequest,
ProcessResponse,
RunResponse,
SimplifiedAPIRequest,
TaskStatusResponse,
Tweaks,
UpdateCustomComponentRequest,
@ -22,7 +23,7 @@ from langflow.graph.schema import RunOutputs
from langflow.interface.custom.custom_component import CustomComponent
from langflow.interface.custom.directory_reader import DirectoryReader
from langflow.interface.custom.utils import build_custom_component_template
from langflow.processing.process import process_tweaks, run_graph
from langflow.processing.process import process_tweaks, run_graph_internal
from langflow.services.auth.utils import api_key_security, get_current_active_user
from langflow.services.cache.utils import save_uploaded_file
from langflow.services.database.models.flow import Flow
@ -51,7 +52,138 @@ def get_all(
@router.post("/run/{flow_id}", response_model=RunResponse, response_model_exclude_none=True)
async def run_flow_with_caching(
async def simplified_run_flow(
db: Annotated[Session, Depends(get_session)],
flow_id: str,
input_request: SimplifiedAPIRequest = SimplifiedAPIRequest(),
stream: bool = False,
api_key_user: User = Depends(api_key_security),
session_service: SessionService = Depends(get_session_service),
):
"""
Executes a specified flow by ID with input customization, performance enhancements through caching, and optional data streaming.
### Parameters:
- `db` (Session): Database session for executing queries.
- `flow_id` (str): Unique identifier of the flow to be executed.
- `input_request` (SimplifiedAPIRequest): Request object containing input values, types, output selection, tweaks, and session ID.
- `api_key_user` (User): User object derived from the provided API key, used for authentication.
- `session_service` (SessionService): Service for managing flow sessions, essential for session reuse and caching.
### SimplifiedAPIRequest:
- `input_value` (Optional[str], default=""): Input value to pass to the flow.
- `input_type` (Optional[Literal["chat", "text", "any"]], default="chat"): Type of the input value, determining how the input is interpreted.
- `output_type` (Optional[Literal["chat", "text", "any", "debug"]], default="chat"): Desired type of output, affecting which components' outputs are included in the response. If set to "debug", all outputs are returned.
- `output_component` (Optional[str], default=None): Specific component output to retrieve. If provided, only the output of the specified component is returned. This overrides the `output_type` parameter.
- `tweaks` (Optional[Tweaks], default=None): Adjustments to the flow's behavior, allowing for custom execution parameters.
- `session_id` (Optional[str], default=None): An identifier for reusing session data, aiding in performance for subsequent requests.
### Tweaks
A dictionary of tweaks to customize the flow execution. The tweaks can be used to modify the flow's parameters and components. Tweaks can be overridden by the input values.
You can use Component's `id` or Display Name as key to tweak a specific component (e.g., `{"Component Name": {"parameter_name": "value"}}`).
You can also use the parameter name as key to tweak all components with that parameter (e.g., `{"parameter_name": "value"}`).
### Returns:
- A `RunResponse` object containing the execution results, including selected (or all, based on `output_type`) outputs of the flow and the session ID, facilitating result retrieval and further interactions in a session context.
### Raises:
- `HTTPException`: 404 if the specified flow ID or session ID is not found.
### Example:
```bash
curl -X 'POST' \
'http://<your_server>/run/{flow_id}' \
-H 'accept: application/json' \
-H 'Content-Type: application/json' \
-H 'x-api-key: YOUR_API_KEY' \
-d '{
"input_value": "Sample input",
"input_type": "chat",
"output_type": "chat",
"tweaks": {},
}'
```
This endpoint provides a powerful interface for executing flows with enhanced flexibility and efficiency, supporting a wide range of applications by allowing for dynamic input and output configuration along with performance optimizations through session management and caching.
"""
session_id = input_request.session_id
try:
task_result: List[RunOutputs] = []
artifacts = {}
if input_request.session_id:
session_data = await session_service.load_session(input_request.session_id, flow_id=flow_id)
graph, artifacts = session_data if session_data else (None, None)
if graph is None:
raise ValueError(f"Session {input_request.session_id} not found")
else:
# Get the flow that matches the flow_id and belongs to the user
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
flow = db.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
if flow is None:
raise ValueError(f"Flow {flow_id} not found")
if flow.data is None:
raise ValueError(f"Flow {flow_id} has no data")
graph_data = flow.data
graph_data = process_tweaks(graph_data, input_request.tweaks or {})
graph = Graph.from_payload(graph_data, flow_id=flow_id)
inputs = [
InputValueRequest(components=[], input_value=input_request.input_value, type=input_request.input_type)
]
# outputs is a list of all components that should return output
# we need to get them by checking their type
# if the output type is debug, we return all outputs
# if the output type is any, we return all outputs that are either chat or text
# if the output type is chat or text, we return only the outputs that match the type
if input_request.output_component:
outputs = [input_request.output_component]
else:
outputs = [
vertex.id
for vertex in graph.vertices
if input_request.output_type == "debug"
or (
vertex.is_output
and (input_request.output_type == "any" or input_request.output_type in vertex.id.lower())
)
]
task_result, session_id = await run_graph_internal(
graph=graph,
flow_id=flow_id,
session_id=input_request.session_id,
inputs=inputs,
outputs=outputs,
artifacts=artifacts,
session_service=session_service,
stream=stream,
)
return RunResponse(outputs=task_result, session_id=session_id)
except sa.exc.StatementError as exc:
# StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
if "badly formed hexadecimal UUID string" in str(exc):
logger.error(f"Flow ID {flow_id} is not a valid UUID")
# This means the Flow ID is not a valid UUID which means it can't find the flow
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
except ValueError as exc:
if f"Flow {flow_id} not found" in str(exc):
logger.error(f"Flow {flow_id} not found")
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
elif f"Session {session_id} not found" in str(exc):
logger.error(f"Session {session_id} not found")
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
else:
logger.exception(exc)
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(exc)) from exc
except Exception as exc:
logger.exception(exc)
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(exc)) from exc
@router.post("/run/advanced/{flow_id}", response_model=RunResponse, response_model_exclude_none=True)
async def experimental_run_flow(
session: Annotated[Session, Depends(get_session)],
flow_id: str,
inputs: Optional[List[InputValueRequest]] = [InputValueRequest(components=[], input_value="")],
@ -85,6 +217,7 @@ async def run_flow_with_caching(
### Example usage:
```json
POST /run/{flow_id}
x-api-key: YOUR_API_KEY
Payload:
{
"inputs": [
@ -122,7 +255,7 @@ async def run_flow_with_caching(
graph_data = flow.data
graph_data = process_tweaks(graph_data, tweaks or {})
graph = Graph.from_payload(graph_data, flow_id=flow_id)
task_result, session_id = await run_graph(
task_result, session_id = await run_graph_internal(
graph=graph,
flow_id=flow_id,
session_id=session_id,

View file

@ -1,13 +1,14 @@
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Union
from typing import Any, Dict, List, Optional, Union
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field, RootModel, field_validator, model_serializer
from langflow.graph.schema import RunOutputs
from langflow.schema import dotdict
from langflow.schema.schema import InputType, OutputType
from langflow.services.database.models.api_key.model import ApiKeyRead
from langflow.services.database.models.base import orjson_dumps
from langflow.services.database.models.flow import FlowCreate, FlowRead
@ -259,7 +260,7 @@ class VerticesBuiltResponse(BaseModel):
class InputValueRequest(BaseModel):
components: Optional[List[str]] = []
input_value: Optional[str] = None
type: Optional[Literal["chat", "text", "json", "any"]] = Field(
type: Optional[InputType] = Field(
"any",
description="Defines on which components the input value should be applied. 'any' applies to all input components.",
)
@ -310,3 +311,15 @@ class Tweaks(RootModel):
def items(self):
return self.root.items()
class SimplifiedAPIRequest(BaseModel):
input_value: Optional[str] = Field(default="", description="The input value")
input_type: Optional[InputType] = Field(default="chat", description="The input type")
output_type: Optional[OutputType] = Field(default="chat", description="The output type")
output_component: Optional[str] = Field(
default="",
description="If there are multiple output components, you can specify the component to get the output from.",
)
tweaks: Optional[Tweaks] = Field(default=None, description="The tweaks")
session_id: Optional[str] = Field(default=None, description="The session id")

View file

@ -0,0 +1,27 @@
"""
This module contains constants used in the Langflow base module.
Constants:
- STREAM_INFO_TEXT: A string representing the information about streaming the response from the model.
- NODE_FORMAT_ATTRIBUTES: A list of attributes used for formatting nodes.
- FIELD_FORMAT_ATTRIBUTES: A list of attributes used for formatting fields.
"""
STREAM_INFO_TEXT = "Stream the response from the model. Streaming works only in Chat."
NODE_FORMAT_ATTRIBUTES = ["beta", "icon", "display_name", "description"]
FIELD_FORMAT_ATTRIBUTES = [
"info",
"display_name",
"required",
"list",
"multiline",
"fileTypes",
"password",
"input_types",
"title_case",
"real_time_refresh",
"refresh_button",
"refresh_button_text",
]

View file

@ -113,6 +113,6 @@ class ChatComponent(CustomComponent):
else:
result = input_value
self.status = result
if session_id:
if session_id and isinstance(result, (Record, str)):
self.store_message(result, session_id, sender, sender_name)
return result

View file

@ -12,16 +12,29 @@ class TextComponent(CustomComponent):
def build_config(self):
return {
"input_value": {"display_name": "Value", "input_types": ["Record"], "info": "Text or Record to be passed."},
"record_template": {"display_name": "Record Template", "multiline": True},
"input_value": {
"display_name": "Value",
"input_types": ["Text", "Record"],
"info": "Text or Record to be passed.",
},
"record_template": {
"display_name": "Record Template",
"multiline": True,
"info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.",
"advanced": True,
},
}
def build(
self,
input_value: Optional[Union[Text, Record]] = "",
record_template: Optional[str] = "Text: {text}\nData: {data}",
record_template: Optional[str] = "{text}",
) -> Text:
if isinstance(input_value, Record):
if record_template == "":
# it should be dynamically set to the Record's .text_key value
# meaning, if text_key = "bacon", then record_template = "{bacon}"
record_template = "{" + input_value.text_key + "}"
input_value = records_to_text(template=record_template, records=input_value)
self.status = input_value
if not input_value:

View file

@ -35,6 +35,8 @@ class LCModelComponent(CustomComponent):
self, runnable: BaseChatModel, stream: bool, input_value: str, system_message: Optional[str] = None
):
messages: list[Union[HumanMessage, SystemMessage]] = []
if not input_value and not system_message:
raise ValueError("The message you want to send to the model is empty.")
if system_message:
messages.append(SystemMessage(content=system_message))
if input_value:

View file

@ -42,7 +42,7 @@ class ConversationalAgent(CustomComponent):
"temperature": {
"display_name": "Temperature",
"value": 0.2,
"range_spec": RangeSpec(min=0, max=2, step=0.1),
"rangeSpec": RangeSpec(min=0, max=2, step=0.1),
},
}

View file

@ -1,11 +1,11 @@
from typing import Optional
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.retrieval_qa.base import RetrievalQA
from langchain_core.documents import Document
from langflow.field_typing import BaseMemory, BaseRetriever, Text
from langflow.field_typing import BaseLanguageModel, BaseMemory, BaseRetriever, Text
from langflow.interface.custom.custom_component import CustomComponent
from langflow.schema.schema import Record
class RetrievalQAComponent(CustomComponent):
@ -14,7 +14,8 @@ class RetrievalQAComponent(CustomComponent):
def build_config(self):
return {
"combine_documents_chain": {"display_name": "Combine Documents Chain"},
"llm": {"display_name": "LLM"},
"chain_type": {"display_name": "Chain Type", "options": ["Stuff", "Map Reduce", "Refine", "Map Rerank"]},
"retriever": {"display_name": "Retriever"},
"memory": {"display_name": "Memory", "required": False},
"input_key": {"display_name": "Input Key", "advanced": True},
@ -22,13 +23,14 @@ class RetrievalQAComponent(CustomComponent):
"return_source_documents": {"display_name": "Return Source Documents"},
"input_value": {
"display_name": "Input",
"input_types": ["Text", "Document"],
"input_types": ["Record", "Document"],
},
}
def build(
self,
combine_documents_chain: BaseCombineDocumentsChain,
llm: BaseLanguageModel,
chain_type: str,
retriever: BaseRetriever,
input_value: str = "",
memory: Optional[BaseMemory] = None,
@ -36,8 +38,10 @@ class RetrievalQAComponent(CustomComponent):
output_key: str = "result",
return_source_documents: bool = True,
) -> Text:
runnable = RetrievalQA(
combine_documents_chain=combine_documents_chain,
chain_type = chain_type.lower().replace(" ", "_")
runnable = RetrievalQA.from_chain_type(
llm=llm,
chain_type=chain_type,
retriever=retriever,
memory=memory,
input_key=input_key,
@ -46,6 +50,8 @@ class RetrievalQAComponent(CustomComponent):
)
if isinstance(input_value, Document):
input_value = input_value.page_content
if isinstance(input_value, Record):
input_value = input_value.get_text()
self.status = runnable
result = runnable.invoke({input_key: input_value})
result = result.content if hasattr(result, "content") else result

View file

@ -16,7 +16,7 @@ class RetrievalQAWithSourcesChainComponent(CustomComponent):
"llm": {"display_name": "LLM"},
"chain_type": {
"display_name": "Chain Type",
"options": ["stuff", "map_reduce", "map_rerank", "refine"],
"options": ["Stuff", "Map Reduce", "Refine", "Map Rerank"],
"info": "The type of chain to use to combined Documents.",
},
"memory": {"display_name": "Memory"},
@ -37,6 +37,7 @@ class RetrievalQAWithSourcesChainComponent(CustomComponent):
memory: Optional[BaseMemory] = None,
return_source_documents: Optional[bool] = True,
) -> Text:
chain_type = chain_type.lower().replace(" ", "_")
runnable = RetrievalQAWithSourcesChain.from_chain_type(
llm=llm,
chain_type=chain_type,

View file

@ -13,6 +13,8 @@ class APIRequest(CustomComponent):
description: str = "Make HTTP requests given one or more URLs."
output_types: list[str] = ["Record"]
documentation: str = "https://docs.langflow.org/components/utilities#api-request"
icon = "Globe"
field_config = {
"urls": {"display_name": "URLs", "info": "URLs to make requests to."},
"method": {
@ -25,12 +27,12 @@ class APIRequest(CustomComponent):
"headers": {
"display_name": "Headers",
"info": "The headers to send with the request.",
"input_types": ["dict"],
"input_types": ["Record"],
},
"body": {
"display_name": "Body",
"info": "The body to send with the request (for POST, PATCH, PUT).",
"input_types": ["dict"],
"input_types": ["Record"],
},
"timeout": {
"display_name": "Timeout",
@ -45,8 +47,8 @@ class APIRequest(CustomComponent):
client: httpx.AsyncClient,
method: str,
url: str,
headers: Optional[dict] = None,
body: Optional[dict] = None,
headers: Optional[Record] = None,
body: Optional[Record] = None,
timeout: int = 5,
) -> Record:
method = method.upper()
@ -56,7 +58,9 @@ class APIRequest(CustomComponent):
data = body if body else None
payload = json.dumps(data)
try:
response = await client.request(method, url, headers=headers, content=payload, timeout=timeout)
response = await client.request(
method, url, headers=headers, content=payload, timeout=timeout
)
try:
result = response.json()
except Exception:
@ -92,24 +96,31 @@ class APIRequest(CustomComponent):
self,
method: str,
urls: List[str],
headers: Optional[dict] = None,
body: Optional[List[Record]] = None,
headers: Optional[Record] = None,
body: Optional[Record] = None,
timeout: int = 5,
) -> List[Record]:
if headers is None:
headers = {}
else:
headers = headers.data
bodies = []
if body:
if isinstance(body, list):
bodies = [b.data for b in body]
else:
bodies = [body.data]
if len(urls) != len(bodies):
# add bodies with None
bodies += [None] * (len(urls) - len(bodies)) # type: ignore
async with httpx.AsyncClient() as client:
results = await asyncio.gather(
*[self.make_request(client, method, u, headers, rec, timeout) for u, rec in zip(urls, bodies)]
*[
self.make_request(client, method, u, headers, rec, timeout)
for u, rec in zip(urls, bodies)
]
)
self.status = results
return results

View file

@ -8,6 +8,7 @@ from langflow.schema import Record
class DirectoryComponent(CustomComponent):
display_name = "Directory"
description = "Recursively load files from a directory."
icon = "folder"
def build_config(self) -> Dict[str, Any]:
return {

View file

@ -9,6 +9,7 @@ from langflow.schema import Record
class FileComponent(CustomComponent):
display_name = "File"
description = "A generic file loader."
icon = "file-text"
def build_config(self) -> Dict[str, Any]:
return {

View file

@ -9,6 +9,7 @@ from langflow.schema import Record
class URLComponent(CustomComponent):
display_name = "URL"
description = "Fetch content from one or more URLs."
icon = "layout-template"
def build_config(self) -> Dict[str, Any]:
return {

View file

@ -16,6 +16,7 @@ class CohereEmbeddingsComponent(CustomComponent):
"truncate": {"display_name": "Truncate", "advanced": True},
"max_retries": {"display_name": "Max Retries", "advanced": True},
"user_agent": {"display_name": "User Agent", "advanced": True},
"request_timeout": {"display_name": "Request Timeout", "advanced": True},
}
def build(

View file

@ -45,12 +45,24 @@ class OpenAIEmbeddingsComponent(CustomComponent):
"model": {
"display_name": "Model",
"advanced": False,
"options": ["text-embedding-3-small", "text-embedding-3-large", "text-embedding-ada-002"],
"options": [
"text-embedding-3-small",
"text-embedding-3-large",
"text-embedding-ada-002",
],
},
"model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
"openai_api_base": {"display_name": "OpenAI API Base", "password": True, "advanced": True},
"openai_api_base": {
"display_name": "OpenAI API Base",
"password": True,
"advanced": True,
},
"openai_api_key": {"display_name": "OpenAI API Key", "password": True},
"openai_api_type": {"display_name": "OpenAI API Type", "advanced": True, "password": True},
"openai_api_type": {
"display_name": "OpenAI API Type",
"advanced": True,
"password": True,
},
"openai_api_version": {
"display_name": "OpenAI API Version",
"advanced": True,
@ -66,12 +78,16 @@ class OpenAIEmbeddingsComponent(CustomComponent):
"advanced": True,
},
"skip_empty": {"display_name": "Skip Empty", "advanced": True},
"tiktoken_model_name": {"display_name": "TikToken Model Name"},
"tikToken_enable": {"display_name": "TikToken Enable", "advanced": True},
"tiktoken_model_name": {
"display_name": "TikToken Model Name",
"advanced": True,
},
"tiktoken_enable": {"display_name": "TikToken Enable", "advanced": True},
}
def build(
self,
openai_api_key: str,
default_headers: Optional[Dict[str, str]] = None,
default_query: Optional[NestedDict] = {},
allowed_special: List[str] = [],
@ -84,7 +100,6 @@ class OpenAIEmbeddingsComponent(CustomComponent):
model: str = "text-embedding-3-small",
model_kwargs: NestedDict = {},
openai_api_base: Optional[str] = None,
openai_api_key: Optional[str] = "",
openai_api_type: Optional[str] = None,
openai_api_version: Optional[str] = None,
openai_organization: Optional[str] = None,

View file

@ -1,19 +1,19 @@
from typing import Any, List, Optional
from langflow.helpers.flow import get_flow_inputs
from loguru import logger
from langflow.custom import CustomComponent
from langflow.graph.graph.base import Graph
from langflow.graph.schema import ResultData, RunOutputs
from langflow.graph.vertex.base import Vertex
from langflow.helpers.flow import get_flow_inputs
from langflow.schema import Record
from langflow.schema.dotdict import dotdict
from langflow.template.field.base import TemplateField
class SubFlowComponent(CustomComponent):
display_name = "SubFlow"
display_name = "Sub Flow"
description = "Dynamically Generates a Component from a Flow. The output is a list of records with keys 'result' and 'message'."
beta: bool = True
field_order = ["flow_name"]

View file

@ -24,4 +24,5 @@ __all__ = [
"RunnableExecComponent",
"SQLExecutorComponent",
"SubFlowComponent",
"PythonFunctionComponent",
]

View file

@ -0,0 +1,29 @@
from langflow.interface.custom.custom_component import CustomComponent
from langflow.field_typing import Text
class CombineTextComponent(CustomComponent):
    """Joins two pieces of text into one string with a configurable delimiter."""

    display_name = "Combine Text"
    description = "Concatenate two text sources into a single text chunk using a specified delimiter."
    icon = "merge"

    def build_config(self):
        # Field metadata rendered by the UI for each of the three inputs.
        config = {}
        config["text1"] = {
            "display_name": "First Text",
            "info": "The first text input to concatenate.",
        }
        config["text2"] = {
            "display_name": "Second Text",
            "info": "The second text input to concatenate.",
        }
        config["delimiter"] = {
            "display_name": "Delimiter",
            "info": "A string used to separate the two text inputs. Defaults to a whitespace.",
        }
        return config

    def build(self, text1: str, text2: str, delimiter: str = " ") -> Text:
        """Return ``text1`` and ``text2`` joined by ``delimiter``.

        Also records the combined value on ``self.status`` so the UI can
        display the component's output.
        """
        # Equivalent to delimiter.join([text1, text2]).
        result = text1 + delimiter + text2
        self.status = result
        return result

View file

@ -1,25 +1,80 @@
from langflow.interface.custom.custom_component import CustomComponent
from typing import Any
from langflow.custom import CustomComponent
from langflow.field_typing.range_spec import RangeSpec
from langflow.schema import Record
from langflow.schema.dotdict import dotdict
from langflow.template.field.base import TemplateField
class CreateRecordComponent(CustomComponent):
display_name = "Create Record"
description = "Create a Record structure using text-based key/value pairs."
beta: bool = True
description = "Dynamically create a Record with a specified number of fields."
field_order = ["number_of_fields", "text_key"]
def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):
if field_name == "number_of_fields":
default_keys = ["code", "_type", "number_of_fields", "text_key"]
try:
field_value_int = int(field_value)
except TypeError:
return build_config
existing_fields = {}
if field_value_int > 15:
build_config["number_of_fields"]["value"] = 15
raise ValueError("Number of fields cannot exceed 15. Try using a Component to combine two Records.")
if len(build_config) > len(default_keys) + field_value_int:
# back up the existing template fields
for key in build_config.copy():
if key not in default_keys:
existing_fields[key] = build_config.pop(key)
for i in range(1, field_value_int + 1):
key = f"field_{i}_key"
if key in existing_fields:
field = existing_fields[key]
build_config[key] = field
else:
field = TemplateField(
display_name=f"Field {i}",
name=key,
info=f"Key for field {i}.",
field_type="dict",
input_types=["Text", "Record"],
)
build_config[field.name] = field.to_dict()
return build_config
def build_config(self):
return {
"data": {
"display_name": "Data",
"info": "Data to contruct the record.",
"input_types": ["Text"],
}
"number_of_fields": {
"display_name": "Number of Fields",
"info": "Number of fields to be added to the record.",
"real_time_refresh": True,
"rangeSpec": RangeSpec(min=1, max=15, step=1, step_type="int"),
},
"text_key": {
"display_name": "Text Key",
"info": "Key to be used as text.",
"advanced": True,
},
}
def build(
self,
data: dict,
number_of_fields: int = 0,
text_key: str = "text",
**kwargs,
) -> Record:
return_record = Record(data=data)
data = {}
for value_dict in kwargs.values():
if isinstance(value_dict, dict):
# Check if the value of the value_dict is a Record
value_dict = {
key: value.get_text() if isinstance(value, Record) else value for key, value in value_dict.items()
}
data.update(value_dict)
return_record = Record(data=data, text_key=text_key)
self.status = return_record
return return_record

View file

@ -0,0 +1,67 @@
from typing import Optional
from langflow.field_typing import Text
from langflow.helpers.record import records_to_text
from langflow.interface.custom.custom_component import CustomComponent
from langflow.memory import get_messages
class MemoryComponent(CustomComponent):
    """Fetches stored chat messages for a session and renders them as text."""

    display_name = "Chat Memory"
    description = "Retrieves stored chat messages given a specific Session ID."
    beta: bool = True
    icon = "history"

    def build_config(self):
        # UI metadata for the component's inputs.
        return {
            "sender": {
                "options": ["Machine", "User", "Machine and User"],
                "display_name": "Sender Type",
            },
            "sender_name": {"display_name": "Sender Name", "advanced": True},
            "n_messages": {
                "display_name": "Number of Messages",
                "info": "Number of messages to retrieve.",
                "advanced": True,
            },
            "session_id": {
                "display_name": "Session ID",
                "info": "Session ID of the chat history.",
                "input_types": ["Text"],
            },
            "order": {
                "options": ["Ascending", "Descending"],
                "display_name": "Order",
                "info": "Order of the messages.",
                "advanced": True,
            },
            "record_template": {
                "display_name": "Record Template",
                "multiline": True,
                "info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.",
                "advanced": True,
            },
        }

    def build(
        self,
        sender: Optional[str] = "Machine and User",
        sender_name: Optional[str] = None,
        session_id: Optional[str] = None,
        n_messages: int = 5,
        order: Optional[str] = "Descending",
        record_template: Optional[str] = "{sender_name}: {text}",
    ) -> Text:
        """Retrieve up to ``n_messages`` messages and format them with ``record_template``."""
        # Translate the UI label into the storage layer's sort keyword;
        # anything other than "Descending" falls back to ascending order.
        sort_keyword = "ASC"
        if order == "Descending":
            sort_keyword = "DESC"
        # "Machine and User" means no sender filter at all.
        sender_filter = None if sender == "Machine and User" else sender
        records = get_messages(
            sender=sender_filter,
            sender_name=sender_name,
            session_id=session_id,
            limit=n_messages,
            order=sort_keyword,
        )
        rendered = records_to_text(template=record_template, records=records)
        self.status = rendered
        return rendered

View file

@ -16,7 +16,7 @@ class MessageHistoryComponent(CustomComponent):
"options": ["Machine", "User", "Machine and User"],
"display_name": "Sender Type",
},
"sender_name": {"display_name": "Sender Name"},
"sender_name": {"display_name": "Sender Name", "advanced": True},
"n_messages": {
"display_name": "Number of Messages",
"info": "Number of messages to retrieve.",
@ -26,15 +26,23 @@ class MessageHistoryComponent(CustomComponent):
"info": "Session ID of the chat history.",
"input_types": ["Text"],
},
"order": {
"options": ["Ascending", "Descending"],
"display_name": "Order",
"info": "Order of the messages.",
"advanced": True,
},
}
def build(
self,
sender: Optional[str] = "Machine",
sender: Optional[str] = "Machine and User",
sender_name: Optional[str] = None,
session_id: Optional[str] = None,
n_messages: int = 5,
order: Optional[str] = "Descending",
) -> List[Record]:
order = "DESC" if order == "Descending" else "ASC"
if sender == "Machine and User":
sender = None
messages = get_messages(
@ -42,6 +50,7 @@ class MessageHistoryComponent(CustomComponent):
sender_name=sender_name,
session_id=session_id,
limit=n_messages,
order=order,
)
self.status = messages
return messages

View file

@ -7,7 +7,6 @@ from langflow.schema import Record
class RecordsToTextComponent(CustomComponent):
display_name = "Records To Text"
description = "Convert Records into plain text following a specified template."
def build_config(self):
return {

View file

@ -0,0 +1,87 @@
from typing import Optional
from langchain.text_splitter import (
RecursiveCharacterTextSplitter,
CharacterTextSplitter,
)
from langchain_core.documents import Document
from langflow.interface.custom.custom_component import CustomComponent
from langflow.schema import Record
from langflow.field_typing import Text
from langflow.utils.util import build_loader_repr_from_records, unescape_string
class SplitTextComponent(CustomComponent):
    """Splits input texts into (optionally overlapping) chunks via LangChain splitters."""

    display_name: str = "Split Text"
    description: str = "Split text into chunks of a specified length."

    def build_config(self):
        # UI metadata for the component's inputs.
        return {
            "texts": {
                "display_name": "Texts",
                "info": "Texts to split.",
                "input_types": ["Text"],
            },
            "separators": {
                "display_name": "Separators",
                "info": 'The characters to split on. Defaults to [" "].',
                "is_list": True,
            },
            "chunk_size": {
                "display_name": "Max Chunk Size",
                "info": "The maximum length (in number of characters) of each chunk.",
                "field_type": "int",
                "value": 1000,
            },
            "chunk_overlap": {
                "display_name": "Chunk Overlap",
                "info": "The amount of character overlap between chunks.",
                "field_type": "int",
                "value": 200,
            },
            "recursive": {
                "display_name": "Recursive",
            },
            "code": {"show": False},
        }

    def build(
        self,
        texts: list[Text],
        separators: Optional[list[str]] = None,
        chunk_size: Optional[int] = 1000,
        chunk_overlap: Optional[int] = 200,
        recursive: bool = False,
    ) -> list[Record]:
        """Split ``texts`` into chunks and return them as Records.

        Args:
            texts: The input strings to split.
            separators: Separator strings (escape sequences like "\\n" are
                unescaped). Defaults to [" "] when empty or None.
            chunk_size: Maximum chunk length in characters.
            chunk_overlap: Character overlap between consecutive chunks.
            recursive: Use RecursiveCharacterTextSplitter (all separators)
                instead of CharacterTextSplitter (first separator only).
        """
        # Resolve the default here instead of using a mutable default
        # argument; also guards the declared-Optional parameter actually
        # being None (or an empty list, which would break separators[0]).
        if not separators:
            separators = [" "]
        separators = [unescape_string(x) for x in separators]
        # The UI may deliver the numeric fields as strings (or omit them);
        # coerce to int and fall back to the documented defaults.
        chunk_size = int(chunk_size) if chunk_size is not None else 1000
        chunk_overlap = int(chunk_overlap) if chunk_overlap is not None else 200
        if recursive:
            splitter = RecursiveCharacterTextSplitter(
                separators=separators,
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
            )
        else:
            splitter = CharacterTextSplitter(
                separator=separators[0],
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
            )
        documents = [Document(page_content=text) for text in texts]
        records = self.to_records(splitter.split_documents(documents))
        self.status = records
        return records

View file

@ -4,13 +4,13 @@ from .DocumentToRecord import DocumentToRecordComponent
from .IDGenerator import UUIDGeneratorComponent
from .MessageHistory import MessageHistoryComponent
from .UpdateRecord import UpdateRecordComponent
from .RecordsToText import RecordsToTextComponent
__all__ = [
"Component",
"UpdateRecordComponent",
"DocumentToRecordComponent",
"UUIDGeneratorComponent",
"PythonFunctionComponent",
"RecordsToTextComponent",
"CreateRecordComponent",
"MessageHistoryComponent",

View file

@ -10,6 +10,16 @@ class ChatInput(ChatComponent):
description = "Get chat inputs from the Interaction Panel."
icon = "ChatInput"
def build_config(self):
build_config = super().build_config()
build_config["input_value"] = {
"input_types": [],
"display_name": "Message",
"multiline": True,
}
return build_config
def build(
self,
sender: Optional[str] = "User",

View file

@ -7,7 +7,7 @@ from langflow.interface.custom.custom_component import CustomComponent
class PromptComponent(CustomComponent):
display_name: str = "Prompt"
description: str = "Create a prompt template with dynamic variables."
icon = "terminal-square"
icon = "prompts"
def build_config(self):
return {

View file

@ -7,20 +7,26 @@ from langflow.field_typing import Text
class TextInput(TextComponent):
display_name = "Text Input"
description = "Get text inputs from the Interaction Panel."
icon = "type"
def build_config(self):
return {
"input_value": {
"display_name": "Value",
"input_types": ["Record"],
"input_types": ["Record", "Text"],
"info": "Text or Record to be passed as input.",
},
"record_template": {"display_name": "Record Template", "multiline": True},
"record_template": {
"display_name": "Record Template",
"multiline": True,
"info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.",
"advanced": True,
},
}
def build(
self,
input_value: Optional[str] = "",
record_template: Optional[str] = "{text}",
record_template: Optional[str] = "",
) -> Text:
return super().build(input_value=input_value, record_template=record_template)

View file

@ -30,7 +30,7 @@ class GoogleGenerativeAIComponent(CustomComponent):
"top_k": {
"display_name": "Top K",
"info": "Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.",
"range_spec": RangeSpec(min=0, max=2, step=0.1),
"rangeSpec": RangeSpec(min=0, max=2, step=0.1),
"advanced": True,
},
"top_p": {

View file

@ -2,6 +2,7 @@ from typing import Optional
from langchain_community.chat_models.bedrock import BedrockChat
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
@ -52,13 +53,21 @@ class AmazonBedrockComponent(LCModelComponent):
"credentials_profile_name": {"display_name": "Credentials Profile Name"},
"endpoint_url": {"display_name": "Endpoint URL"},
"region_name": {"display_name": "Region Name"},
"model_kwargs": {"display_name": "Model Kwargs"},
"model_kwargs": {
"display_name": "Model Kwargs",
"advanced": True,
},
"cache": {"display_name": "Cache"},
"input_value": {"display_name": "Input"},
"system_message": {"display_name": "System Message", "info": "System message to pass to the model."},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
}

View file

@ -3,8 +3,8 @@ from typing import Optional
from langchain_anthropic.chat_models import ChatAnthropic
from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class AnthropicLLM(LCModelComponent):
@ -49,25 +49,29 @@ class AnthropicLLM(LCModelComponent):
"max_tokens": {
"display_name": "Max Tokens",
"field_type": "int",
"advanced": True,
"value": 256,
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"value": 0.7,
"value": 0.1,
},
"anthropic_api_url": {
"display_name": "Anthropic API URL",
"advanced": True,
"info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
"advanced": True,
"info": STREAM_INFO_TEXT,
},
"system_message": {
"display_name": "System Message",
"advanced": True,
"info": "System message to pass to the model.",
},
}

View file

@ -3,8 +3,8 @@ from typing import Optional
from langchain.llms.base import BaseLanguageModel
from langchain_openai import AzureChatOpenAI
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class AzureChatOpenAIComponent(LCModelComponent):
@ -52,36 +52,28 @@ class AzureChatOpenAIComponent(LCModelComponent):
"display_name": "Model Name",
"value": self.AZURE_OPENAI_MODELS[0],
"options": self.AZURE_OPENAI_MODELS,
"required": True,
},
"azure_endpoint": {
"display_name": "Azure Endpoint",
"required": True,
"info": "Your Azure endpoint, including the resource.. Example: `https://example-resource.azure.openai.com/`",
},
"azure_deployment": {
"display_name": "Deployment Name",
"required": True,
},
"api_version": {
"display_name": "API Version",
"options": self.AZURE_OPENAI_API_VERSIONS,
"value": self.AZURE_OPENAI_API_VERSIONS[-1],
"required": True,
"advanced": True,
},
"api_key": {"display_name": "API Key", "required": True, "password": True},
"api_key": {"display_name": "API Key", "password": True},
"temperature": {
"display_name": "Temperature",
"value": 0.7,
"field_type": "float",
"required": False,
},
"max_tokens": {
"display_name": "Max Tokens",
"value": 1000,
"required": False,
"field_type": "int",
"advanced": True,
"info": "Maximum number of tokens to generate.",
},
@ -89,11 +81,13 @@ class AzureChatOpenAIComponent(LCModelComponent):
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
}
@ -104,9 +98,9 @@ class AzureChatOpenAIComponent(LCModelComponent):
input_value: Text,
azure_deployment: str,
api_version: str,
api_key: Optional[str] = None,
api_key: str,
temperature: float,
system_message: Optional[str] = None,
temperature: float = 0.7,
max_tokens: Optional[int] = 1000,
stream: bool = False,
) -> BaseLanguageModel:

View file

@ -3,8 +3,8 @@ from typing import Optional
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class QianfanChatEndpointComponent(LCModelComponent):
@ -43,17 +43,15 @@ class QianfanChatEndpointComponent(LCModelComponent):
"AquilaChat-7B",
],
"info": "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint",
"required": True,
"value": "ERNIE-Bot-turbo",
},
"qianfan_ak": {
"display_name": "Qianfan Ak",
"required": True,
"password": True,
"info": "which you could get from https://cloud.baidu.com/product/wenxinworkshop",
},
"qianfan_sk": {
"display_name": "Qianfan Sk",
"required": True,
"password": True,
"info": "which you could get from https://cloud.baidu.com/product/wenxinworkshop",
},
@ -62,6 +60,7 @@ class QianfanChatEndpointComponent(LCModelComponent):
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 0.8,
"advanced": True,
},
"temperature": {
"display_name": "Temperature",
@ -74,6 +73,7 @@ class QianfanChatEndpointComponent(LCModelComponent):
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 1.0,
"advanced": True,
},
"endpoint": {
"display_name": "Endpoint",
@ -83,20 +83,22 @@ class QianfanChatEndpointComponent(LCModelComponent):
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
}
def build(
self,
input_value: Text,
model: str = "ERNIE-Bot-turbo",
qianfan_ak: Optional[str] = None,
qianfan_sk: Optional[str] = None,
qianfan_ak: str,
qianfan_sk: str,
model: str,
top_p: Optional[float] = None,
temperature: Optional[float] = None,
penalty_score: Optional[float] = None,

View file

@ -3,8 +3,8 @@ from typing import Optional
from langchain_community.chat_models.cohere import ChatCohere
from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class CohereComponent(LCModelComponent):
@ -29,9 +29,11 @@ class CohereComponent(LCModelComponent):
"display_name": "Cohere API Key",
"type": "password",
"password": True,
"required": True,
},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,
"default": 256,
"type": "int",
"show": True,
@ -45,11 +47,13 @@ class CohereComponent(LCModelComponent):
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
}

View file

@ -3,8 +3,8 @@ from typing import Optional
from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import RangeSpec, Text
class GoogleGenerativeAIComponent(LCModelComponent):
@ -34,6 +34,7 @@ class GoogleGenerativeAIComponent(LCModelComponent):
"max_output_tokens": {
"display_name": "Max Output Tokens",
"info": "The maximum number of tokens to generate.",
"advanced": True,
},
"temperature": {
"display_name": "Temperature",
@ -42,7 +43,7 @@ class GoogleGenerativeAIComponent(LCModelComponent):
"top_k": {
"display_name": "Top K",
"info": "Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.",
"range_spec": RangeSpec(min=0, max=2, step=0.1),
"rangeSpec": RangeSpec(min=0, max=2, step=0.1),
"advanced": True,
},
"top_p": {
@ -66,11 +67,13 @@ class GoogleGenerativeAIComponent(LCModelComponent):
"input_value": {"display_name": "Input", "info": "The input to the model."},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
}

View file

@ -3,8 +3,8 @@ from typing import Optional
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class HuggingFaceEndpointsComponent(LCModelComponent):
@ -33,16 +33,19 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
"model_kwargs": {
"display_name": "Model Keyword Arguments",
"field_type": "code",
"advanced": True,
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
}

View file

@ -3,6 +3,7 @@ from typing import Any, Dict, List, Optional
# from langchain_community.chat_models import ChatOllama
from langchain_community.chat_models import ChatOllama
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
# from langchain.chat_models import ChatOllama
@ -53,6 +54,7 @@ class ChatOllamaComponent(LCModelComponent):
"base_url": {
"display_name": "Base URL",
"info": "Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.",
"advanced": True,
},
"model": {
"display_name": "Model Name",
@ -200,11 +202,12 @@ class ChatOllamaComponent(LCModelComponent):
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
"info": STREAM_INFO_TEXT,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
}

View file

@ -2,6 +2,7 @@ from typing import Optional
from langchain_openai import ChatOpenAI
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import NestedDict, Text
@ -28,31 +29,29 @@ class OpenAIModelComponent(LCModelComponent):
"input_value": {"display_name": "Input"},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": False,
"required": False,
"advanced": True,
},
"model_kwargs": {
"display_name": "Model Kwargs",
"advanced": True,
"required": False,
},
"model_name": {
"display_name": "Model Name",
"advanced": False,
"required": False,
"options": [
"gpt-4-turbo-preview",
"gpt-3.5-turbo",
"gpt-4-0125-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-1106",
],
"value": "gpt-4-turbo-preview",
},
"openai_api_base": {
"display_name": "OpenAI API Base",
"advanced": False,
"required": False,
"advanced": True,
"info": (
"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\n"
"You can change this to use other APIs like JinaChat, LocalAI and Prem."
@ -60,35 +59,36 @@ class OpenAIModelComponent(LCModelComponent):
},
"openai_api_key": {
"display_name": "OpenAI API Key",
"info": "The OpenAI API Key to use for the OpenAI model.",
"advanced": False,
"required": False,
"password": True,
},
"temperature": {
"display_name": "Temperature",
"advanced": False,
"required": False,
"value": 0.7,
"value": 0.1,
},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
}
def build(
self,
input_value: Text,
openai_api_key: str,
temperature: float,
model_name: str,
max_tokens: Optional[int] = 256,
model_kwargs: NestedDict = {},
model_name: str = "gpt-4-1106-preview",
openai_api_base: Optional[str] = None,
openai_api_key: Optional[str] = None,
temperature: float = 0.7,
stream: bool = False,
system_message: Optional[str] = None,
) -> Text:

View file

@ -2,8 +2,8 @@ from typing import List, Optional
from langchain_core.messages.base import BaseMessage
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class ChatVertexAIComponent(LCModelComponent):
@ -77,11 +77,13 @@ class ChatVertexAIComponent(LCModelComponent):
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
}

View file

@ -7,16 +7,22 @@ from langflow.field_typing import Text
class TextOutput(TextComponent):
display_name = "Text Output"
description = "Display a text output in the Interaction Panel."
icon = "type"
def build_config(self):
return {
"input_value": {
"display_name": "Value",
"input_types": ["Record"],
"input_types": ["Record", "Text"],
"info": "Text or Record to be passed as output.",
},
"record_template": {"display_name": "Record Template", "multiline": True},
"record_template": {
"display_name": "Record Template",
"multiline": True,
"info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.",
"advanced": True,
},
}
def build(self, input_value: Optional[Text] = "", record_template: str = "{text}") -> Text:
def build(self, input_value: Optional[Text] = "", record_template: str = "") -> Text:
return super().build(input_value=input_value, record_template=record_template)

View file

@ -8,7 +8,7 @@ from langflow.schema import Record
class AstraDBSearchComponent(LCVectorStoreComponent):
display_name = "AstraDB Search"
description = "Searches an existing AstraDB Vector Store"
description = "Searches an existing AstraDB Vector Store."
icon = "AstraDB"
field_order = ["token", "api_endpoint", "collection_name", "input_value", "embedding"]

View file

@ -9,8 +9,8 @@ from langflow.schema import Record
class AstraDBVectorStoreComponent(CustomComponent):
display_name = "AstraDB Vector Store"
description = "Builds or loads an AstraDB Vector Store"
display_name = "AstraDB"
description = "Builds or loads an AstraDB Vector Store."
icon = "AstraDB"
field_order = ["token", "api_endpoint", "collection_name", "inputs", "embedding"]

View file

@ -1,7 +1,10 @@
from typing import Literal
from pydantic import BaseModel, field_validator
class RangeSpec(BaseModel):
step_type: Literal["int", "float"] = "float"
min: float = -1.0
max: float = 1.0
step: float = 0.1
@ -15,7 +18,13 @@ class RangeSpec(BaseModel):
@field_validator("step")
@classmethod
def step_must_be_positive(cls, v):
def step_must_be_positive(cls, v, values, **kwargs):
if v <= 0:
raise ValueError("Step must be positive")
if values.data["step_type"] == "int" and isinstance(v, float) and not v.is_integer():
raise ValueError("When step_type is int, step must be an integer")
return v
@classmethod
def set_step_type(cls, step_type: Literal["int", "float"], range_spec: "RangeSpec") -> "RangeSpec":
return cls(min=range_spec.min, max=range_spec.max, step=range_spec.step, step_type=step_type)

View file

@ -101,7 +101,11 @@ class Edge:
def __eq__(self, __o: object) -> bool:
if not isinstance(__o, Edge):
return False
return self._source_handle == __o._source_handle and self._target_handle == __o._target_handle
return (
self._source_handle == __o._source_handle
and self._target_handle == __o._target_handle
and self.target_param == __o.target_param
)
class ContractEdge(Edge):

View file

@ -15,7 +15,7 @@ from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.types import ChatVertex, FileToolVertex, LLMVertex, RoutingVertex, StateVertex, ToolkitVertex
from langflow.interface.tools.constants import FILE_TOOLS
from langflow.schema import Record
from langflow.schema.schema import INPUT_FIELD_NAME
from langflow.schema.schema import INPUT_FIELD_NAME, InputType
if TYPE_CHECKING:
from langflow.graph.schema import ResultData
@ -201,7 +201,7 @@ class Graph:
self,
inputs: Dict[str, str],
input_components: list[str],
input_type: Literal["chat", "text", "json", "any"] | None,
input_type: InputType | None,
outputs: list[str],
stream: bool,
session_id: str,
@ -271,7 +271,7 @@ class Graph:
self,
inputs: list[Dict[str, str]],
input_components: Optional[list[list[str]]] = None,
types: Optional[list[Literal["chat", "text", "json", "any"] | None]] = None,
types: Optional[list[InputType | None]] = None,
outputs: Optional[list[str]] = None,
session_id: Optional[str] = None,
stream: bool = False,
@ -309,7 +309,7 @@ class Graph:
self,
inputs: list[Dict[str, str]],
inputs_components: Optional[list[list[str]]] = None,
types: Optional[list[Literal["chat", "text", "json", "any"] | None]] = None,
types: Optional[list[InputType | None]] = None,
outputs: Optional[list[str]] = None,
session_id: Optional[str] = None,
stream: bool = False,
@ -603,8 +603,7 @@ class Graph:
# This is a hack to make sure that the LLM vertex is sent to
# the toolkit vertex
self._build_vertex_params()
# remove invalid vertices
self._validate_vertices()
# Now that we have the vertices and edges
# We need to map the vertices that are connected to
# to ChatVertex instances
@ -631,14 +630,6 @@ class Graph:
if isinstance(vertex, ToolkitVertex):
vertex.params["llm"] = llm_vertex
def _validate_vertices(self) -> None:
"""Check that all vertices have edges"""
if len(self.vertices) == 1:
return
for vertex in self.vertices:
if not self._validate_vertex(vertex):
raise ValueError(f"{vertex.display_name} is not connected to any other components")
def _validate_vertex(self, vertex: Vertex) -> bool:
"""Validates a vertex."""
# All vertices that do not have edges are invalid
@ -890,8 +881,7 @@ class Graph:
# and then build the edges
# if we can't find a vertex, we raise an error
edges: List[ContractEdge] = []
edges_added = set()
edges: set[ContractEdge] = set()
for edge in self._edges:
source = self.get_vertex(edge["source"])
target = self.get_vertex(edge["target"])
@ -900,13 +890,11 @@ class Graph:
raise ValueError(f"Source vertex {edge['source']} not found")
if target is None:
raise ValueError(f"Target vertex {edge['target']} not found")
edge = ContractEdge(source, target, edge)
if (source.id, target.id) in edges_added:
continue
edges.add(edge)
edges.append(ContractEdge(source, target, edge))
edges_added.add((source.id, target.id))
return edges
return list(edges)
def _get_vertex_class(self, node_type: str, node_base_type: str, node_id: str) -> Type[Vertex]:
"""Returns the node class based on the node type."""

View file

@ -4,6 +4,7 @@ import inspect
import types
from enum import Enum
from typing import TYPE_CHECKING, Any, AsyncIterator, Callable, Dict, Iterator, List, Optional
from loguru import logger
from langflow.graph.schema import INPUT_COMPONENTS, OUTPUT_COMPONENTS, InterfaceComponentTypes, ResultData
@ -385,6 +386,7 @@ class Vertex:
if key not in self._raw_params:
new_params.pop(key)
self._raw_params.update(new_params)
self.params = self._raw_params.copy()
self.updated_raw_params = True
async def _build(self, user_id=None):

View file

@ -1,7 +1,6 @@
import ast
import json
from typing import AsyncIterator, Callable, Dict, Iterator, List, Optional, Union
import yaml
from langchain_core.messages import AIMessage
from loguru import logger

View file

@ -1,4 +1,4 @@
from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple, Type, Union, cast
from typing import TYPE_CHECKING, Any, Callable, Coroutine, List, Optional, Tuple, Union
from pydantic.v1 import BaseModel, Field, create_model
from sqlmodel import select
@ -63,30 +63,28 @@ def find_flow(flow_name: str, user_id: str) -> Optional[str]:
async def run_flow(
inputs: Optional[Union[dict, List[dict]]] = None,
inputs: Union[dict, List[dict]] = None,
tweaks: Optional[dict] = None,
flow_id: Optional[str] = None,
flow_name: Optional[str] = None,
user_id: Optional[str] = None,
) -> Any:
if not user_id:
raise ValueError("Session is invalid")
graph = await load_flow(user_id, flow_id, flow_name, tweaks)
if inputs is None:
inputs = []
inputs_list: list[dict[str, str]] = []
inputs_list = []
inputs_components = []
types = []
for input_dict in inputs:
inputs_list.append({INPUT_FIELD_NAME: cast(str, input_dict.get("input_value", ""))})
inputs_list.append({INPUT_FIELD_NAME: input_dict.get("input_value")})
inputs_components.append(input_dict.get("components", []))
types.append(input_dict.get("type", []))
return await graph.arun(inputs_list, inputs_components=inputs_components, types=types)
def generate_function_for_flow(inputs: List["Vertex"], flow_id: str) -> Callable[..., Awaitable[Any]]:
def generate_function_for_flow(inputs: List["Vertex"], flow_id: str) -> Coroutine:
"""
Generate a dynamic flow function based on the given inputs and flow ID.
@ -140,14 +138,12 @@ async def flow_function({func_args}):
"""
compiled_func = compile(func_body, "<string>", "exec")
local_scope: dict = {}
local_scope = {}
exec(compiled_func, globals(), local_scope)
return local_scope["flow_function"]
def build_function_and_schema(
flow_record: Record, graph: "Graph"
) -> Tuple[Callable[..., Awaitable[Any]], Type[BaseModel]]:
def build_function_and_schema(flow_record: Record, graph: "Graph") -> Tuple[Callable, BaseModel]:
"""
Builds a dynamic function and schema for a given flow.
@ -182,7 +178,7 @@ def get_flow_inputs(graph: "Graph") -> List["Vertex"]:
return inputs
def build_schema_from_inputs(name: str, inputs: List["Vertex"]) -> Type[BaseModel]:
def build_schema_from_inputs(name: str, inputs: List[tuple[str, str, str]]) -> BaseModel:
"""
Builds a schema from the given inputs.
@ -200,4 +196,4 @@ def build_schema_from_inputs(name: str, inputs: List["Vertex"]) -> Type[BaseMode
field_name = input_.display_name.lower().replace(" ", "_")
description = input_.description
fields[field_name] = (str, Field(default="", description=description))
return create_model(name, **fields) # type: ignore
return create_model(name, **fields)

View file

@ -29,6 +29,12 @@ def records_to_text(template: str, records: list[Record]) -> str:
if isinstance(records, Record):
records = [records]
# Check if there are any format strings in the template
_records = []
for record in records:
# If it is not a record, create one with the key "text"
if not isinstance(record, Record):
record = Record(text=record)
_records.append(record)
formated_records = [template.format(data=record.data, **record.data) for record in records]
formated_records = [template.format(data=record.data, **record.data) for record in _records]
return "\n".join(formated_records)

View file

@ -1,4 +1,6 @@
from datetime import datetime
from collections import defaultdict
from copy import deepcopy
from datetime import datetime, timezone
from pathlib import Path
import orjson
@ -6,6 +8,7 @@ from emoji import demojize, purely_emoji # type: ignore
from loguru import logger
from sqlmodel import select
from langflow.base.constants import FIELD_FORMAT_ATTRIBUTES, NODE_FORMAT_ATTRIBUTES
from langflow.interface.types import get_all_components
from langflow.services.database.models.flow.model import Flow, FlowCreate
from langflow.services.deps import get_settings_service, session_scope
@ -23,13 +26,62 @@ def update_projects_components_with_latest_component_versions(project_data, all_
# we want to run through each node and see if it exists in the all_types_dict
# if so, we go into the template key and also get the template from all_types_dict
# and update it all
for node in project_data.get("nodes", []):
node_changes_log = defaultdict(list)
project_data_copy = deepcopy(project_data)
for node in project_data_copy.get("nodes", []):
node_data = node.get("data").get("node")
if node_data.get("display_name") in all_types_dict:
latest_node = all_types_dict.get(node_data.get("display_name"))
latest_template = latest_node.get("template")
node_data["template"]["code"] = latest_template["code"]
return project_data
for attr in NODE_FORMAT_ATTRIBUTES:
if attr in latest_node:
# Check if it needs to be updated
if latest_node[attr] != node_data.get(attr):
node_changes_log[node_data["display_name"]].append(
{
"attr": attr,
"old_value": node_data.get(attr),
"new_value": latest_node[attr],
}
)
node_data[attr] = latest_node[attr]
for field_name, field_dict in latest_template.items():
if field_name not in node_data["template"]:
continue
# The idea here is to update some attributes of the field
for attr in FIELD_FORMAT_ATTRIBUTES:
if attr in field_dict and attr in node_data["template"].get(field_name):
# Check if it needs to be updated
if field_dict[attr] != node_data["template"][field_name][attr]:
node_changes_log[node_data["display_name"]].append(
{
"attr": f"{field_name}.{attr}",
"old_value": node_data["template"][field_name][attr],
"new_value": field_dict[attr],
}
)
node_data["template"][field_name][attr] = field_dict[attr]
log_node_changes(node_changes_log)
return project_data_copy
def log_node_changes(node_changes_log):
    """Emit one debug log entry summarizing the recorded node changes.

    Args:
        node_changes_log: Mapping of node display name to a list of change
            dicts, each with "attr", "old_value" and "new_value" keys.
    """
    per_node_messages = []
    for node_name, change_list in node_changes_log.items():
        # One message per node, listing each attribute change on its own line.
        parts = [f"\nNode: {node_name} was updated with the following changes:"]
        parts.extend(
            f"\n- {entry['attr']}: {entry['old_value']} -> {entry['new_value']}"
            for entry in change_list
        )
        per_node_messages.append("".join(parts))
    if per_node_messages:
        logger.debug("\n".join(per_node_messages))
def load_starter_projects():
@ -37,7 +89,7 @@ def load_starter_projects():
folder = Path(__file__).parent / "starter_projects"
for file in folder.glob("*.json"):
project = orjson.loads(file.read_text())
starter_projects.append(project)
starter_projects.append((file, project))
logger.info(f"Loaded starter project {file}")
return starter_projects
@ -48,8 +100,10 @@ def get_project_data(project):
project_is_component = project.get("is_component")
project_updated_at = project.get("updated_at")
if not project_updated_at:
project_updated_at = datetime.utcnow().isoformat()
updated_at_datetime = datetime.strptime(project_updated_at, "%Y-%m-%dT%H:%M:%S.%f")
project_updated_at = datetime.now(tz=timezone.utc).isoformat()
updated_at_datetime = datetime.strptime(project_updated_at, "%Y-%m-%dT%H:%M:%S.%f%z")
else:
updated_at_datetime = datetime.strptime(project_updated_at, "%Y-%m-%dT%H:%M:%S.%f")
project_data = project.get("data")
project_icon = project.get("icon")
if project_icon and purely_emoji(project_icon):
@ -68,6 +122,13 @@ def get_project_data(project):
)
def update_project_file(project_path, project, updated_project_data):
    """Write an updated starter project back to its JSON file on disk.

    Args:
        project_path: Path to the starter project ``.json`` file.
        project: Full project dict loaded from that file; its "data" key is
            replaced before serialization.
        updated_project_data: New value for the project's "data" key.
    """
    project["data"] = updated_project_data
    # orjson.dumps returns bytes; decode before writing as text.
    with open(project_path, "w") as f:
        f.write(orjson.dumps(project, option=orjson.OPT_INDENT_2).decode())
    logger.info(f"Updated starter project {project['name']} file")
def update_existing_project(
existing_project,
project_name,
@ -139,7 +200,7 @@ def create_or_update_starter_projects():
with session_scope() as session:
starter_projects = load_starter_projects()
delete_start_projects(session)
for project in starter_projects:
for project_path, project in starter_projects:
(
project_name,
project_description,
@ -149,7 +210,14 @@ def create_or_update_starter_projects():
project_icon,
project_icon_bg_color,
) = get_project_data(project)
project_data = update_projects_components_with_latest_component_versions(project_data, all_types_dict)
updated_project_data = update_projects_components_with_latest_component_versions(
project_data, all_types_dict
)
if updated_project_data != project_data:
project_data = updated_project_data
# We also need to update the project data in the file
update_project_file(project_path, project, updated_project_data)
if project_name and project_data:
for existing_project in get_all_flows_similar_to_project(session, project_name):
session.delete(existing_project)

File diff suppressed because one or more lines are too long

View file

@ -269,6 +269,9 @@ def run_build_config(
# Allow user to build TemplateField as well
# as a dict with the same keys as TemplateField
field_dict = get_field_dict(field)
# Let's check if "rangeSpec" is a RangeSpec object
if "rangeSpec" in field_dict and isinstance(field_dict["rangeSpec"], RangeSpec):
field_dict["rangeSpec"] = field_dict["rangeSpec"].model_dump()
build_config[field_name] = field_dict
return build_config, custom_instance
@ -423,9 +426,6 @@ def update_field_dict(
logger.error(f"Error while running update_build_config: {str(exc)}")
raise UpdateBuildConfigError(f"Error while running update_build_config: {str(exc)}") from exc
# Let's check if "range_spec" is a RangeSpec object
if "rangeSpec" in field_dict and isinstance(field_dict["rangeSpec"], RangeSpec):
field_dict["rangeSpec"] = field_dict["rangeSpec"].model_dump()
return build_config

View file

@ -2,6 +2,7 @@ import inspect
import json
from typing import TYPE_CHECKING, Any, Callable, Dict, Sequence, Type
import orjson
from langchain.agents import agent as agent_module
from langchain.agents.agent import AgentExecutor

View file

@ -3,6 +3,7 @@ from pathlib import Path
from typing import Optional
from urllib.parse import urlencode
import nest_asyncio
import socketio # type: ignore
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
@ -20,6 +21,7 @@ from langflow.utils.logger import configure
def get_lifespan(fix_migration=False, socketio_server=None):
@asynccontextmanager
async def lifespan(app: FastAPI):
nest_asyncio.apply()
initialize_services(fix_migration=fix_migration, socketio_server=socketio_server)
setup_llm_caching()
LangfuseInstance.update()
@ -128,4 +130,5 @@ if __name__ == "__main__":
workers=get_number_of_workers(),
log_level="debug",
reload=True,
loop="asyncio",
)

View file

@ -12,6 +12,7 @@ def get_messages(
sender_name: Optional[str] = None,
session_id: Optional[str] = None,
order_by: Optional[str] = "timestamp",
order: Optional[str] = "DESC",
limit: Optional[int] = None,
):
"""
@ -34,10 +35,15 @@ def get_messages(
session_id=session_id,
order_by=order_by,
limit=limit,
order=order,
)
records: list[Record] = []
# messages_df has a timestamp
# it gets the last 5 messages, for example
# but now they are ordered from most recent to least recent
# so we need to reverse the order
messages_df = messages_df[::-1] if order == "DESC" else messages_df
for row in messages_df.itertuples():
record = Record(
data={

View file

@ -1,9 +1,10 @@
import json
from pathlib import Path
from typing import Optional, Union
from typing import List, Optional, Union
from langflow.graph import Graph
from langflow.processing.process import process_tweaks
from langflow.graph.schema import RunOutputs
from langflow.processing.process import process_tweaks, run_graph
def load_flow_from_json(flow: Union[Path, str, dict], tweaks: Optional[dict] = None) -> Graph:
@ -31,3 +32,36 @@ def load_flow_from_json(flow: Union[Path, str, dict], tweaks: Optional[dict] = N
graph = Graph.from_payload(graph_data)
return graph
def run_flow_from_json(
    flow: Union[Path, str, dict],
    input_value: str,
    tweaks: Optional[dict] = None,
    input_type: str = "chat",
    output_type: str = "chat",
    output_component: Optional[str] = None,
) -> List[RunOutputs]:
    """
    Runs a JSON flow by loading it from a file or dictionary and executing it with the given input value.

    Args:
        flow (Union[Path, str, dict]): The path to the JSON file, or the JSON dictionary representing the flow.
        input_value (str): The input value to be processed by the flow.
        tweaks (Optional[dict], optional): Optional tweaks to be applied to the flow. Defaults to None.
        input_type (str, optional): The type of the input value. Defaults to "chat".
        output_type (str, optional): The type of the output value. Defaults to "chat".
        output_component (Optional[str], optional): The specific output component to retrieve. Defaults to None.

    Returns:
        List[RunOutputs]: The outputs produced by running the flow.
    """
    # Load (and optionally tweak) the graph, then delegate execution.
    graph = load_flow_from_json(flow, tweaks)
    result = run_graph(
        graph=graph,
        input_value=input_value,
        input_type=input_type,
        output_type=output_type,
        output_component=output_component,
    )
    return result

View file

@ -124,10 +124,10 @@ class Result(BaseModel):
session_id: str
async def run_graph(
async def run_graph_internal(
graph: "Graph",
flow_id: str,
stream: bool,
stream: bool = False,
session_id: Optional[str] = None,
inputs: Optional[List["InputValueRequest"]] = None,
outputs: Optional[List[str]] = None,
@ -167,6 +167,58 @@ async def run_graph(
return run_outputs, session_id_str
def run_graph(
    graph: "Graph",
    input_value: str,
    input_type: str,
    output_type: str,
    output_component: Optional[str] = None,
) -> List[RunOutputs]:
    """
    Runs the given Langflow Graph with the specified input and returns the outputs.

    Args:
        graph (Graph): The graph to be executed.
        input_value (str): The input value to be passed to the graph.
        input_type (str): The type of the input value.
        output_type (str): The type of the desired output.
        output_component (Optional[str], optional): The specific output component to retrieve. Defaults to None.

    Returns:
        List[RunOutputs]: A list of RunOutputs objects representing the outputs of the graph.
    """
    # A single synthetic input request carries the provided value/type.
    inputs = [InputValueRequest(components=[], input_value=input_value, type=input_type)]
    if output_component:
        # Caller asked for one specific component's output.
        outputs = [output_component]
    else:
        # "debug" selects every vertex; otherwise select output vertices whose
        # id matches the requested output type (or all outputs for "any").
        outputs = [
            vertex.id
            for vertex in graph.vertices
            if output_type == "debug"
            or (vertex.is_output and (output_type == "any" or output_type in vertex.id.lower()))
        ]
    components = []
    inputs_list = []
    types = []
    for input_value_request in inputs:
        if input_value_request.input_value is None:
            # graph.run expects a string; normalize None to "" with a warning.
            logger.warning("InputValueRequest input_value cannot be None, defaulting to an empty string.")
            input_value_request.input_value = ""
        components.append(input_value_request.components or [])
        inputs_list.append({INPUT_FIELD_NAME: input_value_request.input_value})
        types.append(input_value_request.type)

    # Synchronous, non-streaming execution with an empty session id.
    run_outputs = graph.run(
        inputs_list,
        components,
        types,
        outputs or [],
        stream=False,
        session_id="",
    )
    return run_outputs
def validate_input(
graph_data: Dict[str, Any], tweaks: Union["Tweaks", Dict[str, Dict[str, Any]]]
) -> List[Dict[str, Any]]:

View file

@ -1,5 +1,5 @@
import copy
from typing import Optional
from typing import Literal, Optional
from langchain_core.documents import Document
from pydantic import BaseModel, model_validator
@ -145,3 +145,6 @@ class Record(BaseModel):
INPUT_FIELD_NAME = "input_value"
InputType = Literal["chat", "text", "any"]
OutputType = Literal["chat", "text", "any", "debug"]

View file

@ -1,11 +1,16 @@
from gunicorn.app.base import BaseApplication # type: ignore
from uvicorn.workers import UvicornWorker
class LangflowUvicornWorker(UvicornWorker):
CONFIG_KWARGS = {"loop": "asyncio"}
class LangflowApplication(BaseApplication):
def __init__(self, app, options=None):
self.options = options or {}
self.options["worker_class"] = "uvicorn.workers.UvicornWorker"
self.options["worker_class"] = "langflow.server.LangflowUvicornWorker"
self.application = app
super().__init__()

View file

@ -113,6 +113,7 @@ class MonitorService(Service):
sender_name: Optional[str] = None,
session_id: Optional[str] = None,
order_by: Optional[str] = "timestamp",
order: Optional[str] = "DESC",
limit: Optional[int] = None,
):
query = "SELECT sender_name, sender, session_id, message, artifacts, timestamp FROM messages"
@ -128,7 +129,8 @@ class MonitorService(Service):
query += " WHERE " + " AND ".join(conditions)
if order_by:
query += f" ORDER BY {order_by}"
# Make sure the order is from newest to oldest
query += f" ORDER BY {order_by} {order.upper()}"
if limit is not None:
query += f" LIMIT {limit}"

View file

@ -86,7 +86,7 @@ def add_row_to_table(
validated_data = model(**monitor_data)
# Extract data for the insert statement
validated_dict = validated_data.model_dump(exclude_unset=True)
validated_dict = validated_data.model_dump()
keys = [key for key in validated_dict.keys() if key != INDEX_KEY]
columns = ", ".join(keys)

View file

@ -15,7 +15,10 @@ class SessionService(Service):
async def load_session(self, key, flow_id: str, data_graph: Optional[dict] = None):
# Check if the data is cached
if key in self.cache_service:
return self.cache_service.get(key)
result = self.cache_service.get(key)
if isinstance(result, Coroutine):
result = await result
return result
if key is None:
key = self.generate_key(session_id=None, data_graph=data_graph)

View file

@ -1,6 +1,6 @@
from typing import Any, Callable, Optional, Union
from pydantic import BaseModel, ConfigDict, Field, field_serializer, field_validator, model_serializer
from pydantic import BaseModel, ConfigDict, Field, field_serializer, field_validator, model_serializer, model_validator
from langflow.field_typing.range_spec import RangeSpec
@ -91,6 +91,13 @@ class TemplateField(BaseModel):
result["type"] = self.field_type
return result
@model_validator(mode="after")
def validate_model(self):
# if field_type is int, we need to set the range_spec
if self.field_type == "int" and self.range_spec is not None:
self.range_spec = RangeSpec.set_step_type("int", self.range_spec)
return self
@field_serializer("file_path")
def serialize_file_path(self, value):
return value if self.field_type == "file" else ""

View file

@ -0,0 +1,70 @@
from typing import List, Union
from langchain.agents import AgentExecutor, BaseMultiActionAgent, BaseSingleActionAgent
from langflow import CustomComponent
from langflow.field_typing import BaseMemory, Text, Tool
class LCAgentComponent(CustomComponent):
    """Base CustomComponent for LangChain agent components.

    Provides the shared UI field configuration and an async helper that wraps
    an agent in an AgentExecutor (when needed) and invokes it.
    """

    def build_config(self):
        """Return the UI field configuration shared by agent components."""
        return {
            "lc": {
                "display_name": "LangChain",
                "info": "The LangChain to interact with.",
            },
            "handle_parsing_errors": {
                "display_name": "Handle Parsing Errors",
                "info": "If True, the agent will handle parsing errors. If False, the agent will raise an error.",
                "advanced": True,
            },
            "output_key": {
                "display_name": "Output Key",
                "info": "The key to use to get the output from the agent.",
                "advanced": True,
            },
            "memory": {
                "display_name": "Memory",
                "info": "Memory to use for the agent.",
            },
            "tools": {
                "display_name": "Tools",
                "info": "Tools the agent can use.",
            },
            "input_value": {
                "display_name": "Input",
                "info": "Input text to pass to the agent.",
            },
        }

    async def run_agent(
        self,
        agent: Union[BaseSingleActionAgent, BaseMultiActionAgent, AgentExecutor],
        inputs: str,
        input_variables: list[str],
        tools: List[Tool],
        memory: BaseMemory = None,  # may be None: the executor runs without memory
        handle_parsing_errors: bool = True,
        output_key: str = "output",
    ) -> Text:
        """Run the agent on the given input and return the selected output value.

        Args:
            agent: A bare agent or a ready AgentExecutor.
            inputs: The text bound to the "input" prompt variable.
            input_variables: All prompt variables; any besides
                "agent_scratchpad"/"input" are filled with empty strings.
            tools: Tools made available when wrapping a bare agent.
            memory: Optional memory passed to the executor.
            handle_parsing_errors: Forwarded to the executor.
            output_key: Key to extract from the result dict.

        Returns:
            The value under ``output_key`` (or the "output" fallback).

        Raises:
            ValueError: If neither ``output_key`` nor "output" is in the result.
        """
        # Use an executor as-is; wrap bare agents in a new AgentExecutor.
        if isinstance(agent, AgentExecutor):
            runnable = agent
        else:
            runnable = AgentExecutor.from_agent_and_tools(
                agent=agent, tools=tools, verbose=True, memory=memory, handle_parsing_errors=handle_parsing_errors
            )
        input_dict = {"input": inputs}
        # Fill any extra prompt variables so invocation does not fail on missing keys.
        for var in input_variables:
            if var not in ["agent_scratchpad", "input"]:
                input_dict[var] = ""
        result = await runnable.ainvoke(input_dict)
        self.status = result
        if output_key in result:
            return result.get(output_key)
        elif "output" not in result:
            # Neither the requested key nor the default "output" key is present.
            if output_key != "output":
                raise ValueError(f"Output key not found in result. Tried '{output_key}' and 'output'.")
            else:
                raise ValueError("Output key not found in result. Tried 'output'.")
        # Fall back to the default "output" key.
        return result.get("output")

View file

@ -0,0 +1,3 @@
from .model import LCModelComponent
__all__ = ["LCModelComponent"]

View file

@ -0,0 +1,48 @@
from typing import Optional
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.llms import LLM
from langchain_core.messages import HumanMessage, SystemMessage
from langflow import CustomComponent
class LCModelComponent(CustomComponent):
    """Base CustomComponent for language-model components.

    Provides shared helpers to invoke or stream an LLM / chat model and
    record the result in the component status.
    """

    display_name: str = "Model Name"
    description: str = "Model Description"

    def get_result(self, runnable: LLM, stream: bool, input_value: str):
        """
        Retrieves the result from invoking or streaming a runnable.

        Args:
            runnable (LLM): The runnable to invoke or stream.
            stream (bool): Indicates whether to use streaming or invocation mode.
            input_value (str): The input value to pass to the runnable.

        Returns:
            The stream iterator when ``stream`` is True; otherwise the response
            content (or the raw response when it has no ``content`` attribute).
        """
        if stream:
            result = runnable.stream(input_value)
        else:
            message = runnable.invoke(input_value)
            # Chat-style responses carry .content; plain LLMs return text directly.
            result = message.content if hasattr(message, "content") else message
        self.status = result
        return result

    def get_chat_result(
        self, runnable: BaseChatModel, stream: bool, input_value: str, system_message: Optional[str] = None
    ):
        """
        Invoke or stream a chat model with a human message and optional system message.

        Args:
            runnable (BaseChatModel): The chat model to invoke or stream.
            stream (bool): Indicates whether to use streaming or invocation mode.
            input_value (str): Text for the human message (skipped when falsy).
            system_message (Optional[str]): Optional system message text.

        Returns:
            The stream iterator when ``stream`` is True; otherwise the response content.
        """
        messages = []
        if input_value:
            messages.append(HumanMessage(input_value))
        if system_message:
            # NOTE(review): the system message is appended AFTER the human
            # message here — confirm this ordering is intentional.
            messages.append(SystemMessage(system_message))
        if stream:
            result = runnable.stream(messages)
        else:
            message = runnable.invoke(messages)
            result = message.content
        self.status = result
        return result

View file

@ -0,0 +1,37 @@
from langchain_community.tools.searchapi import SearchAPIRun
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
from langflow import CustomComponent
from langflow.field_typing import Tool
class SearchApiToolComponent(CustomComponent):
    """Component that exposes the SearchApi search engine as a LangChain Tool."""

    display_name: str = "SearchApi Tool"
    description: str = "Real-time search engine results API."
    documentation: str = "https://www.searchapi.io/docs/google"

    field_config = {
        "engine": {
            "display_name": "Engine",
            "field_type": "str",
            "info": "The search engine to use.",
        },
        "api_key": {
            "display_name": "API Key",
            "field_type": "str",
            "required": True,
            "password": True,
            "info": "The API key to use SearchApi.",
        },
    }

    def build(
        self,
        engine: str,
        api_key: str,
    ) -> Tool:
        """Build the SearchApi tool from the chosen engine and credentials."""
        # The wrapper carries both the engine selection and the API key.
        wrapper = SearchApiAPIWrapper(engine=engine, searchapi_api_key=api_key)
        search_tool = SearchAPIRun(api_wrapper=wrapper)
        # Surface the built tool in the component status for inspection.
        self.status = search_tool
        return search_tool

View file

@ -0,0 +1,103 @@
from contextlib import contextmanager
from typing import TYPE_CHECKING, Generator
from langflow.services import ServiceType, service_manager
if TYPE_CHECKING:
from sqlmodel import Session
from langflow.services.cache.service import CacheService
from langflow.services.chat.service import ChatService
from langflow.services.credentials.service import CredentialService
from langflow.services.database.service import DatabaseService
from langflow.services.monitor.service import MonitorService
from langflow.services.plugins.service import PluginService
from langflow.services.session.service import SessionService
from langflow.services.settings.service import SettingsService
from langflow.services.socket.service import SocketIOService
from langflow.services.storage.service import StorageService
from langflow.services.store.service import StoreService
from langflow.services.task.service import TaskService
def get_socket_service() -> "SocketIOService":
    """Return the registered SocketIO service."""
    socket_service = service_manager.get(ServiceType.SOCKETIO_SERVICE)
    return socket_service  # type: ignore


def get_storage_service() -> "StorageService":
    """Return the registered storage service."""
    storage_service = service_manager.get(ServiceType.STORAGE_SERVICE)
    return storage_service  # type: ignore


def get_credential_service() -> "CredentialService":
    """Return the registered credential service."""
    credential_service = service_manager.get(ServiceType.CREDENTIAL_SERVICE)
    return credential_service  # type: ignore


def get_plugins_service() -> "PluginService":
    """Return the registered plugin service."""
    plugin_service = service_manager.get(ServiceType.PLUGIN_SERVICE)
    return plugin_service  # type: ignore
def get_settings_service() -> "SettingsService":
    """Return the settings service, bootstrapping it on first use.

    The settings service can be requested before the service manager has
    registered it; in that case it is initialized lazily here.
    """
    try:
        return service_manager.get(ServiceType.SETTINGS_SERVICE)  # type: ignore
    except ValueError:
        # initialize settings service
        # Local import avoids a circular import at module load time.
        from langflow.services.manager import initialize_settings_service

        initialize_settings_service()
        return service_manager.get(ServiceType.SETTINGS_SERVICE)  # type: ignore
def get_db_service() -> "DatabaseService":
    """Return the registered database service."""
    database_service = service_manager.get(ServiceType.DATABASE_SERVICE)
    return database_service  # type: ignore


def get_session() -> Generator["Session", None, None]:
    """Yield database sessions produced by the database service."""
    for session in get_db_service().get_session():
        yield session
@contextmanager
def session_scope():
    """
    Context manager for managing a session scope.

    Commits on success, rolls back and re-raises on any error, and always
    closes the session.

    Yields:
        session: The session object.

    Raises:
        Exception: If an error occurs during the session scope.
    """
    session = next(get_session())
    try:
        yield session
        session.commit()
    except BaseException:
        # Explicit BaseException preserves the original bare-except semantics
        # (including KeyboardInterrupt/SystemExit, which are re-raised after
        # rollback) while satisfying lint rule E722.
        session.rollback()
        raise
    finally:
        session.close()
def get_cache_service() -> "CacheService":
    """Return the registered cache service."""
    cache_service = service_manager.get(ServiceType.CACHE_SERVICE)
    return cache_service  # type: ignore


def get_session_service() -> "SessionService":
    """Return the registered session service."""
    session_service = service_manager.get(ServiceType.SESSION_SERVICE)
    return session_service  # type: ignore


def get_monitor_service() -> "MonitorService":
    """Return the registered monitor service."""
    monitor_service = service_manager.get(ServiceType.MONITOR_SERVICE)
    return monitor_service  # type: ignore


def get_task_service() -> "TaskService":
    """Return the registered task service."""
    task_service = service_manager.get(ServiceType.TASK_SERVICE)
    return task_service  # type: ignore


def get_chat_service() -> "ChatService":
    """Return the registered chat service."""
    chat_service = service_manager.get(ServiceType.CHAT_SERVICE)
    return chat_service  # type: ignore


def get_store_service() -> "StoreService":
    """Return the registered store service."""
    store_service = service_manager.get(ServiceType.STORE_SERVICE)
    return store_service  # type: ignore

View file

@ -0,0 +1,83 @@
import importlib
import inspect
from typing import TYPE_CHECKING, Type, get_type_hints
from cachetools import LRUCache, cached
from loguru import logger
from langflow.services.schema import ServiceType
if TYPE_CHECKING:
from langflow.services.base import Service
class ServiceFactory:
    """Base factory for constructing Service instances.

    Dependencies on other services are inferred from the type hints of the
    subclass's ``create`` method at construction time.
    """

    def __init__(
        self,
        service_class,
    ):
        self.service_class = service_class
        # Inferred ServiceType dependencies, resolved against all importable services.
        self.dependencies = infer_service_types(self, import_all_services_into_a_dict())

    def create(self, *args, **kwargs) -> "Service":
        """Instantiate and return the service.

        BUG FIX: the original used ``raise self.service_class(...)``, which
        attempted to raise the constructed service instance (a TypeError for
        non-exception classes) instead of returning it.
        """
        return self.service_class(*args, **kwargs)
def hash_factory(factory: ServiceFactory) -> str:
    """Cache-key helper: identify a factory by its service class's name."""
    service_cls = factory.service_class
    return service_cls.__name__
def hash_dict(d: dict) -> str:
    """Cache-key helper: use the dict's string form as a hashable stand-in."""
    key = "{}".format(d)
    return key
def hash_infer_service_types_args(factory_class: Type[ServiceFactory], available_services=None) -> str:
    """Build the LRU-cache key for infer_service_types from both arguments."""
    return "{}_{}".format(hash_factory(factory_class), hash_dict(available_services))
@cached(cache=LRUCache(maxsize=10), key=hash_infer_service_types_args)
def infer_service_types(factory_class: Type[ServiceFactory], available_services=None) -> "list[ServiceType]":
    """Infer a factory's ServiceType dependencies from its ``create`` type hints.

    Args:
        factory_class: The factory whose ``create`` signature is inspected.
        available_services: Mapping of service class names to classes, used as
            globals when resolving string (forward-reference) type hints.

    Returns:
        The list of ServiceType members matching each ``create`` parameter type.

    Raises:
        ValueError: If a parameter type has no matching ServiceType member.
    """
    create_method = factory_class.create
    # Resolve forward-referenced hints against the imported service classes.
    type_hints = get_type_hints(create_method, globalns=available_services)
    service_types = []
    for param_name, param_type in type_hints.items():
        # Skip the return type if it's included in type hints
        if param_name == "return":
            continue
        # Convert the type to the expected enum format directly without appending "_SERVICE"
        # e.g. "DatabaseService" -> "DATABASE_SERVICE"
        type_name = param_type.__name__.upper().replace("SERVICE", "_SERVICE")
        try:
            # Attempt to find a matching enum value
            service_type = ServiceType[type_name]
            service_types.append(service_type)
        except KeyError:
            raise ValueError(f"No matching ServiceType for parameter type: {param_type.__name__}")
    return service_types
@cached(cache=LRUCache(maxsize=1))
def import_all_services_into_a_dict():
    """Import every known service class into a name -> class dict.

    The result is used as a globals mapping when resolving service type hints.

    Raises:
        RuntimeError: If any service module fails to import or inspect.
    """
    # Services are all in langflow.services.{service_name}.service
    # and are subclass of Service
    # We want to import all of them and put them in a dict
    # to use as globals
    from langflow.services.base import Service

    services = {}
    for service_type in ServiceType:
        try:
            # e.g. "database_service" -> module langflow.services.database.service
            service_name = ServiceType(service_type).value.replace("_service", "")
            module_name = f"langflow.services.{service_name}.service"
            module = importlib.import_module(module_name)
            for name, obj in inspect.getmembers(module, inspect.isclass):
                if issubclass(obj, Service) and obj is not Service:
                    services[name] = obj
                    # NOTE(review): only the first Service subclass found in each
                    # module is registered — confirm each module defines exactly one.
                    break
        except Exception as exc:
            logger.exception(exc)
            raise RuntimeError("Could not initialize services. Please check your settings.") from exc
    return services

View file

@ -0,0 +1,29 @@
{
"extends": ["eslint:recommended", "plugin:node/recommended"],
"parserOptions": {
"ecmaVersion": 2018
},
"rules": {
"no-console": "warn",
"no-self-assign": "warn",
"no-self-compare": "warn",
"complexity": ["error", { "max": 15 }],
"indent": ["error", 2, { "SwitchCase": 1 }],
"no-dupe-keys": "error",
"no-invalid-regexp": "error",
"no-undef": "error",
"no-return-assign": "error",
"no-redeclare": "error",
"no-empty": "error",
"no-await-in-loop": "error",
"node/exports-style": ["error", "module.exports"],
"node/file-extension-in-import": ["error", "always"],
"node/prefer-global/buffer": ["error", "always"],
"node/prefer-global/console": ["error", "always"],
"node/prefer-global/process": ["error", "always"],
"node/prefer-global/url-search-params": ["error", "always"],
"node/prefer-global/url": ["error", "always"],
"node/prefer-promises/dns": "error",
"node/prefer-promises/fs": "error"
}
}

File diff suppressed because it is too large Load diff

View file

@ -3,12 +3,8 @@
"version": "0.1.2",
"private": true,
"dependencies": {
"@emotion/react": "^11.11.1",
"@emotion/styled": "^11.11.0",
"@headlessui/react": "^1.7.17",
"@heroicons/react": "^2.0.18",
"@mui/material": "^5.14.7",
"@preact/signals-react": "^2.0.0",
"@million/lint": "^0.0.73",
"@radix-ui/react-accordion": "^1.1.2",
"@radix-ui/react-checkbox": "^1.0.4",
"@radix-ui/react-dialog": "^1.0.4",
@ -19,7 +15,7 @@
"@radix-ui/react-menubar": "^1.0.3",
"@radix-ui/react-popover": "^1.0.6",
"@radix-ui/react-progress": "^1.0.3",
"@radix-ui/react-select": "^1.2.2",
"@radix-ui/react-select": "^2.0.0",
"@radix-ui/react-separator": "^1.0.3",
"@radix-ui/react-slot": "^1.0.2",
"@radix-ui/react-switch": "^1.0.3",
@ -29,9 +25,7 @@
"@tailwindcss/forms": "^0.5.6",
"@tailwindcss/line-clamp": "^0.4.4",
"@types/axios": "^0.14.0",
"accordion": "^3.0.2",
"ace-builds": "^1.24.1",
"add": "^2.0.6",
"ansi-to-html": "^0.7.2",
"axios": "^1.5.0",
"base64-js": "^1.5.1",
@ -43,6 +37,7 @@
"framer-motion": "^11.0.6",
"lodash": "^4.17.21",
"lucide-react": "^0.331.0",
"million": "^3.0.6",
"moment": "^2.29.4",
"playwright": "^1.42.0",
"react": "^18.2.0",
@ -55,8 +50,6 @@
"react-markdown": "^8.0.7",
"react-router-dom": "^6.15.0",
"react-syntax-highlighter": "^15.5.0",
"react-tabs": "^6.0.2",
"react-tooltip": "^5.21.1",
"react18-json-view": "^0.2.3",
"reactflow": "^11.9.2",
"rehype-mathjax": "^4.0.3",
@ -64,8 +57,6 @@
"remark-math": "^5.1.1",
"shadcn-ui": "^0.2.3",
"short-unique-id": "^4.4.4",
"switch": "^0.0.0",
"table": "^6.8.1",
"tailwind-merge": "^1.14.0",
"tailwindcss-animate": "^1.0.7",
"uuid": "^9.0.0",
@ -117,6 +108,8 @@
"@vitejs/plugin-react-swc": "^3.3.2",
"autoprefixer": "^10.4.15",
"daisyui": "^4.0.4",
"eslint": "^8.57.0",
"eslint-plugin-node": "^11.1.0",
"postcss": "^8.4.29",
"prettier": "^2.8.8",
"prettier-plugin-organize-imports": "^3.2.3",

View file

@ -27,11 +27,15 @@ import useAlertStore from "../../../../stores/alertStore";
import useFlowStore from "../../../../stores/flowStore";
import useFlowsManagerStore from "../../../../stores/flowsManagerStore";
import { useTypesStore } from "../../../../stores/typesStore";
import { APIClassType, ResponseErrorTypeAPI } from "../../../../types/api";
import {
APIClassType,
ResponseErrorDetailAPI,
ResponseErrorTypeAPI,
} from "../../../../types/api";
import { ParameterComponentType } from "../../../../types/components";
import {
debouncedHandleUpdateValues,
handleUpdateValues,
throttledHandleUpdateValues,
} from "../../../../utils/parameterUtils";
import {
convertObjToArray,
@ -104,10 +108,11 @@ export default function ParameterComponent({
});
}
} catch (error) {
let responseError = error as ResponseErrorTypeAPI;
let responseError = error as ResponseErrorDetailAPI;
setErrorData({
title: "Error while updating the Component",
list: [responseError.response.data.detail.error ?? "Unknown error"],
list: [responseError.response.data.detail ?? "Unknown error"],
});
}
setIsLoading(false);
@ -136,10 +141,11 @@ export default function ParameterComponent({
});
}
} catch (error) {
let responseError = error as ResponseErrorTypeAPI;
let responseError = error as ResponseErrorDetailAPI;
setErrorData({
title: "Error while updating the Component",
list: [responseError.response.data.detail.error ?? "Unknown error"],
list: [responseError.response.data.detail ?? "Unknown error"],
});
}
setIsLoading(false);
@ -164,7 +170,7 @@ export default function ParameterComponent({
if (shouldUpdate) {
setIsLoading(true);
try {
newTemplate = await throttledHandleUpdateValues(name, data);
newTemplate = await debouncedHandleUpdateValues(name, data);
} catch (error) {
let responseError = error as ResponseErrorTypeAPI;
setErrorData({
@ -377,7 +383,7 @@ export default function ParameterComponent({
!showNode ? "mt-0" : ""
)}
style={{
borderColor: color,
borderColor: color ?? nodeColors.unknown,
}}
onClick={() => {
setFilterEdge(groupedEdge.current);
@ -406,7 +412,7 @@ export default function ParameterComponent({
}
>
{!left && data.node?.frozen && (
<div>
<div className="pr-1">
<IconComponent className="h-5 w-5 text-ice" name={"Snowflake"} />
</div>
)}
@ -421,7 +427,7 @@ export default function ParameterComponent({
{title}
</span>
)}
<span className={(info === "" ? "" : "ml-1 ") + " text-status-red pl-1"}>
<span className={(required ? "ml-2 " : "") + "text-status-red"}>
{required ? "*" : ""}
</span>
<div className="">
@ -445,7 +451,7 @@ export default function ParameterComponent({
<div className="flex">
<ShadTooltip
styleClasses={"tooltip-fixed-width custom-scroll nowheel"}
delayDuration={0}
delayDuration={1000}
content={refHtml.current}
side={left ? "left" : "right"}
>
@ -473,7 +479,7 @@ export default function ParameterComponent({
"h-3 w-3 rounded-full border-2 bg-background"
)}
style={{
borderColor: color,
borderColor: color ?? nodeColors.unknown,
}}
onClick={() => {
setFilterEdge(groupedEdge.current);
@ -679,6 +685,7 @@ export default function ParameterComponent({
) : left === true && type === "int" ? (
<div className="mt-2 w-full">
<IntComponent
rangeSpec={data.node?.template[name].rangeSpec}
disabled={disabled}
value={data.node?.template[name].value ?? ""}
onChange={handleOnNewValue}

View file

@ -1,5 +1,5 @@
import { cloneDeep } from "lodash";
import { useCallback, useEffect, useState } from "react";
import { useCallback, useEffect, useMemo, useState } from "react";
import { NodeToolbar, useUpdateNodeInternals } from "reactflow";
import ShadTooltip from "../../components/ShadTooltipComponent";
import IconComponent from "../../components/genericIconComponent";
@ -86,7 +86,11 @@ export default function GenericNode({
if (!thisNodeTemplate.code) return;
const currentCode = thisNodeTemplate.code?.value;
const thisNodesCode = data.node!.template?.code?.value;
if (currentCode !== thisNodesCode) {
const componentsToIgnore = ["Custom Component", "Prompt"];
if (
currentCode !== thisNodesCode &&
!componentsToIgnore.includes(data.node!.display_name)
) {
setIsOutdated(true);
} else {
setIsOutdated(false);
@ -293,7 +297,6 @@ export default function GenericNode({
);
}
};
const getSpecificClassFromBuildStatus = (
buildStatus: BuildStatus | undefined,
validationStatus: validationStatusType | null
@ -342,17 +345,16 @@ export default function GenericNode({
const getNodeSizeClass = (showNode) =>
showNode ? "w-96 rounded-lg" : "w-26 h-26 rounded-full";
return (
<>
const memoizedNodeToolbarComponent = useMemo(() => {
return (
<NodeToolbar>
<NodeToolbarComponent
position={{ x: xPos, y: yPos }}
data={data}
deleteNode={(id) => {
takeSnapshot();
deleteNode(id);
}}
setShowNode={(show: boolean) => {
setShowNode={(show) => {
setNode(data.id, (old) => ({
...old,
data: { ...old.data, showNode: show },
@ -366,8 +368,25 @@ export default function GenericNode({
updateNodeCode={updateNodeCode}
isOutdated={isOutdated}
selected={selected}
></NodeToolbarComponent>
/>
</NodeToolbar>
);
}, [
data,
deleteNode,
takeSnapshot,
setNode,
setShowNode,
handles,
showNode,
updateNodeCode,
isOutdated,
selected,
]);
return (
<>
{memoizedNodeToolbarComponent}
<div
className={getNodeBorderClassName(
selected,

View file

@ -1,40 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" style="margin: auto; background: none; display: block; shape-rendering: auto;" width="271px" height="271px" viewBox="0 0 100 100" preserveAspectRatio="xMidYMid">
<defs>
<filter id="ldio-978hsxudfzl-filter" x="-100%" y="-100%" width="300%" height="300%" color-interpolation-filters="sRGB">
<feGaussianBlur in="SourceGraphic" stdDeviation="3.6"></feGaussianBlur>
<feComponentTransfer result="cutoff">
<feFuncA type="table" tableValues="0 0 0 0 0 0 1 1 1 1 1"></feFuncA>
</feComponentTransfer>
</filter>
</defs>
<g filter="url(#ldio-978hsxudfzl-filter)"><g transform="translate(50 50)">
<g>
<circle cx="8" cy="0" r="5" fill="#2edbb5">
<animate attributeName="r" keyTimes="0;0.5;1" values="5.3999999999999995;12.6;5.3999999999999995" dur="5s" repeatCount="indefinite" begin="-0.2s"></animate>
</circle>
<animateTransform attributeName="transform" type="rotate" keyTimes="0;1" values="0;360" dur="5s" repeatCount="indefinite" begin="0s"></animateTransform>
</g>
</g><g transform="translate(50 50)">
<g>
<circle cx="8" cy="0" r="5" fill="#1d99ff">
<animate attributeName="r" keyTimes="0;0.5;1" values="5.3999999999999995;12.6;5.3999999999999995" dur="2.5s" repeatCount="indefinite" begin="-0.15000000000000002s"></animate>
</circle>
<animateTransform attributeName="transform" type="rotate" keyTimes="0;1" values="0;360" dur="2.5s" repeatCount="indefinite" begin="-0.05s"></animateTransform>
</g>
</g><g transform="translate(50 50)">
<g>
<circle cx="8" cy="0" r="5" fill="#4f41ff">
<animate attributeName="r" keyTimes="0;0.5;1" values="5.3999999999999995;12.6;5.3999999999999995" dur="1.6666666666666665s" repeatCount="indefinite" begin="-0.1s"></animate>
</circle>
<animateTransform attributeName="transform" type="rotate" keyTimes="0;1" values="0;360" dur="1.6666666666666665s" repeatCount="indefinite" begin="-0.1s"></animateTransform>
</g>
</g><g transform="translate(50 50)">
<g>
<circle cx="8" cy="0" r="5" fill="#8400ff">
<animate attributeName="r" keyTimes="0;0.5;1" values="5.3999999999999995;12.6;5.3999999999999995" dur="1.25s" repeatCount="indefinite" begin="-0.05s"></animate>
</circle>
<animateTransform attributeName="transform" type="rotate" keyTimes="0;1" values="0;360" dur="1.25s" repeatCount="indefinite" begin="-0.15000000000000002s"></animateTransform>
</g>
</g></g>
<!-- [ldio] generated by https://loading.io/ --></svg>

Before

Width:  |  Height:  |  Size: 2.5 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 11 KiB

View file

@ -12,6 +12,7 @@ export default function AccordionComponent({
children,
open = [],
keyValue,
sideBar,
}: AccordionComponentType): JSX.Element {
const [value, setValue] = useState(
open.length === 0 ? "" : getOpenAccordion()
@ -45,7 +46,9 @@ export default function AccordionComponent({
onClick={() => {
handleClick();
}}
className="ml-3"
className={
sideBar ? "w-full bg-muted px-[0.75rem] py-[0.5rem]" : "ml-3"
}
>
{trigger}
</AccordionTrigger>

View file

@ -74,7 +74,7 @@ export const EditFlowSettings: React.FC<InputProps> = ({
onChange={handleDescriptionChange}
value={description!}
placeholder="Flow description"
className="mt-2 max-h-[100px] font-normal"
className="mt-2 max-h-[100px] resize-none font-normal"
rows={3}
onDoubleClickCapture={(event) => {
handleFocus(event);

View file

@ -1,69 +0,0 @@
import { cloneDeep } from "lodash";
import useFlowStore from "../../stores/flowStore";
import { IOInputProps } from "../../types/components";
import IOFileInput from "../IOInputs/FileInput";
import { Textarea } from "../ui/textarea";
export default function IOInputField({
inputType,
inputId,
left,
}: IOInputProps): JSX.Element | undefined {
const nodes = useFlowStore((state) => state.nodes);
const setNode = useFlowStore((state) => state.setNode);
const node = nodes.find((node) => node.id === inputId);
function handleInputType() {
if (!node) return <>"No node found!"</>;
switch (inputType) {
case "TextInput":
return (
<Textarea
className={`w-full ${left ? "" : " h-full"}`}
placeholder={"Enter text..."}
value={node.data.node!.template["input_value"].value}
onChange={(e) => {
e.target.value;
if (node) {
let newNode = cloneDeep(node);
newNode.data.node!.template["input_value"].value =
e.target.value;
setNode(node.id, newNode);
}
}}
/>
);
case "FileLoader":
return (
<IOFileInput
field={node.data.node!.template["file_path"]["value"]}
updateValue={(e) => {
if (node) {
let newNode = cloneDeep(node);
newNode.data.node!.template["file_path"].value = e;
setNode(node.id, newNode);
}
}}
/>
);
default:
return (
<Textarea
className="w-full custom-scroll"
placeholder={"Enter text..."}
value={node.data.node!.template["input_value"]}
onChange={(e) => {
e.target.value;
if (node) {
let newNode = cloneDeep(node);
newNode.data.node!.template["input_value"].value =
e.target.value;
setNode(node.id, newNode);
}
}}
/>
);
}
}
return handleInputType();
}

View file

@ -1,47 +0,0 @@
import useFlowStore from "../../stores/flowStore";
import { IOOutputProps } from "../../types/components";
import { Textarea } from "../ui/textarea";
export default function IOOutputView({
outputType,
outputId,
left,
}: IOOutputProps): JSX.Element | undefined {
const nodes = useFlowStore((state) => state.nodes);
const setNode = useFlowStore((state) => state.setNode);
const flowPool = useFlowStore((state) => state.flowPool);
const node = nodes.find((node) => node.id === outputId);
function handleOutputType() {
if (!node) return <>"No node found!"</>;
switch (outputType) {
case "TextOutput":
return (
<Textarea
className={`w-full custom-scroll ${left ? "" : " h-full"}`}
placeholder={"Empty"}
// update to real value on flowPool
value={
(flowPool[node.id] ?? [])[(flowPool[node.id]?.length ?? 1) - 1]
?.params ?? ""
}
readOnly
/>
);
default:
return (
<Textarea
className={`w-full custom-scroll ${left ? "" : " h-full"}`}
placeholder={"Empty"}
// update to real value on flowPool
value={
(flowPool[node.id] ?? [])[(flowPool[node.id]?.length ?? 1) - 1]
?.params ?? ""
}
readOnly
/>
);
}
}
return handleOutputType();
}

View file

@ -1,17 +0,0 @@
import Tooltip, { TooltipProps, tooltipClasses } from "@mui/material/Tooltip";
import { styled } from "@mui/material/styles";
export const LightTooltip = styled(({ className, ...props }: TooltipProps) => (
<Tooltip {...props} classes={{ popper: className }} />
))(({ theme }) => ({
[`& .${tooltipClasses.tooltip}`]: {
backgroundColor: theme.palette.common.white,
color: "rgba(0, 0, 0, 0.87)",
boxShadow: theme.shadows[2],
fontSize: 14,
},
[`& .${tooltipClasses.arrow}:before`]: {
color: theme.palette.common.white,
boxShadow: theme.shadows[1],
},
}));

View file

@ -1,3 +0,0 @@
export default function LoadingSpinner({}) {
return <></>;
}

View file

@ -1,33 +0,0 @@
import { useNavigate } from "react-router-dom";
import useFlowsManagerStore from "../../stores/flowsManagerStore";
import { cn } from "../../utils/utils";
import IconComponent from "../genericIconComponent";
import { Card, CardContent } from "../ui/card";
export default function NewFlowCardComponent({}: {}) {
const addFlow = useFlowsManagerStore((state) => state.addFlow);
const navigate = useNavigate();
return (
<Card
className={cn(
"group relative flex h-48 w-2/6 flex-col justify-between overflow-hidden transition-all hover:shadow-md"
)}
>
<CardContent className="flex h-full w-full items-center justify-center align-middle">
<button
onClick={() => {
addFlow(true).then((id) => {
navigate("/flow/" + id);
});
}}
>
<IconComponent
className={cn("h-12 w-12 text-muted-foreground")}
name="PlusCircle"
/>
</button>
</CardContent>
</Card>
);
}

View file

@ -1,18 +0,0 @@
import { RadialProgressType } from "../../types/components";
export default function RadialProgressComponent({
value,
color,
}: RadialProgressType): JSX.Element {
const style = {
"--value": value! * 100,
"--size": "1.5rem",
"--thickness": "2px",
} as React.CSSProperties;
return (
<div className={"radial-progress " + color} style={style}>
<strong className="text-[8px]">{Math.trunc(value! * 100)}%</strong>
</div>
);
}

View file

@ -1,45 +0,0 @@
"use client";
import type { FC } from "react";
import React from "react";
import { Tooltip as ReactTooltip } from "react-tooltip";
import "react-tooltip/dist/react-tooltip.css";
import { TooltipProps } from "../../types/components";
import { classNames } from "../../utils/utils";
const TooltipReact: FC<TooltipProps> = ({
selector,
content,
disabled,
position = "top",
children,
htmlContent,
className,
clickable,
delayShow,
}: TooltipProps): JSX.Element => {
return (
<div className="tooltip-container">
{React.cloneElement(children as React.ReactElement, {
"data-tooltip-id": selector,
})}
<ReactTooltip
id={selector}
content={content}
className={classNames(
"z-[9999] !bg-white !text-xs !font-normal !text-foreground !opacity-100 !shadow-md",
className!
)}
place={position}
clickable={clickable}
isOpen={disabled ? false : undefined}
delayShow={delayShow}
positionStrategy="absolute"
float={true}
>
{htmlContent && htmlContent}
</ReactTooltip>
</div>
);
};
export default TooltipReact;

View file

@ -1,14 +0,0 @@
import { TooltipComponentType } from "../../types/components";
import { LightTooltip } from "../LightTooltipComponent";
export default function Tooltip({
children,
title,
placement,
}: TooltipComponentType): JSX.Element {
return (
<LightTooltip placement={placement} title={title} arrow>
{children}
</LightTooltip>
);
}

Some files were not shown because too many files have changed in this diff Show more