diff --git a/src/backend/base/langflow/components/models/AnthropicModel.py b/src/backend/base/langflow/components/models/AnthropicModel.py
index 1c0fb6338..154bf970a 100644
--- a/src/backend/base/langflow/components/models/AnthropicModel.py
+++ b/src/backend/base/langflow/components/models/AnthropicModel.py
@@ -1,4 +1,3 @@
-from typing import Optional
 from langchain_anthropic.chat_models import ChatAnthropic
 from pydantic.v1 import SecretStr
 
diff --git a/src/backend/base/langflow/components/models/ChatLiteLLMModel.py b/src/backend/base/langflow/components/models/ChatLiteLLMModel.py
index 2d0d1c7a0..99eda2afd 100644
--- a/src/backend/base/langflow/components/models/ChatLiteLLMModel.py
+++ b/src/backend/base/langflow/components/models/ChatLiteLLMModel.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional
+from typing import Optional
 
 from langchain_community.chat_models.litellm import ChatLiteLLM, ChatLiteLLMException
 from langflow.base.constants import STREAM_INFO_TEXT
@@ -177,4 +177,4 @@ class ChatLiteLLMModelComponent(LCModelComponent):
         )
 
         return output
-        
\ No newline at end of file
+
diff --git a/src/backend/base/langflow/components/models/GroqModel.py b/src/backend/base/langflow/components/models/GroqModel.py
index edff619b7..b114abe2a 100644
--- a/src/backend/base/langflow/components/models/GroqModel.py
+++ b/src/backend/base/langflow/components/models/GroqModel.py
@@ -1,4 +1,3 @@
-from typing import Optional
 from langchain_groq import ChatGroq
 from langflow.base.models.groq_constants import MODEL_NAMES
 
@@ -103,4 +102,4 @@ class GroqModel(LCModelComponent):
             streaming=stream,
         )
 
-        return output
\ No newline at end of file
+        return output
diff --git a/src/backend/base/langflow/components/models/HuggingFaceModel.py b/src/backend/base/langflow/components/models/HuggingFaceModel.py
index 98cd8950e..f15caa786 100644
--- a/src/backend/base/langflow/components/models/HuggingFaceModel.py
+++ b/src/backend/base/langflow/components/models/HuggingFaceModel.py
@@ -1,4 +1,3 @@
-from typing import Optional
 from langchain_community.chat_models.huggingface import ChatHuggingFace
 from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
 
@@ -65,4 +64,4 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
             raise ValueError("Could not connect to HuggingFace Endpoints API.") from e
 
         output = ChatHuggingFace(llm=llm)
-        return output
\ No newline at end of file
+        return output
diff --git a/src/backend/base/langflow/components/models/OllamaModel.py b/src/backend/base/langflow/components/models/OllamaModel.py
index bcca0b107..095554dcb 100644
--- a/src/backend/base/langflow/components/models/OllamaModel.py
+++ b/src/backend/base/langflow/components/models/OllamaModel.py
@@ -1,4 +1,3 @@
-from typing import Any, Dict, List, Optional
 from langchain_community.chat_models import ChatOllama
 from langflow.base.constants import STREAM_INFO_TEXT
 
@@ -226,4 +225,4 @@ class ChatOllamaComponent(LCModelComponent):
         except Exception as e:
             raise ValueError("Could not initialize Ollama LLM.") from e
 
-        return output
\ No newline at end of file
+        return output
diff --git a/src/backend/base/langflow/custom/custom_component/custom_component.py b/src/backend/base/langflow/custom/custom_component/custom_component.py
index e324a442a..9eec57693 100644
--- a/src/backend/base/langflow/custom/custom_component/custom_component.py
+++ b/src/backend/base/langflow/custom/custom_component/custom_component.py
@@ -21,7 +21,6 @@ from langflow.type_extraction.type_extraction import (
     extract_union_types_from_generic_alias,
 )
 from langflow.utils import validate
-from pydantic import BaseModel
 
 if TYPE_CHECKING:
     from langflow.graph.graph.base import Graph
diff --git a/src/backend/base/langflow/graph/vertex/base.py b/src/backend/base/langflow/graph/vertex/base.py
index 3e1db9063..4c1595c20 100644
--- a/src/backend/base/langflow/graph/vertex/base.py
+++ b/src/backend/base/langflow/graph/vertex/base.py
@@ -13,7 +13,7 @@ from langflow.graph.utils import UnbuiltObject, UnbuiltResult
 from langflow.interface.initialize import loading
 from langflow.interface.listing import lazy_load_dict
 from langflow.schema.artifact import ArtifactType
-from langflow.schema.schema import INPUT_FIELD_NAME, Log, build_logs
+from langflow.schema.schema import INPUT_FIELD_NAME, Log
 from langflow.services.deps import get_storage_service
 from langflow.services.monitor.utils import log_transaction
 from langflow.utils.constants import DIRECT_TYPES
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json b/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json
index 0dae27634..8b8566b12 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json
@@ -8,19 +8,12 @@
           "dataType": "TextOutput",
           "id": "TextOutput-BDknO",
           "name": "Text",
-          "output_types": [
-            "Text"
-          ]
+          "output_types": ["Text"]
         },
         "targetHandle": {
           "fieldName": "context",
           "id": "Prompt-xeI6K",
-          "inputTypes": [
-            "Document",
-            "Message",
-            "Record",
-            "Text"
-          ],
+          "inputTypes": ["Document", "Message", "Record", "Text"],
           "type": "str"
         }
       },
@@ -41,19 +34,12 @@
           "dataType": "ChatInput",
           "id": "ChatInput-yxMKE",
           "name": "message",
-          "output_types": [
-            "Message"
-          ]
+          "output_types": ["Message"]
         },
         "targetHandle": {
           "fieldName": "question",
           "id": "Prompt-xeI6K",
-          "inputTypes": [
-            "Document",
-            "Message",
-            "Record",
-            "Text"
-          ],
+          "inputTypes": ["Document", "Message", "Record", "Text"],
           "type": "str"
         }
       },
@@ -74,18 +60,12 @@
           "dataType": "Prompt",
           "id": "Prompt-xeI6K",
           "name": "prompt",
-          "output_types": [
-            "Prompt"
-          ]
+          "output_types": ["Prompt"]
         },
         "targetHandle": {
           "fieldName": "input_value",
           "id": "OpenAIModel-EjXlN",
-          "inputTypes": [
-            "Text",
-            "Data",
-            "Prompt"
-          ],
+          "inputTypes": ["Text", "Data", "Prompt"],
           "type": "str"
         }
       },
@@ -106,16 +86,12 @@
           "dataType": "OpenAIModel",
           "id": "OpenAIModel-EjXlN",
           "name": "text_output",
-          "output_types": [
-            "Text"
-          ]
+          "output_types": ["Text"]
         },
         "targetHandle": {
           "fieldName": "input_value",
           "id": "ChatOutput-Q39I8",
-          "inputTypes": [
-            "Text"
-          ],
+          "inputTypes": ["Text"],
           "type": "str"
         }
       },
@@ -141,10 +117,7 @@
         "targetHandle": {
           "fieldName": "inputs",
           "id": "RecursiveCharacterTextSplitter-tR9QM",
-          "inputTypes": [
-            "Document",
-            "Data"
-          ],
+          "inputTypes": ["Document", "Data"],
           "type": "Document"
         }
       },
@@ -165,9 +138,7 @@
           "dataType": "OpenAIEmbeddings",
           "id": "OpenAIEmbeddings-ZlOk1",
           "name": "embeddings",
-          "output_types": [
-            "Embeddings"
-          ]
+          "output_types": ["Embeddings"]
         },
         "targetHandle": {
           "fieldName": "embedding",
@@ -192,16 +163,12 @@
           "dataType": "ChatInput",
           "id": "ChatInput-yxMKE",
           "name": "message",
-          "output_types": [
-            "Message"
-          ]
+          "output_types": ["Message"]
         },
         "targetHandle": {
           "fieldName": "input_value",
           "id": "AstraDBSearch-41nRz",
-          "inputTypes": [
-            "Text"
-          ],
+          "inputTypes": ["Text"],
           "type": "str"
         }
       },
@@ -247,9 +214,7 @@
           "dataType": "OpenAIEmbeddings",
           "id": "OpenAIEmbeddings-9TPjc",
           "name": "embeddings",
"output_types": [ - "Embeddings" - ] + "output_types": ["Embeddings"] }, "targetHandle": { "fieldName": "embedding", @@ -280,10 +245,7 @@ "targetHandle": { "fieldName": "input_value", "id": "TextOutput-BDknO", - "inputTypes": [ - "Record", - "Text" - ], + "inputTypes": ["Record", "Text"], "type": "str" } }, @@ -302,12 +264,7 @@ "data": { "id": "ChatInput-yxMKE", "node": { - "base_classes": [ - "Text", - "str", - "object", - "Record" - ], + "base_classes": ["Text", "str", "object", "Record"], "beta": false, "custom_fields": { "input_value": null, @@ -331,9 +288,7 @@ "method": "message_response", "name": "message", "selected": "Message", - "types": [ - "Message" - ], + "types": ["Message"], "value": "__UNDEFINED__" }, { @@ -342,9 +297,7 @@ "method": "text_response", "name": "text", "selected": "Text", - "types": [ - "Text" - ], + "types": ["Text"], "value": "__UNDEFINED__" } ], @@ -395,17 +348,12 @@ "fileTypes": [], "file_path": "", "info": "Type of sender.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": true, "load_from_db": false, "multiline": false, "name": "sender", - "options": [ - "Machine", - "User" - ], + "options": ["Machine", "User"], "password": false, "placeholder": "", "required": false, @@ -421,9 +369,7 @@ "fileTypes": [], "file_path": "", "info": "Name of the sender.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -443,9 +389,7 @@ "fileTypes": [], "file_path": "", "info": "Session ID for the message.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -476,11 +420,7 @@ "data": { "id": "TextOutput-BDknO", "node": { - "base_classes": [ - "object", - "Text", - "str" - ], + "base_classes": ["object", "Text", "str"], "beta": false, "custom_fields": { "input_value": null, @@ -493,16 +433,12 @@ "field_order": [], "frozen": false, "icon": "type", - "output_types": [ - "Text" - ], + "output_types": ["Text"], "outputs": [ { "name": "Text", "selected": "Text", - "types": [ - "Text" - ] + "types": ["Text"] } ], "template": { @@ -532,10 +468,7 @@ "fileTypes": [], "file_path": "", "info": "Text or Record to be passed as output.", - "input_types": [ - "Record", - "Text" - ], + "input_types": ["Record", "Text"], "list": false, "load_from_db": false, "multiline": false, @@ -555,9 +488,7 @@ "fileTypes": [], "file_path": "", "info": "Template to convert Record to Text. 
            "info": "Template to convert Record to Text. If left empty, it will be dynamically set to the Record's text key.",
-            "input_types": [
-              "Text"
-            ],
+            "input_types": ["Text"],
            "list": false,
            "load_from_db": false,
            "multiline": true,
@@ -593,9 +524,7 @@
      "data": {
        "id": "OpenAIEmbeddings-ZlOk1",
        "node": {
-          "base_classes": [
-            "Embeddings"
-          ],
+          "base_classes": ["Embeddings"],
          "beta": false,
          "custom_fields": {
            "allowed_special": null,
@@ -636,9 +565,7 @@
              "method": "build_embeddings",
              "name": "embeddings",
              "selected": "Embeddings",
-              "types": [
-                "Embeddings"
-              ],
+              "types": ["Embeddings"],
              "value": "__UNDEFINED__"
            }
          ],
@@ -663,9 +590,7 @@
            "display_name": "Client",
            "dynamic": false,
            "info": "",
-            "input_types": [
-              "Text"
-            ],
+            "input_types": ["Text"],
            "list": false,
            "load_from_db": false,
            "name": "client",
@@ -692,7 +617,7 @@
            "show": true,
            "title_case": false,
            "type": "code",
name=\"embeddings\", method=\"build_embeddings\"),\n ]\n\n def build_embeddings(self) -> Embeddings:\n return OpenAIEmbeddings(\n tiktoken_enabled=self.tiktoken_enable,\n default_headers=self.default_headers,\n default_query=self.default_query,\n allowed_special=\"all\",\n disallowed_special=\"all\",\n chunk_size=self.chunk_size,\n deployment=self.deployment,\n embedding_ctx_length=self.embedding_ctx_length,\n max_retries=self.max_retries,\n model=self.model,\n model_kwargs=self.model_kwargs,\n base_url=self.openai_api_base,\n api_key=self.openai_api_key,\n openai_api_type=self.openai_api_type,\n api_version=self.openai_api_version,\n organization=self.openai_organization,\n openai_proxy=self.openai_proxy,\n timeout=self.request_timeout,\n show_progress_bar=self.show_progress_bar,\n skip_empty=self.skip_empty,\n tiktoken_model_name=self.tiktoken_model_name,\n )\n" + "value": "from langchain_openai.embeddings.base import OpenAIEmbeddings\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.field_typing import Embeddings\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput\nfrom langflow.template import Output\n\n\nclass OpenAIEmbeddingsComponent(LCModelComponent):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n icon = \"OpenAI\"\n inputs = [\n DictInput(\n name=\"default_headers\",\n display_name=\"Default Headers\",\n advanced=True,\n info=\"Default headers to use for the API request.\",\n ),\n DictInput(\n name=\"default_query\",\n display_name=\"Default Query\",\n advanced=True,\n info=\"Default query parameters to use for the API request.\",\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n StrInput(name=\"client\", display_name=\"Client\", advanced=True),\n StrInput(name=\"deployment\", display_name=\"Deployment\", advanced=True),\n IntInput(name=\"embedding_ctx_length\", display_name=\"Embedding Context Length\", advanced=True, value=1536),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", value=3, advanced=True),\n DropdownInput(\n name=\"model\",\n display_name=\"Model\",\n advanced=False,\n options=[\n \"text-embedding-3-small\",\n \"text-embedding-3-large\",\n \"text-embedding-ada-002\",\n ],\n value=\"text-embedding-3-small\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n SecretStrInput(name=\"openai_api_base\", display_name=\"OpenAI API Base\", advanced=True),\n SecretStrInput(name=\"openai_api_key\", display_name=\"OpenAI API Key\"),\n SecretStrInput(name=\"openai_api_type\", display_name=\"OpenAI API Type\", advanced=True),\n StrInput(name=\"openai_api_version\", display_name=\"OpenAI API Version\", advanced=True),\n StrInput(\n name=\"openai_organization\",\n display_name=\"OpenAI Organization\",\n advanced=True,\n ),\n StrInput(name=\"openai_proxy\", display_name=\"OpenAI Proxy\", advanced=True),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n BoolInput(name=\"skip_empty\", display_name=\"Skip Empty\", advanced=True),\n StrInput(\n name=\"tiktoken_model_name\",\n display_name=\"TikToken Model Name\",\n advanced=True,\n ),\n BoolInput(name=\"tiktoken_enable\", display_name=\"TikToken Enable\", advanced=True, value=True),\n ]\n\n outputs = [\n Output(display_name=\"Embeddings\", name=\"embeddings\", 
method=\"build_embeddings\"),\n ]\n\n def build_embeddings(self) -> Embeddings:\n return OpenAIEmbeddings(\n tiktoken_enabled=self.tiktoken_enable,\n default_headers=self.default_headers,\n default_query=self.default_query,\n allowed_special=\"all\",\n disallowed_special=\"all\",\n chunk_size=self.chunk_size,\n deployment=self.deployment,\n embedding_ctx_length=self.embedding_ctx_length,\n max_retries=self.max_retries,\n model=self.model,\n model_kwargs=self.model_kwargs,\n base_url=self.openai_api_base,\n api_key=self.openai_api_key,\n openai_api_type=self.openai_api_type,\n api_version=self.openai_api_version,\n organization=self.openai_organization,\n openai_proxy=self.openai_proxy,\n timeout=self.request_timeout,\n show_progress_bar=self.show_progress_bar,\n skip_empty=self.skip_empty,\n tiktoken_model_name=self.tiktoken_model_name,\n )\n" }, "default_headers": { "advanced": true, @@ -727,9 +652,7 @@ "display_name": "Deployment", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "name": "deployment", @@ -805,9 +728,7 @@ "display_name": "OpenAI API Base", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "load_from_db": true, "name": "openai_api_base", "password": true, @@ -823,9 +744,7 @@ "display_name": "OpenAI API Key", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "load_from_db": true, "name": "openai_api_key", "password": true, @@ -841,9 +760,7 @@ "display_name": "OpenAI API Type", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "load_from_db": true, "name": "openai_api_type", "password": true, @@ -859,9 +776,7 @@ "display_name": "OpenAI API Version", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "name": "openai_api_version", @@ -877,9 +792,7 @@ "display_name": "OpenAI Organization", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "name": "openai_organization", @@ -895,9 +808,7 @@ "display_name": "OpenAI Proxy", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "name": "openai_proxy", @@ -969,9 +880,7 @@ "display_name": "TikToken Model Name", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "name": "tiktoken_model_name", @@ -1001,11 +910,7 @@ "data": { "id": "OpenAIModel-EjXlN", "node": { - "base_classes": [ - "object", - "Text", - "str" - ], + "base_classes": ["object", "Text", "str"], "beta": false, "custom_fields": { "input_value": null, @@ -1043,9 +948,7 @@ "method": "text_response", "name": "text_output", "selected": "Text", - "types": [ - "Text" - ], + "types": ["Text"], "value": "__UNDEFINED__" }, { @@ -1054,9 +957,7 @@ "method": "build_model", "name": "model_output", "selected": "BaseLanguageModel", - "types": [ - "BaseLanguageModel" - ], + "types": ["BaseLanguageModel"], "value": "__UNDEFINED__" } ], @@ -1087,11 +988,7 @@ "fileTypes": [], "file_path": "", "info": "", - "input_types": [ - "Text", - "Data", - "Prompt" - ], + "input_types": ["Text", "Data", "Prompt"], "list": false, "load_from_db": false, "multiline": false, @@ -1111,9 +1008,7 @@ "fileTypes": [], "file_path": "", "info": "The maximum number of tokens to generate. 
            "info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
-            "input_types": [
-              "Text"
-            ],
+            "input_types": ["Text"],
            "list": false,
            "load_from_db": false,
            "multiline": false,
@@ -1133,9 +1028,7 @@
            "fileTypes": [],
            "file_path": "",
            "info": "",
-            "input_types": [
-              "Text"
-            ],
+            "input_types": ["Text"],
            "list": false,
            "load_from_db": false,
            "multiline": false,
@@ -1155,9 +1048,7 @@
            "fileTypes": [],
            "file_path": "",
            "info": "",
-            "input_types": [
-              "Text"
-            ],
+            "input_types": ["Text"],
            "list": true,
            "load_from_db": false,
            "multiline": false,
@@ -1184,9 +1075,7 @@
            "fileTypes": [],
            "file_path": "",
            "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
-            "input_types": [
-              "Text"
-            ],
+            "input_types": ["Text"],
            "list": false,
            "load_from_db": false,
            "multiline": false,
@@ -1206,9 +1095,7 @@
            "fileTypes": [],
            "file_path": "",
            "info": "The OpenAI API Key to use for the OpenAI model.",
-            "input_types": [
-              "Text"
-            ],
+            "input_types": ["Text"],
            "list": false,
            "load_from_db": true,
            "multiline": false,
@@ -1228,9 +1115,7 @@
            "fileTypes": [],
            "file_path": "",
            "info": "Stream the response from the model. Streaming works only in Chat.",
-            "input_types": [
-              "Text"
-            ],
+            "input_types": ["Text"],
            "list": false,
            "load_from_db": false,
            "multiline": false,
@@ -1250,9 +1135,7 @@
            "fileTypes": [],
            "file_path": "",
            "info": "System message to pass to the model.",
-            "input_types": [
-              "Text"
-            ],
+            "input_types": ["Text"],
            "list": false,
            "load_from_db": false,
            "multiline": false,
@@ -1272,9 +1155,7 @@
            "fileTypes": [],
            "file_path": "",
            "info": "",
-            "input_types": [
-              "Text"
-            ],
+            "input_types": ["Text"],
            "list": false,
            "load_from_db": false,
            "multiline": false,
@@ -1312,17 +1193,10 @@
      "display_name": "Prompt",
      "id": "Prompt-xeI6K",
      "node": {
-        "base_classes": [
-          "object",
-          "Text",
-          "str"
-        ],
+        "base_classes": ["object", "Text", "str"],
        "beta": false,
        "custom_fields": {
-          "template": [
-            "context",
-            "question"
-          ]
+          "template": ["context", "question"]
        },
        "description": "Create a prompt template with dynamic variables.",
        "display_name": "Prompt",
@@ -1345,9 +1219,7 @@
              "method": "build_prompt",
              "name": "prompt",
              "selected": "Prompt",
-              "types": [
-                "Prompt"
-              ],
+              "types": ["Prompt"],
              "value": "__UNDEFINED__"
            },
            {
@@ -1356,9 +1228,7 @@
              "method": "format_prompt",
              "name": "text",
              "selected": "Text",
-              "types": [
-                "Text"
-              ],
+              "types": ["Text"],
              "value": "__UNDEFINED__"
            }
          ],
@@ -1390,12 +1260,7 @@
            "fileTypes": [],
            "file_path": "",
            "info": "",
-            "input_types": [
-              "Document",
-              "Message",
-              "Record",
-              "Text"
-            ],
+            "input_types": ["Document", "Message", "Record", "Text"],
            "list": false,
            "load_from_db": false,
            "multiline": true,
@@ -1416,12 +1281,7 @@
            "fileTypes": [],
            "file_path": "",
            "info": "",
-            "input_types": [
-              "Document",
-              "Message",
-              "Record",
-              "Text"
-            ],
+            "input_types": ["Document", "Message", "Record", "Text"],
            "list": false,
            "load_from_db": false,
            "multiline": true,
@@ -1441,9 +1301,7 @@
            "fileTypes": [],
            "file_path": "",
            "info": "",
-            "input_types": [
-              "Text"
-            ],
+            "input_types": ["Text"],
            "list": false,
            "load_from_db": false,
            "multiline": false,
@@ -1479,12 +1337,7 @@
      "data": {
        "id": "ChatOutput-Q39I8",
        "node": {
-          "base_classes": [
-            "object",
-            "Text",
-            "Record",
-            "str"
-          ],
+          "base_classes": ["object", "Text", "Record", "str"],
          "beta": false,
          "custom_fields": {
            "input_value": null,
@@ -1509,9 +1362,7 @@
              "method": "message_response",
              "name": "message",
              "selected": "Message",
-              "types": [
-                "Message"
-              ],
["Message"], "value": "__UNDEFINED__" }, { @@ -1520,9 +1371,7 @@ "method": "text_response", "name": "text", "selected": "Text", - "types": [ - "Text" - ], + "types": ["Text"], "value": "__UNDEFINED__" } ], @@ -1553,9 +1402,7 @@ "fileTypes": [], "file_path": "", "info": "Message to be passed as output.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": true, @@ -1575,17 +1422,12 @@ "fileTypes": [], "file_path": "", "info": "Type of sender.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": true, "load_from_db": false, "multiline": false, "name": "sender", - "options": [ - "Machine", - "User" - ], + "options": ["Machine", "User"], "password": false, "placeholder": "", "required": false, @@ -1601,9 +1443,7 @@ "fileTypes": [], "file_path": "", "info": "Name of the sender.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -1623,9 +1463,7 @@ "fileTypes": [], "file_path": "", "info": "Session ID for the message.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -1661,9 +1499,7 @@ "data": { "id": "File-t0a6a", "node": { - "base_classes": [ - "Record" - ], + "base_classes": ["Record"], "beta": false, "custom_fields": { "path": null, @@ -1684,9 +1520,7 @@ "method": "load_file", "name": "data", "selected": "Data", - "types": [ - "Data" - ], + "types": ["Data"], "value": "__UNDEFINED__" } ], @@ -1781,9 +1615,7 @@ "data": { "id": "RecursiveCharacterTextSplitter-tR9QM", "node": { - "base_classes": [ - "Record" - ], + "base_classes": ["Record"], "beta": false, "custom_fields": { "chunk_overlap": null, @@ -1797,9 +1629,7 @@ "field_formatters": {}, "field_order": [], "frozen": false, - "output_types": [ - "Data" - ], + "output_types": ["Data"], "outputs": [ { "cache": true, @@ -1808,9 +1638,7 @@ "method": null, "name": "data", "selected": "Data", - "types": [ - "Data" - ], + "types": ["Data"], "value": "__UNDEFINED__" } ], @@ -1879,10 +1707,7 @@ "fileTypes": [], "file_path": "", "info": "The texts to split.", - "input_types": [ - "Document", - "Data" - ], + "input_types": ["Document", "Data"], "list": true, "load_from_db": false, "multiline": false, @@ -1901,9 +1726,7 @@ "fileTypes": [], "file_path": "", "info": "The characters to split on.\nIf left empty defaults to [\"\\n\\n\", \"\\n\", \" \", \"\"].", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": true, "load_from_db": false, "multiline": false, @@ -1914,9 +1737,7 @@ "show": true, "title_case": false, "type": "str", - "value": [ - "" - ] + "value": [""] } } }, @@ -1941,9 +1762,7 @@ "data": { "id": "AstraDBSearch-41nRz", "node": { - "base_classes": [ - "Record" - ], + "base_classes": ["Record"], "beta": false, "custom_fields": { "api_endpoint": null, @@ -1978,9 +1797,7 @@ ], "frozen": false, "icon": "AstraDB", - "output_types": [ - "Data" - ], + "output_types": ["Data"], "outputs": [ { "cache": true, @@ -1989,9 +1806,7 @@ "method": null, "name": "data", "selected": "Data", - "types": [ - "Data" - ], + "types": ["Data"], "value": "__UNDEFINED__" } ], @@ -2004,9 +1819,7 @@ "fileTypes": [], "file_path": "", "info": "API endpoint URL for the Astra DB service.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -2134,9 +1947,7 @@ "fileTypes": [], "file_path": "", "info": "The name of the collection within Astra DB where the vectors will be 
stored.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -2174,9 +1985,7 @@ "fileTypes": [], "file_path": "", "info": "Input value to search", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -2195,9 +2004,7 @@ "fileTypes": [], "file_path": "", "info": "Optional list of metadata fields to exclude from the indexing.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": true, "load_from_db": false, "multiline": false, @@ -2216,9 +2023,7 @@ "fileTypes": [], "file_path": "", "info": "Optional list of metadata fields to include in the indexing.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": true, "load_from_db": false, "multiline": false, @@ -2237,9 +2042,7 @@ "fileTypes": [], "file_path": "", "info": "Optional distance metric for vector comparisons in the vector store.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -2258,9 +2061,7 @@ "fileTypes": [], "file_path": "", "info": "Optional namespace within Astra DB to use for the collection.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -2317,17 +2118,12 @@ "fileTypes": [], "file_path": "", "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": true, "load_from_db": false, "multiline": false, "name": "search_type", - "options": [ - "Similarity", - "MMR" - ], + "options": ["Similarity", "MMR"], "password": false, "placeholder": "", "required": false, @@ -2343,18 +2139,12 @@ "fileTypes": [], "file_path": "", "info": "Configuration mode for setting up the vector store, with options like “Sync”, “Async”, or “Off”.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": true, "load_from_db": false, "multiline": false, "name": "setup_mode", - "options": [ - "Sync", - "Async", - "Off" - ], + "options": ["Sync", "Async", "Off"], "password": false, "placeholder": "", "required": false, @@ -2370,9 +2160,7 @@ "fileTypes": [], "file_path": "", "info": "Authentication token for accessing Astra DB.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -2408,9 +2196,7 @@ "data": { "id": "AstraDB-eUCSS", "node": { - "base_classes": [ - "VectorStore" - ], + "base_classes": ["VectorStore"], "beta": false, "custom_fields": { "api_endpoint": null, @@ -2443,10 +2229,7 @@ ], "frozen": false, "icon": "AstraDB", - "output_types": [ - "VectorStore", - "BaseRetriever" - ], + "output_types": ["VectorStore", "BaseRetriever"], "outputs": [ { "cache": true, @@ -2455,9 +2238,7 @@ "method": null, "name": "vectorstore", "selected": "VectorStore", - "types": [ - "VectorStore" - ], + "types": ["VectorStore"], "value": "__UNDEFINED__" }, { @@ -2467,9 +2248,7 @@ "method": null, "name": "baseretriever", "selected": "BaseRetriever", - "types": [ - "BaseRetriever" - ], + "types": ["BaseRetriever"], "value": "__UNDEFINED__" } ], @@ -2482,9 +2261,7 @@ "fileTypes": [], "file_path": "", "info": "API endpoint URL for the Astra DB service.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -2612,9 +2389,7 @@ "fileTypes": [], "file_path": "", "info": "The name of the collection within Astra DB where the vectors will be stored.", - "input_types": [ - "Text" - ], + "input_types": 
["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -2670,9 +2445,7 @@ "fileTypes": [], "file_path": "", "info": "Optional list of metadata fields to exclude from the indexing.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": true, "load_from_db": false, "multiline": false, @@ -2691,9 +2464,7 @@ "fileTypes": [], "file_path": "", "info": "Optional list of metadata fields to include in the indexing.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": true, "load_from_db": false, "multiline": false, @@ -2712,9 +2483,7 @@ "fileTypes": [], "file_path": "", "info": "Optional distance metric for vector comparisons in the vector store.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -2733,9 +2502,7 @@ "fileTypes": [], "file_path": "", "info": "Optional namespace within Astra DB to use for the collection.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -2773,18 +2540,12 @@ "fileTypes": [], "file_path": "", "info": "Configuration mode for setting up the vector store, with options like “Sync”, “Async”, or “Off”.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": true, "load_from_db": false, "multiline": false, "name": "setup_mode", - "options": [ - "Sync", - "Async", - "Off" - ], + "options": ["Sync", "Async", "Off"], "password": false, "placeholder": "", "required": false, @@ -2800,9 +2561,7 @@ "fileTypes": [], "file_path": "", "info": "Authentication token for accessing Astra DB.", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "multiline": false, @@ -2838,9 +2597,7 @@ "data": { "id": "OpenAIEmbeddings-9TPjc", "node": { - "base_classes": [ - "Embeddings" - ], + "base_classes": ["Embeddings"], "beta": false, "custom_fields": { "allowed_special": null, @@ -2881,9 +2638,7 @@ "method": "build_embeddings", "name": "embeddings", "selected": "Embeddings", - "types": [ - "Embeddings" - ], + "types": ["Embeddings"], "value": "__UNDEFINED__" } ], @@ -2908,9 +2663,7 @@ "display_name": "Client", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "name": "client", @@ -2937,7 +2690,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_openai.embeddings.base import OpenAIEmbeddings\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.field_typing import Embeddings\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n DropdownInput\n)\nfrom langflow.template import Output\n\n\nclass OpenAIEmbeddingsComponent(LCModelComponent):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n icon = \"OpenAI\"\n inputs = [\n DictInput(\n name=\"default_headers\",\n display_name=\"Default Headers\",\n advanced=True,\n info=\"Default headers to use for the API request.\",\n ),\n DictInput(\n name=\"default_query\",\n display_name=\"Default Query\",\n advanced=True,\n info=\"Default query parameters to use for the API request.\",\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n StrInput(name=\"client\", display_name=\"Client\", advanced=True),\n StrInput(name=\"deployment\", display_name=\"Deployment\", advanced=True),\n IntInput(\n name=\"embedding_ctx_length\",\n 
display_name=\"Embedding Context Length\",\n advanced=True,\n value=1536\n ),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", value=3, advanced=True),\n DropdownInput(\n name=\"model\",\n display_name=\"Model\",\n advanced=False,\n options=[\n \"text-embedding-3-small\",\n \"text-embedding-3-large\",\n \"text-embedding-ada-002\",\n ],\n value=\"text-embedding-3-small\"\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n SecretStrInput(\n name=\"openai_api_base\", display_name=\"OpenAI API Base\", advanced=True\n ),\n SecretStrInput(name=\"openai_api_key\", display_name=\"OpenAI API Key\"),\n SecretStrInput(\n name=\"openai_api_type\", display_name=\"OpenAI API Type\", advanced=True\n ),\n StrInput(\n name=\"openai_api_version\", display_name=\"OpenAI API Version\", advanced=True\n ),\n StrInput(\n name=\"openai_organization\",\n display_name=\"OpenAI Organization\",\n advanced=True,\n ),\n StrInput(name=\"openai_proxy\", display_name=\"OpenAI Proxy\", advanced=True),\n FloatInput(\n name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True\n ),\n BoolInput(\n name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True\n ),\n BoolInput(name=\"skip_empty\", display_name=\"Skip Empty\", advanced=True),\n StrInput(\n name=\"tiktoken_model_name\",\n display_name=\"TikToken Model Name\",\n advanced=True,\n ),\n BoolInput(\n name=\"tiktoken_enable\", display_name=\"TikToken Enable\", advanced=True\n ),\n ]\n\n outputs = [\n Output(display_name=\"Embeddings\", name=\"embeddings\", method=\"build_embeddings\"),\n ]\n\n def build_embeddings(self) -> Embeddings:\n return OpenAIEmbeddings(\n tiktoken_enabled=self.tiktoken_enable,\n default_headers=self.default_headers,\n default_query=self.default_query,\n allowed_special=\"all\",\n disallowed_special=\"all\",\n chunk_size=self.chunk_size,\n deployment=self.deployment,\n embedding_ctx_length=self.embedding_ctx_length,\n max_retries=self.max_retries,\n model=self.model,\n model_kwargs=self.model_kwargs,\n base_url=self.openai_api_base,\n api_key=self.openai_api_key,\n openai_api_type=self.openai_api_type,\n api_version=self.openai_api_version,\n organization=self.openai_organization,\n openai_proxy=self.openai_proxy,\n timeout=self.request_timeout,\n show_progress_bar=self.show_progress_bar,\n skip_empty=self.skip_empty,\n tiktoken_model_name=self.tiktoken_model_name,\n )\n" + "value": "from langchain_openai.embeddings.base import OpenAIEmbeddings\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.field_typing import Embeddings\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput\nfrom langflow.template import Output\n\n\nclass OpenAIEmbeddingsComponent(LCModelComponent):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n icon = \"OpenAI\"\n inputs = [\n DictInput(\n name=\"default_headers\",\n display_name=\"Default Headers\",\n advanced=True,\n info=\"Default headers to use for the API request.\",\n ),\n DictInput(\n name=\"default_query\",\n display_name=\"Default Query\",\n advanced=True,\n info=\"Default query parameters to use for the API request.\",\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n StrInput(name=\"client\", display_name=\"Client\", advanced=True),\n StrInput(name=\"deployment\", display_name=\"Deployment\", advanced=True),\n IntInput(name=\"embedding_ctx_length\", 
display_name=\"Embedding Context Length\", advanced=True, value=1536),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", value=3, advanced=True),\n DropdownInput(\n name=\"model\",\n display_name=\"Model\",\n advanced=False,\n options=[\n \"text-embedding-3-small\",\n \"text-embedding-3-large\",\n \"text-embedding-ada-002\",\n ],\n value=\"text-embedding-3-small\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n SecretStrInput(name=\"openai_api_base\", display_name=\"OpenAI API Base\", advanced=True),\n SecretStrInput(name=\"openai_api_key\", display_name=\"OpenAI API Key\"),\n SecretStrInput(name=\"openai_api_type\", display_name=\"OpenAI API Type\", advanced=True),\n StrInput(name=\"openai_api_version\", display_name=\"OpenAI API Version\", advanced=True),\n StrInput(\n name=\"openai_organization\",\n display_name=\"OpenAI Organization\",\n advanced=True,\n ),\n StrInput(name=\"openai_proxy\", display_name=\"OpenAI Proxy\", advanced=True),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n BoolInput(name=\"skip_empty\", display_name=\"Skip Empty\", advanced=True),\n StrInput(\n name=\"tiktoken_model_name\",\n display_name=\"TikToken Model Name\",\n advanced=True,\n ),\n BoolInput(name=\"tiktoken_enable\", display_name=\"TikToken Enable\", advanced=True, value=True),\n ]\n\n outputs = [\n Output(display_name=\"Embeddings\", name=\"embeddings\", method=\"build_embeddings\"),\n ]\n\n def build_embeddings(self) -> Embeddings:\n return OpenAIEmbeddings(\n tiktoken_enabled=self.tiktoken_enable,\n default_headers=self.default_headers,\n default_query=self.default_query,\n allowed_special=\"all\",\n disallowed_special=\"all\",\n chunk_size=self.chunk_size,\n deployment=self.deployment,\n embedding_ctx_length=self.embedding_ctx_length,\n max_retries=self.max_retries,\n model=self.model,\n model_kwargs=self.model_kwargs,\n base_url=self.openai_api_base,\n api_key=self.openai_api_key,\n openai_api_type=self.openai_api_type,\n api_version=self.openai_api_version,\n organization=self.openai_organization,\n openai_proxy=self.openai_proxy,\n timeout=self.request_timeout,\n show_progress_bar=self.show_progress_bar,\n skip_empty=self.skip_empty,\n tiktoken_model_name=self.tiktoken_model_name,\n )\n" }, "default_headers": { "advanced": true, @@ -2972,9 +2725,7 @@ "display_name": "Deployment", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "name": "deployment", @@ -3050,9 +2801,7 @@ "display_name": "OpenAI API Base", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "load_from_db": true, "name": "openai_api_base", "password": true, @@ -3068,9 +2817,7 @@ "display_name": "OpenAI API Key", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "load_from_db": true, "name": "openai_api_key", "password": true, @@ -3086,9 +2833,7 @@ "display_name": "OpenAI API Type", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "load_from_db": true, "name": "openai_api_type", "password": true, @@ -3104,9 +2849,7 @@ "display_name": "OpenAI API Version", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "name": "openai_api_version", @@ -3122,9 +2865,7 @@ "display_name": 
"OpenAI Organization", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "name": "openai_organization", @@ -3140,9 +2881,7 @@ "display_name": "OpenAI Proxy", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "name": "openai_proxy", @@ -3214,9 +2953,7 @@ "display_name": "TikToken Model Name", "dynamic": false, "info": "", - "input_types": [ - "Text" - ], + "input_types": ["Text"], "list": false, "load_from_db": false, "name": "tiktoken_model_name", @@ -3258,4 +2995,4 @@ "is_component": false, "last_tested_version": "1.0.0a0", "name": "Vector Store RAG" -} \ No newline at end of file +} diff --git a/src/backend/base/langflow/interface/utils.py b/src/backend/base/langflow/interface/utils.py index 986352f15..e9a475875 100644 --- a/src/backend/base/langflow/interface/utils.py +++ b/src/backend/base/langflow/interface/utils.py @@ -117,51 +117,3 @@ def set_langchain_cache(settings): logger.warning(f"Could not import {cache_type}. ") else: logger.info("No LLM cache set.") - - -def build_template_from_class(name: str, type_to_cls_dict: Dict, add_function: bool = False): - classes = [item.__name__ for item in type_to_cls_dict.values()] - - # Raise error if name is not in chains - if name not in classes: - raise ValueError(f"{name} not found.") - - for _type, v in type_to_cls_dict.items(): - if v.__name__ == name: - _class = v - - # Get the docstring - docs = parse(_class.__doc__) - - variables = {"_type": _type} - - if "__fields__" in _class.__dict__: - for class_field_items, value in _class.__fields__.items(): - if class_field_items in ["callback_manager"]: - continue - variables[class_field_items] = {} - for name_, value_ in value.__repr_args__(): - if name_ == "default_factory": - try: - variables[class_field_items]["default"] = get_default_factory( - module=_class.__base__.__module__, - function=value_, - ) - except Exception: - variables[class_field_items]["default"] = None - elif name_ not in ["name"]: - variables[class_field_items][name_] = value_ - - variables[class_field_items]["placeholder"] = ( - docs.params[class_field_items] if class_field_items in docs.params else "" - ) - base_classes = get_base_classes(_class) - # Adding function to base classes to allow - # the output to be a function - if add_function: - base_classes.append("Callable") - return { - "template": format_dict(variables, name), - "description": docs.short_description or "", - "base_classes": base_classes, - } diff --git a/src/backend/base/langflow/schema/schema.py b/src/backend/base/langflow/schema/schema.py index 65aad288d..70e412186 100644 --- a/src/backend/base/langflow/schema/schema.py +++ b/src/backend/base/langflow/schema/schema.py @@ -1,4 +1,4 @@ -from typing import Any, Literal +from typing import Literal from typing_extensions import TypedDict diff --git a/src/frontend/src/components/inputComponent/index.tsx b/src/frontend/src/components/inputComponent/index.tsx index 10763de70..b5875b3dc 100644 --- a/src/frontend/src/components/inputComponent/index.tsx +++ b/src/frontend/src/components/inputComponent/index.tsx @@ -24,7 +24,6 @@ export default function InputComponent({ blurOnEnter = false, optionsIcon = "ChevronsUpDown", selectedOption, - setSelectedOption, selectedOptions = [], setSelectedOptions, @@ -72,7 +71,7 @@ export default function InputComponent({ editNode ? "input-edit-node" : "", password && editNode ? "pr-8" : "", password && !editNode ? 
"pr-10" : "", - className! + className!, )} placeholder={password && editNode ? "Key" : placeholder} onChange={(e) => { @@ -155,7 +154,7 @@ export default function InputComponent({ + + + + ); +} diff --git a/src/frontend/src/stores/flowStore.ts b/src/frontend/src/stores/flowStore.ts index 6d1c2ad5c..70e2eaff1 100644 --- a/src/frontend/src/stores/flowStore.ts +++ b/src/frontend/src/stores/flowStore.ts @@ -74,7 +74,7 @@ const useFlowStore = create((set, get) => ({ updateFlowPool: ( nodeId: string, data: VertexBuildTypeAPI | ChatOutputType | ChatInputType, - buildId?: string + buildId?: string, ) => { let newFlowPool = cloneDeep({ ...get().flowPool }); if (!newFlowPool[nodeId]) { @@ -167,7 +167,7 @@ const useFlowStore = create((set, get) => ({ flowsManager.autoSaveCurrentFlow( newChange, newEdges, - get().reactFlowInstance?.getViewport() ?? { x: 0, y: 0, zoom: 1 } + get().reactFlowInstance?.getViewport() ?? { x: 0, y: 0, zoom: 1 }, ); } }, @@ -183,7 +183,7 @@ const useFlowStore = create((set, get) => ({ flowsManager.autoSaveCurrentFlow( get().nodes, newChange, - get().reactFlowInstance?.getViewport() ?? { x: 0, y: 0, zoom: 1 } + get().reactFlowInstance?.getViewport() ?? { x: 0, y: 0, zoom: 1 }, ); } }, @@ -201,7 +201,7 @@ const useFlowStore = create((set, get) => ({ return newChange; } return node; - }) + }), ); }, getNode: (id: string) => { @@ -212,8 +212,8 @@ const useFlowStore = create((set, get) => ({ get().nodes.filter((node) => typeof nodeId === "string" ? node.id !== nodeId - : !nodeId.includes(node.id) - ) + : !nodeId.includes(node.id), + ), ); }, deleteEdge: (edgeId) => { @@ -221,8 +221,8 @@ const useFlowStore = create((set, get) => ({ get().edges.filter((edge) => typeof edgeId === "string" ? edge.id !== edgeId - : !edgeId.includes(edge.id) - ) + : !edgeId.includes(edge.id), + ), ); }, paste: (selection, position) => { @@ -288,7 +288,7 @@ const useFlowStore = create((set, get) => ({ let source = idsMap[edge.source]; let target = idsMap[edge.target]; const sourceHandleObject: sourceHandleType = scapeJSONParse( - edge.sourceHandle! + edge.sourceHandle!, ); let sourceHandle = scapedJSONStringfy({ ...sourceHandleObject, @@ -298,7 +298,7 @@ const useFlowStore = create((set, get) => ({ edge.data.sourceHandle = sourceHandleObject; const targetHandleObject: targetHandleType = scapeJSONParse( - edge.targetHandle! + edge.targetHandle!, ); let targetHandle = scapedJSONStringfy({ ...targetHandleObject, @@ -317,7 +317,7 @@ const useFlowStore = create((set, get) => ({ data: cloneDeep(edge.data), selected: false, }, - newEdges.map((edge) => ({ ...edge, selected: false })) + newEdges.map((edge) => ({ ...edge, selected: false })), ); }); get().setEdges(newEdges); @@ -336,10 +336,10 @@ const useFlowStore = create((set, get) => ({ }); const newNodes = get().nodes.filter( - (node) => !nodesIdsSelected.includes(node.id) + (node) => !nodesIdsSelected.includes(node.id), ); const newEdges = get().edges.filter( - (edge) => !edgesIdsSelected.includes(edge.id) + (edge) => !edgesIdsSelected.includes(edge.id), ); set({ nodes: newNodes, edges: newEdges }); @@ -397,7 +397,7 @@ const useFlowStore = create((set, get) => ({ // style: { stroke: "#555" }, // className: "stroke-foreground stroke-connection", }, - oldEdges + oldEdges, ); return newEdges; @@ -407,7 +407,7 @@ const useFlowStore = create((set, get) => ({ .autoSaveCurrentFlow( get().nodes, newEdges, - get().reactFlowInstance?.getViewport() ?? { x: 0, y: 0, zoom: 1 } + get().reactFlowInstance?.getViewport() ?? 
+        get().reactFlowInstance?.getViewport() ?? { x: 0, y: 0, zoom: 1 },
      );
  },
  unselectAll: () => {
@@ -442,7 +442,7 @@ const useFlowStore = create<FlowStoreType>((set, get) => ({
    function validateSubgraph(nodes: string[]) {
      const errorsObjs = validateNodes(
        get().nodes.filter((node) => nodes.includes(node.id)),
-        get().edges
+        get().edges,
      );
 
      const errors = errorsObjs.map((obj) => obj.errors).flat();
@@ -461,13 +461,13 @@ const useFlowStore = create<FlowStoreType>((set, get) => ({
    function handleBuildUpdate(
      vertexBuildData: VertexBuildTypeAPI,
      status: BuildStatus,
-      runId: string
+      runId: string,
    ) {
      if (vertexBuildData && vertexBuildData.inactivated_vertices) {
        get().removeFromVerticesBuild(vertexBuildData.inactivated_vertices);
        get().updateBuildStatus(
          vertexBuildData.inactivated_vertices,
-          BuildStatus.INACTIVE
+          BuildStatus.INACTIVE,
        );
      }
 
@@ -483,14 +483,14 @@ const useFlowStore = create<FlowStoreType>((set, get) => ({
      // next_vertices_ids should be next_vertices_ids without the inactivated vertices
      const next_vertices_ids = vertexBuildData.next_vertices_ids.filter(
-        (id) => !vertexBuildData.inactivated_vertices?.includes(id)
+        (id) => !vertexBuildData.inactivated_vertices?.includes(id),
      );
      const top_level_vertices = vertexBuildData.top_level_vertices.filter(
-        (vertex) => !vertexBuildData.inactivated_vertices?.includes(vertex)
+        (vertex) => !vertexBuildData.inactivated_vertices?.includes(vertex),
      );
      const nextVertices: VertexLayerElementType[] = zip(
        next_vertices_ids,
-        top_level_vertices
+        top_level_vertices,
      ).map(([id, reference]) => ({ id: id!, reference }));
 
      const newLayers = [
@@ -512,7 +512,7 @@ const useFlowStore = create<FlowStoreType>((set, get) => ({
 
      get().addDataToFlowPool(
        { ...vertexBuildData, run_id: runId },
-        vertexBuildData.id
+        vertexBuildData.id,
      );
 
      useFlowStore.getState().updateBuildStatus([vertexBuildData.id], status);
@@ -521,7 +521,7 @@ const useFlowStore = create<FlowStoreType>((set, get) => ({
    const newFlowBuildStatus = { ...get().flowBuildStatus };
    // filter out the vertices that are not status
    const verticesToUpdate = verticesIds?.filter(
-      (id) => newFlowBuildStatus[id]?.status !== BuildStatus.BUILT
+      (id) => newFlowBuildStatus[id]?.status !== BuildStatus.BUILT,
    );
 
    if (verticesToUpdate) {
@@ -542,15 +542,15 @@ const useFlowStore = create<FlowStoreType>((set, get) => ({
      onBuildComplete: (allNodesValid) => {
        const nodeId = startNodeId || stopNodeId;
        if (!silent) {
-          if (nodeId && allNodesValid) {
+          if (allNodesValid) {
            setSuccessData({
-              title: `${
-                get().nodes.find((node) => node.id === nodeId)?.data.node
-                  ?.display_name
-              } built successfully`,
+              title: nodeId
+                ? `${
+                    get().nodes.find((node) => node.id === nodeId)?.data.node
+                      ?.display_name
+                  } built successfully`
+                : FLOW_BUILD_SUCCESS_ALERT,
            });
-          } else {
-            setSuccessData({ title: FLOW_BUILD_SUCCESS_ALERT });
-          }
        }
        get().setIsBuilding(false);
@@ -591,7 +591,7 @@ const useFlowStore = create<FlowStoreType>((set, get) => ({
      verticesLayers: VertexLayerElementType[][];
      runId: string;
      verticesToRun: string[];
-    } | null
+    } | null,
  ) => {
    set({ verticesBuild: vertices });
  },
@@ -616,7 +616,7 @@ const useFlowStore = create<FlowStoreType>((set, get) => ({
        // that are going to be built
        verticesIds: get().verticesBuild!.verticesIds.filter(
          // keep the vertices that are not in the list of vertices to remove
-          (vertex) => !vertices.includes(vertex)
+          (vertex) => !vertices.includes(vertex),
        ),
      },
    });
diff --git a/src/frontend/src/utils/reactflowUtils.ts b/src/frontend/src/utils/reactflowUtils.ts
index 4e8b4b84f..c14e7b616 100644
--- a/src/frontend/src/utils/reactflowUtils.ts
+++ b/src/frontend/src/utils/reactflowUtils.ts
@@ -41,7 +41,7 @@ import {
   updateEdgesHandleIdsType,
 } from "../types/utils/reactflowUtils";
 import { createRandomKey, toTitleCase } from "./utils";
-const uid = new ShortUniqueId({ length: 5 });
+const uid = new ShortUniqueId();
 
 export function checkChatInput(nodes: Node[]) {
   return nodes.some((node) => node.data.type === "ChatInput");
 }
@@ -712,7 +712,6 @@ export function generateFlow(
   name: string,
 ): generateFlowType {
   const newFlowData = { nodes, edges, viewport: { zoom: 1, x: 0, y: 0 } };
-  const uid = new ShortUniqueId();
 
   /* remove edges that are not connected to selected nodes on both ends */
   newFlowData.edges = edges.filter(
diff --git a/tests/unit/test_template.py b/tests/unit/test_template.py
index 95ea4f528..49bc7bd4d 100644
--- a/tests/unit/test_template.py
+++ b/tests/unit/test_template.py
@@ -2,7 +2,6 @@ import importlib
 from typing import Dict, List, Optional
 
 import pytest
-from langflow.interface.utils import build_template_from_class
 from langflow.utils.util import build_template_from_function, get_base_classes, get_default_factory
 from pydantic import BaseModel
 
@@ -68,25 +67,6 @@ def test_build_template_from_function():
         build_template_from_function("NonExistent", type_to_loader_dict)
 
 
-# Test build_template_from_class
-def test_build_template_from_class():
-    type_to_cls_dict: Dict[str, type] = {"parent": Parent, "child": Child}
-
-    # Test valid input
-    result = build_template_from_class("Child", type_to_cls_dict)
-    assert result is not None
-    assert "template" in result
-    assert "description" in result
-    assert "base_classes" in result
-    assert "Child" in result["base_classes"]
-    assert "Parent" in result["base_classes"]
-    assert result["description"] == "Child Class"
-
-    # Test invalid input
-    with pytest.raises(ValueError, match="InvalidClass not found."):
-        build_template_from_class("InvalidClass", type_to_cls_dict)
-
-
 # Test get_base_classes
 def test_get_base_classes():
     base_classes_parent = get_base_classes(Parent)