diff --git a/src/backend/base/langflow/components/models/AmazonBedrockModel.py b/src/backend/base/langflow/components/models/AmazonBedrockModel.py index 1015f1684..2ab1f426e 100644 --- a/src/backend/base/langflow/components/models/AmazonBedrockModel.py +++ b/src/backend/base/langflow/components/models/AmazonBedrockModel.py @@ -58,7 +58,7 @@ class AmazonBedrockComponent(LCModelComponent): "advanced": True, }, "cache": {"display_name": "Cache"}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "system_message": { "display_name": "System Message", "info": "System message to pass to the model.", diff --git a/src/backend/base/langflow/components/models/AnthropicModel.py b/src/backend/base/langflow/components/models/AnthropicModel.py index cfe9ed900..8ea796f0d 100644 --- a/src/backend/base/langflow/components/models/AnthropicModel.py +++ b/src/backend/base/langflow/components/models/AnthropicModel.py @@ -63,7 +63,7 @@ class AnthropicLLM(LCModelComponent): "info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.", }, "code": {"show": False}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "advanced": True, diff --git a/src/backend/base/langflow/components/models/AzureOpenAIModel.py b/src/backend/base/langflow/components/models/AzureOpenAIModel.py index c296a8fae..54ac1ec84 100644 --- a/src/backend/base/langflow/components/models/AzureOpenAIModel.py +++ b/src/backend/base/langflow/components/models/AzureOpenAIModel.py @@ -78,7 +78,7 @@ class AzureChatOpenAIComponent(LCModelComponent): "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", }, "code": {"show": False}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py b/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py index f5e6497d0..8ff9a424f 100644 --- a/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py +++ b/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py @@ -81,7 +81,7 @@ class QianfanChatEndpointComponent(LCModelComponent): "info": "Endpoint of the Qianfan LLM, required if custom model used.", }, "code": {"show": False}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/ChatLiteLLMModel.py b/src/backend/base/langflow/components/models/ChatLiteLLMModel.py index 054b59d12..6d03f613e 100644 --- a/src/backend/base/langflow/components/models/ChatLiteLLMModel.py +++ b/src/backend/base/langflow/components/models/ChatLiteLLMModel.py @@ -111,7 +111,7 @@ class ChatLiteLLMModelComponent(LCModelComponent): "required": False, "default": False, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/CohereModel.py b/src/backend/base/langflow/components/models/CohereModel.py index 3bd12c095..a223a1455 100644 --- a/src/backend/base/langflow/components/models/CohereModel.py +++ b/src/backend/base/langflow/components/models/CohereModel.py @@ -1,10 +1,11 @@ from typing import Optional +from langchain_cohere import ChatCohere from pydantic.v1 import SecretStr -from 
langflow.field_typing import Text + from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent -from langchain_cohere import ChatCohere +from langflow.field_typing import Text class CohereComponent(LCModelComponent): @@ -42,7 +43,7 @@ class CohereComponent(LCModelComponent): "type": "float", "show": True, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/HuggingFaceModel.py b/src/backend/base/langflow/components/models/HuggingFaceModel.py index 19750ef9f..9b7949bc7 100644 --- a/src/backend/base/langflow/components/models/HuggingFaceModel.py +++ b/src/backend/base/langflow/components/models/HuggingFaceModel.py @@ -2,9 +2,10 @@ from typing import Optional from langchain_community.chat_models.huggingface import ChatHuggingFace from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint -from langflow.field_typing import Text + from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent +from langflow.field_typing import Text class HuggingFaceEndpointsComponent(LCModelComponent): @@ -36,7 +37,7 @@ class HuggingFaceEndpointsComponent(LCModelComponent): "advanced": True, }, "code": {"show": False}, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT,
diff --git a/src/backend/base/langflow/components/models/MistralModel.py b/src/backend/base/langflow/components/models/MistralModel.py index 305a45e4b..dc77655b0 100644 --- a/src/backend/base/langflow/components/models/MistralModel.py +++ b/src/backend/base/langflow/components/models/MistralModel.py @@ -27,7 +27,7 @@ class MistralAIModelComponent(LCModelComponent): def build_config(self): return { - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "max_tokens": { "display_name": "Max Tokens", "advanced": True, diff --git a/src/backend/base/langflow/components/models/OllamaModel.py b/src/backend/base/langflow/components/models/OllamaModel.py index f591e4a5c..a68fff9e3 100644 --- a/src/backend/base/langflow/components/models/OllamaModel.py +++ b/src/backend/base/langflow/components/models/OllamaModel.py @@ -194,7 +194,7 @@ class ChatOllamaComponent(LCModelComponent): "info": "Template to use for generating text.", "advanced": True, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT, diff --git a/src/backend/base/langflow/components/models/OpenAIModel.py b/src/backend/base/langflow/components/models/OpenAIModel.py index 0aedce495..94e0a78ee 100644 --- a/src/backend/base/langflow/components/models/OpenAIModel.py +++ b/src/backend/base/langflow/components/models/OpenAIModel.py @@ -28,7 +28,7 @@ class OpenAIModelComponent(LCModelComponent): def build_config(self): return { - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "max_tokens": { "display_name": "Max Tokens", "advanced": True, diff --git 
a/src/backend/base/langflow/components/models/VertexAiModel.py b/src/backend/base/langflow/components/models/VertexAiModel.py index a992447f4..b5af1fac2 100644 --- a/src/backend/base/langflow/components/models/VertexAiModel.py +++ b/src/backend/base/langflow/components/models/VertexAiModel.py @@ -1,6 +1,5 @@ from typing import Optional - from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import Text @@ -74,7 +73,7 @@ class ChatVertexAIComponent(LCModelComponent): "value": False, "advanced": True, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "input_types": ["Text", "Record"]}, "stream": { "display_name": "Stream", "info": STREAM_INFO_TEXT,