diff --git a/src/backend/base/langflow/base/models/model.py b/src/backend/base/langflow/base/models/model.py index 395631df3..35853b2c2 100644 --- a/src/backend/base/langflow/base/models/model.py +++ b/src/backend/base/langflow/base/models/model.py @@ -1,13 +1,16 @@ import json import warnings from abc import abstractmethod -from typing import Optional, Union +from typing import Optional, Union, List from langchain_core.language_models.llms import LLM from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage +from langflow.base.constants import STREAM_INFO_TEXT from langflow.custom import Component from langflow.field_typing import LanguageModel +from langflow.inputs import MessageInput, MessageTextInput +from langflow.inputs.inputs import InputTypes, BoolInput from langflow.schema.message import Message from langflow.template.field.base import Output @@ -17,6 +20,17 @@ class LCModelComponent(Component): description: str = "Model Description" trace_type = "llm" + _base_inputs: List[InputTypes] = [ + MessageInput(name="input_value", display_name="Input"), + MessageTextInput( + name="system_message", + display_name="System Message", + info="System message to pass to the model.", + advanced=True, + ), + BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True), + ] + outputs = [ Output(display_name="Text", name="text_output", method="text_response"), Output(display_name="Language Model", name="model_output", method="build_model"), @@ -142,6 +156,7 @@ class LCModelComponent(Component): messages.append(input_value.to_lc_message()) else: messages.append(HumanMessage(content=input_value)) + inputs: Union[list, dict] = messages or {} try: runnable = runnable.with_config( # type: ignore diff --git a/src/backend/base/langflow/components/embeddings/VertexAIEmbeddings.py b/src/backend/base/langflow/components/embeddings/VertexAIEmbeddings.py index 7b2374482..7149c6173 100644 --- a/src/backend/base/langflow/components/embeddings/VertexAIEmbeddings.py +++ b/src/backend/base/langflow/components/embeddings/VertexAIEmbeddings.py @@ -1,6 +1,6 @@ from langflow.base.models.model import LCModelComponent from langflow.field_typing import Embeddings -from langflow.io import BoolInput, DictInput, FileInput, FloatInput, IntInput, MessageTextInput, Output +from langflow.io import BoolInput, FileInput, FloatInput, IntInput, MessageTextInput, Output class VertexAIEmbeddingsComponent(LCModelComponent): @@ -13,81 +13,22 @@ class VertexAIEmbeddingsComponent(LCModelComponent): FileInput( name="credentials", display_name="Credentials", + info="JSON credentials file. 
Leave empty to fallback to environment variables", value="", - file_types=["json"], # Removed the dot - ), - DictInput( - name="instance", - display_name="Instance", - advanced=True, - ), - MessageTextInput( - name="location", - display_name="Location", - value="us-central1", - advanced=True, - ), - IntInput( - name="max_output_tokens", - display_name="Max Output Tokens", - value=128, - ), - IntInput( - name="max_retries", - display_name="Max Retries", - value=6, - advanced=True, - ), - MessageTextInput( - name="model_name", - display_name="Model Name", - value="textembedding-gecko", - ), - IntInput( - name="n", - display_name="N", - value=1, - advanced=True, - ), - MessageTextInput( - name="project", - display_name="Project", - advanced=True, - ), - IntInput( - name="request_parallelism", - display_name="Request Parallelism", - value=5, - advanced=True, - ), - MessageTextInput( - name="stop", - display_name="Stop", - advanced=True, - ), - BoolInput( - name="streaming", - display_name="Streaming", - value=False, - advanced=True, - ), - FloatInput( - name="temperature", - display_name="Temperature", - value=0.0, - ), - IntInput( - name="top_k", - display_name="Top K", - value=40, - advanced=True, - ), - FloatInput( - name="top_p", - display_name="Top P", - value=0.95, - advanced=True, + file_types=["json"], ), + MessageTextInput(name="location", display_name="Location", advanced=True), + MessageTextInput(name="project", display_name="Project", info="The project ID.", advanced=True), + IntInput(name="max_output_tokens", display_name="Max Output Tokens", advanced=True), + IntInput(name="max_retries", display_name="Max Retries", value=1, advanced=True), + MessageTextInput(name="model_name", display_name="Model Name", value="textembedding-gecko"), + IntInput(name="n", display_name="N", value=1, advanced=True), + IntInput(name="request_parallelism", value=5, display_name="Request Parallelism", advanced=True), + MessageTextInput(name="stop_sequences", display_name="Stop", advanced=True, is_list=True), + BoolInput(name="streaming", display_name="Streaming", value=False, advanced=True), + FloatInput(name="temperature", value=0.0, display_name="Temperature"), + IntInput(name="top_k", display_name="Top K", advanced=True), + FloatInput(name="top_p", display_name="Top P", value=0.95, advanced=True), ] outputs = [ @@ -102,9 +43,15 @@ class VertexAIEmbeddingsComponent(LCModelComponent): "Please install the langchain-google-vertexai package to use the VertexAIEmbeddings component." 
) + from google.oauth2 import service_account + + if self.credentials: + gcloud_credentials = service_account.Credentials.from_service_account_file(self.credentials) + else: + # will fallback to environment variable or inferred from gcloud CLI + gcloud_credentials = None return VertexAIEmbeddings( - instance=self.instance, - credentials=self.credentials, + credentials=gcloud_credentials, location=self.location, max_output_tokens=self.max_output_tokens, max_retries=self.max_retries, @@ -112,7 +59,7 @@ class VertexAIEmbeddingsComponent(LCModelComponent): n=self.n, project=self.project, request_parallelism=self.request_parallelism, - stop=self.stop, + stop=self.stop_sequences or None, streaming=self.streaming, temperature=self.temperature, top_k=self.top_k, diff --git a/src/backend/base/langflow/components/models/AmazonBedrockModel.py b/src/backend/base/langflow/components/models/AmazonBedrockModel.py index d21546502..deee5798a 100644 --- a/src/backend/base/langflow/components/models/AmazonBedrockModel.py +++ b/src/backend/base/langflow/components/models/AmazonBedrockModel.py @@ -1,10 +1,9 @@ from langchain_aws import ChatBedrock -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel from langflow.inputs import MessageTextInput -from langflow.io import BoolInput, DictInput, DropdownInput, MessageInput +from langflow.io import DictInput, DropdownInput class AmazonBedrockComponent(LCModelComponent): @@ -13,8 +12,7 @@ class AmazonBedrockComponent(LCModelComponent): icon = "Amazon" name = "AmazonBedrockModel" - inputs = [ - MessageInput(name="input_value", display_name="Input"), + inputs = LCModelComponent._base_inputs + [ DropdownInput( name="model_id", display_name="Model ID", @@ -57,13 +55,6 @@ class AmazonBedrockComponent(LCModelComponent): MessageTextInput(name="region_name", display_name="Region Name", value="us-east-1"), DictInput(name="model_kwargs", display_name="Model Kwargs", advanced=True, is_list=True), MessageTextInput(name="endpoint_url", display_name="Endpoint URL", advanced=True), - MessageTextInput( - name="system_message", - display_name="System Message", - info="System message to pass to the model.", - advanced=True, - ), - BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True), ] def build_model(self) -> LanguageModel: # type: ignore[type-var] diff --git a/src/backend/base/langflow/components/models/AnthropicModel.py b/src/backend/base/langflow/components/models/AnthropicModel.py index 0972dd577..e41b401d1 100644 --- a/src/backend/base/langflow/components/models/AnthropicModel.py +++ b/src/backend/base/langflow/components/models/AnthropicModel.py @@ -1,10 +1,9 @@ from langchain_anthropic.chat_models import ChatAnthropic from pydantic.v1 import SecretStr -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput +from langflow.io import DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput class AnthropicModelComponent(LCModelComponent): @@ -13,8 +12,7 @@ class AnthropicModelComponent(LCModelComponent): icon = "Anthropic" name = "AnthropicModel" - inputs = [ - MessageTextInput(name="input_value", display_name="Input"), + inputs = LCModelComponent._base_inputs + [ IntInput( name="max_tokens", display_name="Max Tokens", @@ -46,13 
+44,6 @@ class AnthropicModelComponent(LCModelComponent): advanced=True, info="Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.", ), - BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True, value=False), - MessageTextInput( - name="system_message", - display_name="System Message", - info="System message to pass to the model.", - advanced=True, - ), MessageTextInput( name="prefill", display_name="Prefill", diff --git a/src/backend/base/langflow/components/models/AzureOpenAIModel.py b/src/backend/base/langflow/components/models/AzureOpenAIModel.py index 57b488cfa..c55e7cbc9 100644 --- a/src/backend/base/langflow/components/models/AzureOpenAIModel.py +++ b/src/backend/base/langflow/components/models/AzureOpenAIModel.py @@ -1,9 +1,8 @@ from langchain_openai import AzureChatOpenAI -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel from langflow.inputs import MessageTextInput -from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, SecretStrInput, StrInput +from langflow.io import DropdownInput, FloatInput, IntInput, SecretStrInput class AzureChatOpenAIComponent(LCModelComponent): @@ -26,7 +25,7 @@ class AzureChatOpenAIComponent(LCModelComponent): "2024-05-13", ] - inputs = [ + inputs = LCModelComponent._base_inputs + [ MessageTextInput( name="azure_endpoint", display_name="Azure Endpoint", @@ -48,14 +47,6 @@ class AzureChatOpenAIComponent(LCModelComponent): advanced=True, info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.", ), - MessageInput(name="input_value", display_name="Input"), - BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True), - StrInput( - name="system_message", - display_name="System Message", - advanced=True, - info="System message to pass to the model.", - ), ] def build_model(self) -> LanguageModel: # type: ignore[type-var] diff --git a/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py b/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py index 15ede57f7..6e6384248 100644 --- a/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py +++ b/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py @@ -1,10 +1,9 @@ from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint from pydantic.v1 import SecretStr -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing.constants import LanguageModel -from langflow.io import BoolInput, DropdownInput, FloatInput, MessageTextInput, SecretStrInput +from langflow.io import DropdownInput, FloatInput, MessageTextInput, SecretStrInput class QianfanChatEndpointComponent(LCModelComponent): @@ -14,11 +13,7 @@ class QianfanChatEndpointComponent(LCModelComponent): icon = "BaiduQianfan" name = "BaiduQianfanChatModel" - inputs = [ - MessageTextInput( - name="input_value", - display_name="Input", - ), + inputs = LCModelComponent._base_inputs + [ DropdownInput( name="model", display_name="Model Name", @@ -72,18 +67,6 @@ class QianfanChatEndpointComponent(LCModelComponent): display_name="Endpoint", info="Endpoint of the Qianfan LLM, required if custom model used.", ), - BoolInput( - name="stream", - display_name="Stream", - info=STREAM_INFO_TEXT, - advanced=True, - ), - MessageTextInput( - name="system_message", - 
display_name="System Message", - info="System message to pass to the model.", - advanced=True, - ), ] def build_model(self) -> LanguageModel: # type: ignore[type-var] diff --git a/src/backend/base/langflow/components/models/CohereModel.py b/src/backend/base/langflow/components/models/CohereModel.py index 890f91606..498e47fd4 100644 --- a/src/backend/base/langflow/components/models/CohereModel.py +++ b/src/backend/base/langflow/components/models/CohereModel.py @@ -1,10 +1,9 @@ from langchain_cohere import ChatCohere from pydantic.v1 import SecretStr -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.io import BoolInput, FloatInput, MessageInput, SecretStrInput, StrInput +from langflow.io import FloatInput, SecretStrInput class CohereComponent(LCModelComponent): @@ -14,7 +13,7 @@ class CohereComponent(LCModelComponent): icon = "Cohere" name = "CohereModel" - inputs = [ + inputs = LCModelComponent._base_inputs + [ SecretStrInput( name="cohere_api_key", display_name="Cohere API Key", @@ -23,14 +22,6 @@ class CohereComponent(LCModelComponent): value="COHERE_API_KEY", ), FloatInput(name="temperature", display_name="Temperature", value=0.75), - MessageInput(name="input_value", display_name="Input"), - BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True), - StrInput( - name="system_message", - display_name="System Message", - info="System message to pass to the model.", - advanced=True, - ), ] def build_model(self) -> LanguageModel: # type: ignore[type-var] diff --git a/src/backend/base/langflow/components/models/GoogleGenerativeAIModel.py b/src/backend/base/langflow/components/models/GoogleGenerativeAIModel.py index e20f3150c..916831430 100644 --- a/src/backend/base/langflow/components/models/GoogleGenerativeAIModel.py +++ b/src/backend/base/langflow/components/models/GoogleGenerativeAIModel.py @@ -1,9 +1,8 @@ from pydantic.v1 import SecretStr -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.inputs import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, SecretStrInput, StrInput +from langflow.inputs import DropdownInput, FloatInput, IntInput, SecretStrInput class GoogleGenerativeAIComponent(LCModelComponent): @@ -12,8 +11,7 @@ class GoogleGenerativeAIComponent(LCModelComponent): icon = "GoogleGenerativeAI" name = "GoogleGenerativeAIModel" - inputs = [ - MessageInput(name="input_value", display_name="Input"), + inputs = LCModelComponent._base_inputs + [ IntInput( name="max_output_tokens", display_name="Max Output Tokens", @@ -38,19 +36,12 @@ class GoogleGenerativeAIComponent(LCModelComponent): advanced=True, ), FloatInput(name="temperature", display_name="Temperature", value=0.1), - BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True), IntInput( name="n", display_name="N", info="Number of chat completions to generate for each prompt. 
Note that the API may not return the full n completions if duplicates are generated.", advanced=True, ), - StrInput( - name="system_message", - display_name="System Message", - info="System message to pass to the model.", - advanced=True, - ), IntInput( name="top_k", display_name="Top K", diff --git a/src/backend/base/langflow/components/models/GroqModel.py b/src/backend/base/langflow/components/models/GroqModel.py index d24ba3cf6..7bad1bcf1 100644 --- a/src/backend/base/langflow/components/models/GroqModel.py +++ b/src/backend/base/langflow/components/models/GroqModel.py @@ -1,11 +1,10 @@ from langchain_groq import ChatGroq from pydantic.v1 import SecretStr -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.groq_constants import MODEL_NAMES from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput +from langflow.io import DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput class GroqModel(LCModelComponent): @@ -14,7 +13,7 @@ class GroqModel(LCModelComponent): icon = "Groq" name = "GroqModel" - inputs = [ + inputs = LCModelComponent._base_inputs + [ SecretStrInput( name="groq_api_key", display_name="Groq API Key", @@ -50,23 +49,6 @@ class GroqModel(LCModelComponent): info="The name of the model to use.", options=MODEL_NAMES, ), - MessageTextInput( - name="input_value", - display_name="Input", - info="The input to the model.", - ), - BoolInput( - name="stream", - display_name="Stream", - info=STREAM_INFO_TEXT, - advanced=True, - ), - MessageTextInput( - name="system_message", - display_name="System Message", - info="System message to pass to the model.", - advanced=True, - ), ] def build_model(self) -> LanguageModel: # type: ignore[type-var] diff --git a/src/backend/base/langflow/components/models/HuggingFaceModel.py b/src/backend/base/langflow/components/models/HuggingFaceModel.py index 6995eb765..313d44001 100644 --- a/src/backend/base/langflow/components/models/HuggingFaceModel.py +++ b/src/backend/base/langflow/components/models/HuggingFaceModel.py @@ -1,10 +1,9 @@ from langchain_community.chat_models.huggingface import ChatHuggingFace from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.io import BoolInput, DictInput, DropdownInput, MessageInput, SecretStrInput, StrInput +from langflow.io import DictInput, DropdownInput, SecretStrInput, StrInput class HuggingFaceEndpointsComponent(LCModelComponent): @@ -13,8 +12,7 @@ class HuggingFaceEndpointsComponent(LCModelComponent): icon = "HuggingFace" name = "HuggingFaceModel" - inputs = [ - MessageInput(name="input_value", display_name="Input"), + inputs = LCModelComponent._base_inputs + [ SecretStrInput(name="endpoint_url", display_name="Endpoint URL", password=True), StrInput( name="model_id", @@ -28,13 +26,6 @@ class HuggingFaceEndpointsComponent(LCModelComponent): ), SecretStrInput(name="huggingfacehub_api_token", display_name="API token", password=True), DictInput(name="model_kwargs", display_name="Model Keyword Arguments", advanced=True), - BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True), - StrInput( - name="system_message", - display_name="System Message", - info="System message to pass to the model.", - 
advanced=True, - ), ] def build_model(self) -> LanguageModel: # type: ignore[type-var] diff --git a/src/backend/base/langflow/components/models/Maritalk.py b/src/backend/base/langflow/components/models/Maritalk.py index c0740d2f7..e6b7c052e 100644 --- a/src/backend/base/langflow/components/models/Maritalk.py +++ b/src/backend/base/langflow/components/models/Maritalk.py @@ -1,10 +1,9 @@ from langchain_community.chat_models import ChatMaritalk -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel from langflow.field_typing.range_spec import RangeSpec -from langflow.inputs import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, SecretStrInput, StrInput +from langflow.inputs import DropdownInput, FloatInput, IntInput, SecretStrInput class MaritalkModelComponent(LCModelComponent): @@ -12,8 +11,7 @@ class MaritalkModelComponent(LCModelComponent): description = "Generates text using Maritalk LLMs." icon = "Maritalk" name = "Maritalk" - inputs = [ - MessageInput(name="input_value", display_name="Input"), + inputs = LCModelComponent._base_inputs + [ IntInput( name="max_tokens", display_name="Max Tokens", @@ -35,13 +33,6 @@ class MaritalkModelComponent(LCModelComponent): advanced=False, ), FloatInput(name="temperature", display_name="Temperature", value=0.1, range_spec=RangeSpec(min=0, max=1)), - BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, value=False, advanced=True), - StrInput( - name="system_message", - display_name="System Message", - info="System message to pass to the model.", - advanced=True, - ), ] def build_model(self) -> LanguageModel: # type: ignore[type-var] diff --git a/src/backend/base/langflow/components/models/MistralModel.py b/src/backend/base/langflow/components/models/MistralModel.py index 37339a851..41d84e043 100644 --- a/src/backend/base/langflow/components/models/MistralModel.py +++ b/src/backend/base/langflow/components/models/MistralModel.py @@ -1,10 +1,9 @@ from langchain_mistralai import ChatMistralAI from pydantic.v1 import SecretStr -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, SecretStrInput, StrInput +from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput class MistralAIModelComponent(LCModelComponent): @@ -13,8 +12,7 @@ class MistralAIModelComponent(LCModelComponent): icon = "MistralAI" name = "MistralModel" - inputs = [ - MessageInput(name="input_value", display_name="Input"), + inputs = LCModelComponent._base_inputs + [ IntInput( name="max_tokens", display_name="Max Tokens", @@ -51,13 +49,6 @@ class MistralAIModelComponent(LCModelComponent): advanced=False, ), FloatInput(name="temperature", display_name="Temperature", advanced=False, value=0.5), - BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True), - StrInput( - name="system_message", - display_name="System Message", - info="System message to pass to the model.", - advanced=True, - ), IntInput(name="max_retries", display_name="Max Retries", advanced=True, value=5), IntInput(name="timeout", display_name="Timeout", advanced=True, value=60), IntInput(name="max_concurrent_requests", display_name="Max Concurrent Requests", advanced=True, value=3), diff --git 
a/src/backend/base/langflow/components/models/NvidiaModel.py b/src/backend/base/langflow/components/models/NvidiaModel.py index 8d7871ae4..40a841d7c 100644 --- a/src/backend/base/langflow/components/models/NvidiaModel.py +++ b/src/backend/base/langflow/components/models/NvidiaModel.py @@ -1,9 +1,8 @@ from typing import Any -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.inputs import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, SecretStrInput, StrInput +from langflow.inputs import DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput from langflow.schema.dotdict import dotdict @@ -12,8 +11,7 @@ class NVIDIAModelComponent(LCModelComponent): description = "Generates text using NVIDIA LLMs." icon = "NVIDIA" - inputs = [ - MessageInput(name="input_value", display_name="Input"), + inputs = LCModelComponent._base_inputs + [ IntInput( name="max_tokens", display_name="Max Tokens", @@ -42,13 +40,6 @@ class NVIDIAModelComponent(LCModelComponent): value="NVIDIA_API_KEY", ), FloatInput(name="temperature", display_name="Temperature", value=0.1), - BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True), - StrInput( - name="system_message", - display_name="System Message", - info="System message to pass to the model.", - advanced=True, - ), IntInput( name="seed", display_name="Seed", diff --git a/src/backend/base/langflow/components/models/OllamaModel.py b/src/backend/base/langflow/components/models/OllamaModel.py index ec8c01ff8..8edba3374 100644 --- a/src/backend/base/langflow/components/models/OllamaModel.py +++ b/src/backend/base/langflow/components/models/OllamaModel.py @@ -3,10 +3,9 @@ from typing import Any import httpx from langchain_community.chat_models import ChatOllama -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageInput, StrInput +from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, StrInput class ChatOllamaComponent(LCModelComponent): @@ -68,7 +67,7 @@ class ChatOllamaComponent(LCModelComponent): except Exception as e: raise ValueError("Could not retrieve models. 
Please, make sure Ollama is running.") from e - inputs = [ + inputs = LCModelComponent._base_inputs + [ StrInput( name="base_url", display_name="Base URL", @@ -204,21 +203,6 @@ class ChatOllamaComponent(LCModelComponent): info="Template to use for generating text.", advanced=True, ), - MessageInput( - name="input_value", - display_name="Input", - ), - BoolInput( - name="stream", - display_name="Stream", - info=STREAM_INFO_TEXT, - ), - StrInput( - name="system_message", - display_name="System Message", - info="System message to pass to the model.", - advanced=True, - ), ] def build_model(self) -> LanguageModel: # type: ignore[type-var] diff --git a/src/backend/base/langflow/components/models/OpenAIModel.py b/src/backend/base/langflow/components/models/OpenAIModel.py index d6051dba6..355ab567b 100644 --- a/src/backend/base/langflow/components/models/OpenAIModel.py +++ b/src/backend/base/langflow/components/models/OpenAIModel.py @@ -4,7 +4,6 @@ from functools import reduce from langchain_openai import ChatOpenAI from pydantic.v1 import SecretStr -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.base.models.openai_constants import MODEL_NAMES from langflow.field_typing import LanguageModel @@ -14,7 +13,6 @@ from langflow.inputs import ( DropdownInput, FloatInput, IntInput, - MessageInput, SecretStrInput, StrInput, ) @@ -26,8 +24,7 @@ class OpenAIModelComponent(LCModelComponent): icon = "OpenAI" name = "OpenAIModel" - inputs = [ - MessageInput(name="input_value", display_name="Input"), + inputs = LCModelComponent._base_inputs + [ IntInput( name="max_tokens", display_name="Max Tokens", @@ -65,13 +62,6 @@ class OpenAIModelComponent(LCModelComponent): value="OPENAI_API_KEY", ), FloatInput(name="temperature", display_name="Temperature", value=0.1), - BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True), - StrInput( - name="system_message", - display_name="System Message", - info="System message to pass to the model.", - advanced=True, - ), IntInput( name="seed", display_name="Seed", diff --git a/src/backend/base/langflow/components/models/VertexAiModel.py b/src/backend/base/langflow/components/models/VertexAiModel.py index 32cb8f51d..763342dfc 100644 --- a/src/backend/base/langflow/components/models/VertexAiModel.py +++ b/src/backend/base/langflow/components/models/VertexAiModel.py @@ -1,9 +1,7 @@ -from langchain_google_vertexai import ChatVertexAI - -from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.io import BoolInput, FileInput, FloatInput, IntInput, MessageInput, MultilineInput, StrInput +from langflow.inputs import MessageTextInput +from langflow.io import BoolInput, FileInput, FloatInput, IntInput, StrInput class ChatVertexAIComponent(LCModelComponent): @@ -12,64 +10,57 @@ class ChatVertexAIComponent(LCModelComponent): icon = "VertexAI" name = "VertexAiModel" - inputs = [ - MessageInput(name="input_value", display_name="Input"), + inputs = LCModelComponent._base_inputs + [ FileInput( name="credentials", display_name="Credentials", - info="Path to the JSON file containing the credentials.", + info="JSON credentials file. 
Leave empty to fall back to environment variables", file_types=["json"], - advanced=True, ), - StrInput(name="project", display_name="Project", info="The project ID."), - MultilineInput( - name="examples", - display_name="Examples", - info="Examples to pass to the model.", - advanced=True, - ), - StrInput(name="location", display_name="Location", value="us-central1", advanced=True), - IntInput( - name="max_output_tokens", - display_name="Max Output Tokens", - value=128, - advanced=True, - ), - StrInput(name="model_name", display_name="Model Name", value="gemini-1.5-pro"), - FloatInput(name="temperature", display_name="Temperature", value=0.0), - IntInput(name="top_k", display_name="Top K", value=40, advanced=True), + MessageTextInput(name="model_name", display_name="Model Name", value="gemini-1.5-pro"), + StrInput(name="project", display_name="Project", info="The project ID.", advanced=True), + StrInput(name="location", display_name="Location", advanced=True), + IntInput(name="max_output_tokens", display_name="Max Output Tokens", advanced=True), + IntInput(name="max_retries", display_name="Max Retries", value=1, advanced=True), + FloatInput(name="temperature", value=0.0, display_name="Temperature"), + IntInput(name="top_k", display_name="Top K", advanced=True), FloatInput(name="top_p", display_name="Top P", value=0.95, advanced=True), BoolInput(name="verbose", display_name="Verbose", value=False, advanced=True), - BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True), - StrInput( - name="system_message", - display_name="System Message", - info="System message to pass to the model.", - advanced=True, - ), ] - def build_model(self) -> LanguageModel: # type: ignore[type-var] - credentials = self.credentials - location = self.location - max_output_tokens = self.max_output_tokens - model_name = self.model_name - project = self.project - temperature = self.temperature - top_k = self.top_k - top_p = self.top_p - verbose = self.verbose + def build_model(self) -> LanguageModel: + try: + from langchain_google_vertexai import ChatVertexAI + except ImportError: + raise ImportError( + "Please install the langchain-google-vertexai package to use the ChatVertexAI component." + ) + location = self.location or None + if self.credentials: + from google.oauth2 import service_account + from google.cloud import aiplatform - output = ChatVertexAI( + credentials = service_account.Credentials.from_service_account_file(self.credentials) + project = self.project or credentials.project_id + # ChatVertexAI sometimes skips manual credentials initialization + aiplatform.init( + project=project, + location=location, + credentials=credentials, + ) + else: + project = self.project or None + credentials = None + + return ChatVertexAI( credentials=credentials, location=location, - max_output_tokens=max_output_tokens, - model_name=model_name, project=project, - temperature=temperature, - top_k=top_k, - top_p=top_p, - verbose=verbose, + max_output_tokens=self.max_output_tokens, + max_retries=self.max_retries, + model_name=self.model_name, + temperature=self.temperature, + top_k=self.top_k, + top_p=self.top_p, + verbose=self.verbose, ) - - return output # type: ignore
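
Note: after this refactor, a model component declares only its provider-specific fields and inherits the input_value, system_message, and stream inputs from LCModelComponent._base_inputs. A minimal sketch of a new component written against the refactored base follows; the EchoModel component and the FakeListChatModel backend are illustrative placeholders, not part of this change.

    from langchain_community.chat_models.fake import FakeListChatModel

    from langflow.base.models.model import LCModelComponent
    from langflow.field_typing import LanguageModel
    from langflow.io import IntInput


    class EchoModelComponent(LCModelComponent):
        """Hypothetical component shown only to illustrate the shared _base_inputs pattern."""

        display_name = "Echo Model"
        description = "Returns canned responses."
        name = "EchoModel"

        # input_value, system_message, and stream come from LCModelComponent._base_inputs;
        # only provider-specific inputs are declared here.
        inputs = LCModelComponent._base_inputs + [
            IntInput(name="n_responses", display_name="Responses", value=1, advanced=True),
        ]

        def build_model(self) -> LanguageModel:  # type: ignore[type-var]
            # text_response() and the base class's stream handling consume this model.
            return FakeListChatModel(responses=["Hello from the echo model."] * self.n_responses)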