From c6b837380b54ce5efa03006e00cfc9d7b796fb30 Mon Sep 17 00:00:00 2001 From: Gabriel Luiz Freitas Almeida Date: Tue, 27 Feb 2024 23:48:53 -0300 Subject: [PATCH] Update model components --- .../components/models/AmazonBedrockModel.py | 14 ++++----- .../components/models/AnthropicModel.py | 14 ++++----- .../components/models/AzureOpenAIModel.py | 14 ++++----- .../models/BaiduQianfanChatModel.py | 14 ++++----- .../components/models/CTransformersModel.py | 13 +++----- .../langflow/components/models/CohereModel.py | 19 ++++++------ .../models/GoogleGenerativeAIModel.py | 30 ++++++++++--------- .../components/models/HuggingFaceModel.py | 15 ++++++---- .../components/models/LlamaCppModel.py | 13 +++----- .../langflow/components/models/OllamaModel.py | 15 ++++------ .../langflow/components/models/OpenAIModel.py | 14 ++++----- .../components/models/VertexAiModel.py | 13 +++----- 12 files changed, 78 insertions(+), 110 deletions(-) diff --git a/src/backend/langflow/components/models/AmazonBedrockModel.py b/src/backend/langflow/components/models/AmazonBedrockModel.py index 761daae65..4ae28e70c 100644 --- a/src/backend/langflow/components/models/AmazonBedrockModel.py +++ b/src/backend/langflow/components/models/AmazonBedrockModel.py @@ -2,13 +2,14 @@ from typing import Optional from langchain_community.chat_models.bedrock import BedrockChat -from langflow import CustomComponent +from langflow.components.models.base.model import LCModelComponent from langflow.field_typing import Text -class AmazonBedrockComponent(CustomComponent): +class AmazonBedrockComponent(LCModelComponent): display_name: str = "Amazon Bedrock Model" description: str = "Generate text using LLM model from Amazon Bedrock." 
+ icon = "AmazonBedrock" def build_config(self): return { @@ -65,10 +66,5 @@ class AmazonBedrockComponent(CustomComponent): ) # type: ignore except Exception as e: raise ValueError("Could not connect to AmazonBedrock API.") from e - if stream: - result = output.stream(input_value) - else: - message = output.invoke(input_value) - result = message.content if hasattr(message, "content") else message - self.status = result - return result + + return self.get_result(output=output, stream=stream, input_value=input_value) diff --git a/src/backend/langflow/components/models/AnthropicModel.py b/src/backend/langflow/components/models/AnthropicModel.py index 230a5ab2a..a3ba510a4 100644 --- a/src/backend/langflow/components/models/AnthropicModel.py +++ b/src/backend/langflow/components/models/AnthropicModel.py @@ -3,15 +3,16 @@ from typing import Optional from langchain_community.chat_models.anthropic import ChatAnthropic from pydantic.v1 import SecretStr -from langflow import CustomComponent +from langflow.components.models.base.model import LCModelComponent from langflow.field_typing import Text -class AnthropicLLM(CustomComponent): +class AnthropicLLM(LCModelComponent): display_name: str = "AnthropicModel" description: str = ( "Generate text using Anthropic Chat&Completion large language models." 
) + icon = "Anthropic" def build_config(self): return { @@ -82,10 +83,5 @@ class AnthropicLLM(CustomComponent): ) except Exception as e: raise ValueError("Could not connect to Anthropic API.") from e - if stream: - result = output.stream(input_value) - else: - message = output.invoke(input_value) - result = message.content if hasattr(message, "content") else message - self.status = result - return result + + return self.get_result(output=output, stream=stream, input_value=input_value) diff --git a/src/backend/langflow/components/models/AzureOpenAIModel.py b/src/backend/langflow/components/models/AzureOpenAIModel.py index 8931bd6e4..392f390c4 100644 --- a/src/backend/langflow/components/models/AzureOpenAIModel.py +++ b/src/backend/langflow/components/models/AzureOpenAIModel.py @@ -3,16 +3,17 @@ from typing import Optional from langchain.llms.base import BaseLanguageModel from langchain_openai import AzureChatOpenAI -from langflow import CustomComponent +from langflow.components.models.base.model import LCModelComponent -class AzureChatOpenAIComponent(CustomComponent): +class AzureChatOpenAIComponent(LCModelComponent): display_name: str = "AzureOpenAI Model" description: str = "Generate text using LLM model from Azure OpenAI." 
documentation: str = ( "https://python.langchain.com/docs/integrations/llms/azure_openai" ) beta = False + icon = "Azure" AZURE_OPENAI_MODELS = [ "gpt-35-turbo", @@ -104,10 +105,5 @@ class AzureChatOpenAIComponent(CustomComponent): ) except Exception as e: raise ValueError("Could not connect to AzureOpenAI API.") from e - if stream: - result = output.stream(input_value) - else: - message = output.invoke(input_value) - result = message.content if hasattr(message, "content") else message - self.status = result - return result + + return self.get_result(output=output, stream=stream, input_value=input_value) diff --git a/src/backend/langflow/components/models/BaiduQianfanChatModel.py b/src/backend/langflow/components/models/BaiduQianfanChatModel.py index 121dd9be6..f0815603f 100644 --- a/src/backend/langflow/components/models/BaiduQianfanChatModel.py +++ b/src/backend/langflow/components/models/BaiduQianfanChatModel.py @@ -3,16 +3,17 @@ from typing import Optional from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint from pydantic.v1 import SecretStr -from langflow import CustomComponent +from langflow.components.models.base.model import LCModelComponent from langflow.field_typing import Text -class QianfanChatEndpointComponent(CustomComponent): +class QianfanChatEndpointComponent(LCModelComponent): display_name: str = "QianfanChat Model" description: str = ( "Generate text using Baidu Qianfan chat models. Get more detail from " "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint." 
) + icon = "BaiduQianfan" def build_config(self): return { @@ -99,10 +100,5 @@ class QianfanChatEndpointComponent(CustomComponent): ) except Exception as e: raise ValueError("Could not connect to Baidu Qianfan API.") from e - if stream: - result = output.stream(input_value) - else: - message = output.invoke(input_value) - result = message.content if hasattr(message, "content") else message - self.status = result - return result + + return self.get_result(output=output, stream=stream, input_value=input_value) diff --git a/src/backend/langflow/components/models/CTransformersModel.py b/src/backend/langflow/components/models/CTransformersModel.py index 9784f86b1..31123ad7e 100644 --- a/src/backend/langflow/components/models/CTransformersModel.py +++ b/src/backend/langflow/components/models/CTransformersModel.py @@ -2,11 +2,11 @@ from typing import Dict, Optional from langchain_community.llms.ctransformers import CTransformers -from langflow import CustomComponent +from langflow.components.models.base.model import LCModelComponent from langflow.field_typing import Text -class CTransformersComponent(CustomComponent): +class CTransformersComponent(LCModelComponent): display_name = "CTransformersModel" description = "Generate text using CTransformers LLM models" documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers" @@ -47,10 +47,5 @@ class CTransformersComponent(CustomComponent): output = CTransformers( model=model, model_file=model_file, model_type=model_type, config=config ) - if stream: - result = output.stream(input_value) - else: - message = output.invoke(input_value) - result = message.content if hasattr(message, "content") else message - self.status = result - return result + + return self.get_result(output=output, stream=stream, input_value=input_value) diff --git a/src/backend/langflow/components/models/CohereModel.py b/src/backend/langflow/components/models/CohereModel.py index 6af3971da..a32fb9b4b 100644 --- 
a/src/backend/langflow/components/models/CohereModel.py +++ b/src/backend/langflow/components/models/CohereModel.py @@ -1,14 +1,16 @@ from langchain_community.chat_models.cohere import ChatCohere -from langflow import CustomComponent +from langflow.components.models.base.model import LCModelComponent from langflow.field_typing import Text -class CohereComponent(CustomComponent): +class CohereComponent(LCModelComponent): display_name = "CohereModel" description = "Generate text using Cohere large language models." documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere" + icon = "Cohere" + def build_config(self): return { "cohere_api_key": { @@ -29,6 +31,10 @@ class CohereComponent(CustomComponent): "show": True, }, "input_value": {"display_name": "Input"}, + "stream": { + "display_name": "Stream", + "info": "Stream the response from the model.", + }, } def build( @@ -37,16 +43,11 @@ class CohereComponent(CustomComponent): cohere_api_key: str, input_value: str, max_tokens: int = 256, temperature: float = 0.75, + stream: bool = False, ) -> Text: output = ChatCohere( cohere_api_key=cohere_api_key, max_tokens=max_tokens, temperature=temperature, ) - if stream: - result = output.stream(input_value) - else: - message = output.invoke(input_value) - result = message.content if hasattr(message, "content") else message - self.status = result - return result + return self.get_result(output=output, stream=stream, input_value=input_value) diff --git a/src/backend/langflow/components/models/GoogleGenerativeAIModel.py b/src/backend/langflow/components/models/GoogleGenerativeAIModel.py index 40f1d385f..423a66df6 100644 --- a/src/backend/langflow/components/models/GoogleGenerativeAIModel.py +++ b/src/backend/langflow/components/models/GoogleGenerativeAIModel.py @@ -1,13 +1,16 @@ +from typing import Optional + +from langchain_google_genai import ChatGoogleGenerativeAI +from pydantic.v1 import SecretStr + +from
langflow.components.models.base.model import LCModelComponent +from langflow.field_typing import RangeSpec, Text -from langflow import CustomComponent -from langflow.field_typing import RangeSpec - - -class GoogleGenerativeAIComponent(CustomComponent): +class GoogleGenerativeAIComponent(LCModelComponent): display_name: str = "Google Generative AIModel" description: str = "Generate text using Google Generative AI to generate text." - documentation: str = "http://docs.langflow.org/components/custom" + icon = "GoogleGenerativeAI" def build_config(self): return { @@ -47,7 +50,11 @@ class GoogleGenerativeAIComponent(CustomComponent): "code": { "advanced": True, }, - "input_value": {"display_name": "Input"}, + "input_value": {"display_name": "Input", "info": "The input to the model."}, + "stream": { + "display_name": "Stream", + "info": "Stream the response from the model.", + }, } def build( @@ -60,6 +67,7 @@ class GoogleGenerativeAIComponent(CustomComponent): top_k: Optional[int] = None, top_p: Optional[float] = None, n: Optional[int] = 1, + stream: bool = False, ) -> Text: output = ChatGoogleGenerativeAI( model=model, @@ -70,10 +78,4 @@ class GoogleGenerativeAIComponent(CustomComponent): n=n or 1, google_api_key=SecretStr(google_api_key), ) - if stream: - result = output.stream(input_value) - else: - message = output.invoke(input_value) - result = message.content if hasattr(message, "content") else message - self.status = result - return result \ No newline at end of file + return self.get_result(output=output, stream=stream, input_value=input_value) diff --git a/src/backend/langflow/components/models/HuggingFaceModel.py b/src/backend/langflow/components/models/HuggingFaceModel.py index 3fe97aca1..3d92272e6 100644 --- a/src/backend/langflow/components/models/HuggingFaceModel.py +++ b/src/backend/langflow/components/models/HuggingFaceModel.py @@ -3,13 +3,14 @@ from typing import Optional from langchain_community.chat_models.huggingface import ChatHuggingFace from
langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint -from langflow import CustomComponent +from langflow.components.models.base.model import LCModelComponent from langflow.field_typing import Text -class HuggingFaceEndpointsComponent(CustomComponent): +class HuggingFaceEndpointsComponent(LCModelComponent): display_name: str = "Hugging Face Inference API models" description: str = "Generate text using LLM model from Hugging Face Inference API." + icon = "HuggingFace" def build_config(self): return { @@ -25,6 +26,10 @@ class HuggingFaceEndpointsComponent(CustomComponent): }, "code": {"show": False}, "input_value": {"display_name": "Input"}, + "stream": { + "display_name": "Stream", + "info": "Stream the response from the model.", + }, } def build( @@ -34,6 +39,7 @@ class HuggingFaceEndpointsComponent(CustomComponent): task: str = "text2text-generation", huggingfacehub_api_token: Optional[str] = None, model_kwargs: Optional[dict] = None, + stream: bool = False, ) -> Text: try: llm = HuggingFaceEndpoint( @@ -45,7 +51,4 @@ class HuggingFaceEndpointsComponent(CustomComponent): except Exception as e: raise ValueError("Could not connect to HuggingFace Endpoints API.") from e output = ChatHuggingFace(llm=llm) - message = output.invoke(input_value) - result = message.content if hasattr(message, "content") else message - self.status = result - return result + return self.get_result(output=output, stream=stream, input_value=input_value) diff --git a/src/backend/langflow/components/models/LlamaCppModel.py b/src/backend/langflow/components/models/LlamaCppModel.py index 00c0ee0f4..1ca2cd3c1 100644 --- a/src/backend/langflow/components/models/LlamaCppModel.py +++ b/src/backend/langflow/components/models/LlamaCppModel.py @@ -2,11 +2,11 @@ from typing import Any, Dict, List, Optional from langchain_community.llms.llamacpp import LlamaCpp -from langflow import CustomComponent +from langflow.components.models.base.model import LCModelComponent from
langflow.field_typing import Text -class LlamaCppComponent(CustomComponent): +class LlamaCppComponent(LCModelComponent): display_name = "LlamaCppModel" description = "Generate text using llama.cpp model." documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp" @@ -140,10 +140,5 @@ class LlamaCppComponent(CustomComponent): verbose=verbose, vocab_only=vocab_only, ) - if stream: - result = output.stream(input_value) - else: - message = output.invoke(input_value) - result = message.content if hasattr(message, "content") else message - self.status = result - return result + + return self.get_result(output=output, stream=stream, input_value=input_value) diff --git a/src/backend/langflow/components/models/OllamaModel.py b/src/backend/langflow/components/models/OllamaModel.py index 5f0a86289..7929c2b43 100644 --- a/src/backend/langflow/components/models/OllamaModel.py +++ b/src/backend/langflow/components/models/OllamaModel.py @@ -3,17 +3,19 @@ from typing import Any, Dict, List, Optional # from langchain_community.chat_models import ChatOllama from langchain_community.chat_models import ChatOllama +from langflow.components.models.base.model import LCModelComponent + # from langchain.chat_models import ChatOllama -from langflow import CustomComponent from langflow.field_typing import Text # When a callback component is added to Langflow, the comment must be uncommented. # from langchain.callbacks.manager import CallbackManager -class ChatOllamaComponent(CustomComponent): +class ChatOllamaComponent(LCModelComponent): display_name = "ChatOllamaModel" description = "Generate text using Local LLM for chat with Ollama."
+ icon = "Ollama" def build_config(self) -> dict: return { @@ -255,10 +257,5 @@ class ChatOllamaComponent(CustomComponent): output = ChatOllama(**llm_params) # type: ignore except Exception as e: raise ValueError("Could not initialize Ollama LLM.") from e - if stream: - result = output.stream(input_value) - else: - message = output.invoke(input_value) - result = message.content if hasattr(message, "content") else message - self.status = result - return result + + return self.get_result(output=output, stream=stream, input_value=input_value) diff --git a/src/backend/langflow/components/models/OpenAIModel.py b/src/backend/langflow/components/models/OpenAIModel.py index 34a0252be..7a28acee6 100644 --- a/src/backend/langflow/components/models/OpenAIModel.py +++ b/src/backend/langflow/components/models/OpenAIModel.py @@ -2,13 +2,14 @@ from typing import Optional from langchain_openai import ChatOpenAI -from langflow import CustomComponent +from langflow.components.models.base.model import LCModelComponent from langflow.field_typing import NestedDict, Text -class OpenAIModelComponent(CustomComponent): +class OpenAIModelComponent(LCModelComponent): display_name = "OpenAI Model" description = "Generates text using OpenAI's models." 
+ icon = "OpenAI" def build_config(self): return { @@ -84,10 +85,5 @@ class OpenAIModelComponent(CustomComponent): api_key=openai_api_key, temperature=temperature, ) - if stream: - result = output.stream(input_value) - else: - message = output.invoke(input_value) - result = message.content if hasattr(message, "content") else message - self.status = result - return result + + return self.get_result(output=output, stream=stream, input_value=input_value) diff --git a/src/backend/langflow/components/models/VertexAiModel.py b/src/backend/langflow/components/models/VertexAiModel.py index dcaa95860..d7eab71ed 100644 --- a/src/backend/langflow/components/models/VertexAiModel.py +++ b/src/backend/langflow/components/models/VertexAiModel.py @@ -2,11 +2,11 @@ from typing import List, Optional from langchain_core.messages.base import BaseMessage -from langflow import CustomComponent +from langflow.components.models.base.model import LCModelComponent from langflow.field_typing import Text -class ChatVertexAIComponent(CustomComponent): +class ChatVertexAIComponent(LCModelComponent): display_name = "ChatVertexAIModel" description = "Generate text using Vertex AI Chat large language models API." @@ -97,10 +97,5 @@ class ChatVertexAIComponent(CustomComponent): top_p=top_p, verbose=verbose, ) - if stream: - result = output.stream(input_value) - else: - message = output.invoke(input_value) - result = message.content if hasattr(message, "content") else message - self.status = result - return result + + return self.get_result(output=output, stream=stream, input_value=input_value)