refactor: Remove unused text_response method from model components

This commit is contained in:
Gabriel Luiz Freitas Almeida 2024-06-20 16:35:00 -03:00
commit f8ce1331b9
10 changed files with 5 additions and 100 deletions

View file

@@ -69,15 +69,6 @@ class AmazonBedrockComponent(LCModelComponent):
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> LanguageModel:
model_id = self.model_id
credentials_profile_name = self.credentials_profile_name

View file

@@ -78,15 +78,6 @@ class AzureChatOpenAIComponent(LCModelComponent):
Output(display_name="Language Model", name="model_output", method="model_response"),
]
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.model_response()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def model_response(self) -> LanguageModel:
model = self.model
azure_endpoint = self.azure_endpoint

View file

@@ -3,7 +3,7 @@ from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.io import BoolInput, DropdownInput, FloatInput, Output, SecretStrInput, TextInput
from langflow.field_typing.constants import LanguageModel
from langflow.io import BoolInput, DropdownInput, FloatInput, Output, SecretStrInput, TextInput
@@ -89,15 +89,6 @@ class QianfanChatEndpointComponent(LCModelComponent):
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> LanguageModel:
model = self.model
qianfan_ak = self.qianfan_ak

View file

@@ -16,7 +16,6 @@ from langflow.io import (
SecretStrInput,
StrInput,
)
from langflow.schema.message import Message
class ChatLiteLLMModelComponent(LCModelComponent):
@@ -128,16 +127,6 @@ class ChatLiteLLMModelComponent(LCModelComponent):
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def text_response(self) -> Message:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
message = Message(text=result)
self.status = message
return message
def build_model(self) -> LanguageModel:
try:
import litellm # type: ignore

View file

@@ -4,7 +4,7 @@ from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel, Text
from langflow.field_typing import LanguageModel
from langflow.io import BoolInput, FloatInput, MessageInput, Output, SecretStrInput, StrInput
@@ -37,15 +37,6 @@ class CohereComponent(LCModelComponent):
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> LanguageModel | BaseChatModel:
cohere_api_key = self.cohere_api_key
temperature = self.temperature

View file

@@ -73,15 +73,6 @@ class GoogleGenerativeAIComponent(LCModelComponent):
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> LanguageModel:
try:
from langchain_google_genai import ChatGoogleGenerativeAI

View file

@@ -3,7 +3,7 @@ from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel, Text
from langflow.field_typing import LanguageModel
from langflow.io import BoolInput, DictInput, DropdownInput, MessageInput, Output, SecretStrInput, StrInput
@@ -36,15 +36,6 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> LanguageModel:
endpoint_url = self.endpoint_url
task = self.task

View file

@@ -3,7 +3,7 @@ from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel, Text
from langflow.field_typing import LanguageModel
from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, SecretStrInput, StrInput
@@ -70,15 +70,6 @@ class MistralAIModelComponent(LCModelComponent):
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> LanguageModel:
mistral_api_key = self.mistral_api_key
temperature = self.temperature

View file

@@ -7,7 +7,6 @@ from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, StrInput
from langflow.schema.message import Message
class ChatOllamaComponent(LCModelComponent):
@@ -224,15 +223,6 @@ class ChatOllamaComponent(LCModelComponent):
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def text_response(self) -> Message:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> LanguageModel:
# Mapping mirostat settings to their corresponding values
mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}

View file

@@ -1,9 +1,7 @@
from langchain_google_vertexai import ChatVertexAI
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel, Text
from langflow.field_typing import LanguageModel
from langflow.io import BoolInput, FileInput, FloatInput, IntInput, MessageInput, MultilineInput, Output, StrInput
@@ -53,15 +51,6 @@ class ChatVertexAIComponent(LCModelComponent):
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> LanguageModel:
credentials = self.credentials
location = self.location