Update model components

This commit is contained in:
Gabriel Luiz Freitas Almeida 2024-02-27 23:48:53 -03:00
commit c6b837380b
12 changed files with 78 additions and 110 deletions

View file

@ -2,13 +2,14 @@ from typing import Optional
from langchain_community.chat_models.bedrock import BedrockChat
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class AmazonBedrockComponent(CustomComponent):
class AmazonBedrockComponent(LCModelComponent):
display_name: str = "Amazon Bedrock Model"
description: str = "Generate text using LLM model from Amazon Bedrock."
icon = "AmazonBedrock"
def build_config(self):
return {
@ -65,10 +66,5 @@ class AmazonBedrockComponent(CustomComponent):
) # type: ignore
except Exception as e:
raise ValueError("Could not connect to AmazonBedrock API.") from e
if stream:
result = output.stream(input_value)
else:
message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -3,15 +3,16 @@ from typing import Optional
from langchain_community.chat_models.anthropic import ChatAnthropic
from pydantic.v1 import SecretStr
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class AnthropicLLM(CustomComponent):
class AnthropicLLM(LCModelComponent):
display_name: str = "AnthropicModel"
description: str = (
"Generate text using Anthropic Chat&Completion large language models."
)
icon = "Anthropic"
def build_config(self):
return {
@ -82,10 +83,5 @@ class AnthropicLLM(CustomComponent):
)
except Exception as e:
raise ValueError("Could not connect to Anthropic API.") from e
if stream:
result = output.stream(input_value)
else:
message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -3,16 +3,17 @@ from typing import Optional
from langchain.llms.base import BaseLanguageModel
from langchain_openai import AzureChatOpenAI
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
class AzureChatOpenAIComponent(CustomComponent):
class AzureChatOpenAIComponent(LCModelComponent):
display_name: str = "AzureOpenAI Model"
description: str = "Generate text using LLM model from Azure OpenAI."
documentation: str = (
"https://python.langchain.com/docs/integrations/llms/azure_openai"
)
beta = False
icon = "Azure"
AZURE_OPENAI_MODELS = [
"gpt-35-turbo",
@ -104,10 +105,5 @@ class AzureChatOpenAIComponent(CustomComponent):
)
except Exception as e:
raise ValueError("Could not connect to AzureOpenAI API.") from e
if stream:
result = output.stream(input_value)
else:
message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -3,16 +3,17 @@ from typing import Optional
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from pydantic.v1 import SecretStr
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class QianfanChatEndpointComponent(CustomComponent):
class QianfanChatEndpointComponent(LCModelComponent):
display_name: str = "QianfanChat Model"
description: str = (
"Generate text using Baidu Qianfan chat models. Get more detail from "
"https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint."
)
icon = "BaiduQianfan"
def build_config(self):
return {
@ -99,10 +100,5 @@ class QianfanChatEndpointComponent(CustomComponent):
)
except Exception as e:
raise ValueError("Could not connect to Baidu Qianfan API.") from e
if stream:
result = output.stream(input_value)
else:
message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -2,11 +2,11 @@ from typing import Dict, Optional
from langchain_community.llms.ctransformers import CTransformers
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class CTransformersComponent(CustomComponent):
class CTransformersComponent(LCModelComponent):
display_name = "CTransformersModel"
description = "Generate text using CTransformers LLM models"
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"
@ -47,10 +47,5 @@ class CTransformersComponent(CustomComponent):
output = CTransformers(
model=model, model_file=model_file, model_type=model_type, config=config
)
if stream:
result = output.stream(input_value)
else:
message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -1,14 +1,16 @@
from langchain_community.chat_models.cohere import ChatCohere
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class CohereComponent(CustomComponent):
class CohereComponent(LCModelComponent):
display_name = "CohereModel"
description = "Generate text using Cohere large language models."
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"
icon = "Cohere"
def build_config(self):
return {
"cohere_api_key": {
@ -29,6 +31,10 @@ class CohereComponent(CustomComponent):
"show": True,
},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
@ -37,16 +43,11 @@ class CohereComponent(CustomComponent):
input_value: str,
max_tokens: int = 256,
temperature: float = 0.75,
stream: bool = False,
) -> Text:
output = ChatCohere(
cohere_api_key=cohere_api_key,
max_tokens=max_tokens,
temperature=temperature,
)
if stream:
result = output.stream(input_value)
else:
message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
        return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -1,13 +1,16 @@
from typing import Optional
from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic.v1 import SecretStr
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import RangeSpec, Text
from langflow import CustomComponent
from langflow.field_typing import RangeSpec
class GoogleGenerativeAIComponent(CustomComponent):
class GoogleGenerativeAIComponent(LCModelComponent):
display_name: str = "Google Generative AIModel"
description: str = "Generate text using Google Generative AI to generate text."
documentation: str = "http://docs.langflow.org/components/custom"
icon = "GoogleGenerativeAI"
def build_config(self):
return {
@ -47,7 +50,11 @@ class GoogleGenerativeAIComponent(CustomComponent):
"code": {
"advanced": True,
},
            "input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "info": "The input to the model."},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
@ -60,6 +67,7 @@ class GoogleGenerativeAIComponent(CustomComponent):
top_k: Optional[int] = None,
top_p: Optional[float] = None,
n: Optional[int] = 1,
stream: bool = False,
) -> Text:
output = ChatGoogleGenerativeAI(
model=model,
@ -70,10 +78,4 @@ class GoogleGenerativeAIComponent(CustomComponent):
n=n or 1,
google_api_key=SecretStr(google_api_key),
)
if stream:
result = output.stream(input_value)
else:
message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -3,13 +3,14 @@ from typing import Optional
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class HuggingFaceEndpointsComponent(CustomComponent):
class HuggingFaceEndpointsComponent(LCModelComponent):
display_name: str = "Hugging Face Inference API models"
description: str = "Generate text using LLM model from Hugging Face Inference API."
icon = "HuggingFace"
def build_config(self):
return {
@ -25,6 +26,10 @@ class HuggingFaceEndpointsComponent(CustomComponent):
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"stream": {
"display_name": "Stream",
"info": "Stream the response from the model.",
},
}
def build(
@ -34,6 +39,7 @@ class HuggingFaceEndpointsComponent(CustomComponent):
task: str = "text2text-generation",
huggingfacehub_api_token: Optional[str] = None,
model_kwargs: Optional[dict] = None,
stream: bool = False,
) -> Text:
try:
llm = HuggingFaceEndpoint(
@ -45,7 +51,4 @@ class HuggingFaceEndpointsComponent(CustomComponent):
except Exception as e:
raise ValueError("Could not connect to HuggingFace Endpoints API.") from e
output = ChatHuggingFace(llm=llm)
        message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -2,11 +2,11 @@ from typing import Any, Dict, List, Optional
from langchain_community.llms.llamacpp import LlamaCpp
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class LlamaCppComponent(CustomComponent):
class LlamaCppComponent(LCModelComponent):
display_name = "LlamaCppModel"
description = "Generate text using llama.cpp model."
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"
@ -140,10 +140,5 @@ class LlamaCppComponent(CustomComponent):
verbose=verbose,
vocab_only=vocab_only,
)
if stream:
result = output.stream(input_value)
else:
message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -3,17 +3,19 @@ from typing import Any, Dict, List, Optional
# from langchain_community.chat_models import ChatOllama
from langchain_community.chat_models import ChatOllama
from langflow.components.models.base.model import LCModelComponent
# from langchain.chat_models import ChatOllama
from langflow import CustomComponent
from langflow.field_typing import Text
# When a callback component is added to Langflow, this comment must be uncommented.
# from langchain.callbacks.manager import CallbackManager
class ChatOllamaComponent(CustomComponent):
class ChatOllamaComponent(LCModelComponent):
display_name = "ChatOllamaModel"
description = "Generate text using Local LLM for chat with Ollama."
icon = "Ollama"
def build_config(self) -> dict:
return {
@ -255,10 +257,5 @@ class ChatOllamaComponent(CustomComponent):
output = ChatOllama(**llm_params) # type: ignore
except Exception as e:
raise ValueError("Could not initialize Ollama LLM.") from e
if stream:
result = output.stream(input_value)
else:
message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -2,13 +2,14 @@ from typing import Optional
from langchain_openai import ChatOpenAI
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import NestedDict, Text
class OpenAIModelComponent(CustomComponent):
class OpenAIModelComponent(LCModelComponent):
display_name = "OpenAI Model"
description = "Generates text using OpenAI's models."
icon = "OpenAI"
def build_config(self):
return {
@ -84,10 +85,5 @@ class OpenAIModelComponent(CustomComponent):
api_key=openai_api_key,
temperature=temperature,
)
if stream:
result = output.stream(input_value)
else:
message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)

View file

@ -2,11 +2,11 @@ from typing import List, Optional
from langchain_core.messages.base import BaseMessage
from langflow import CustomComponent
from langflow.components.models.base.model import LCModelComponent
from langflow.field_typing import Text
class ChatVertexAIComponent(CustomComponent):
class ChatVertexAIComponent(LCModelComponent):
display_name = "ChatVertexAIModel"
description = "Generate text using Vertex AI Chat large language models API."
@ -97,10 +97,5 @@ class ChatVertexAIComponent(CustomComponent):
top_p=top_p,
verbose=verbose,
)
if stream:
result = output.stream(input_value)
else:
message = output.invoke(input_value)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
return self.get_result(output=output, stream=stream, input_value=input_value)