upd: llm models

namastex888 2024-06-15 00:44:16 +00:00
commit 60806556b1
6 changed files with 484 additions and 527 deletions

View file

@@ -1,112 +1,112 @@
from typing import Optional
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
from langflow.field_typing import BaseLanguageModel, Text
from langflow.inputs import BoolInput, FloatInput, SecretStrInput, StrInput
from langflow.template import Output
class QianfanChatEndpointComponent(LCModelComponent):
display_name: str = "Qianfan"
description: str = "Generate text using Baidu Qianfan LLMs."
documentation: str = "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint."
documentation: str = "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint"
icon = "BaiduQianfan"
field_order = [
"model",
"qianfan_ak",
"qianfan_sk",
"top_p",
"temperature",
"penalty_score",
"endpoint",
"input_value",
"system_message",
"stream",
inputs = [
StrInput(
name="input_value",
display_name="Input",
input_types=["Text", "Data", "Prompt"],
),
StrInput(
name="model",
display_name="Model Name",
options=[
"ERNIE-Bot",
"ERNIE-Bot-turbo",
"BLOOMZ-7B",
"Llama-2-7b-chat",
"Llama-2-13b-chat",
"Llama-2-70b-chat",
"Qianfan-BLOOMZ-7B-compressed",
"Qianfan-Chinese-Llama-2-7B",
"ChatGLM2-6B-32K",
"AquilaChat-7B",
],
info="https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint",
value="ERNIE-Bot-turbo",
),
SecretStrInput(
name="qianfan_ak",
display_name="Qianfan Ak",
info="which you could get from https://cloud.baidu.com/product/wenxinworkshop",
),
SecretStrInput(
name="qianfan_sk",
display_name="Qianfan Sk",
info="which you could get from https://cloud.baidu.com/product/wenxinworkshop",
),
FloatInput(
name="top_p",
display_name="Top p",
info="Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
value=0.8,
advanced=True,
),
FloatInput(
name="temperature",
display_name="Temperature",
info="Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
value=0.95,
),
FloatInput(
name="penalty_score",
display_name="Penalty Score",
info="Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
value=1.0,
advanced=True,
),
StrInput(
name="endpoint",
display_name="Endpoint",
info="Endpoint of the Qianfan LLM, required if custom model used.",
),
BoolInput(
name="stream",
display_name="Stream",
info=STREAM_INFO_TEXT,
advanced=True,
),
StrInput(
name="system_message",
display_name="System Message",
info="System message to pass to the model.",
advanced=True,
),
]
outputs = [
Output(display_name="Text", name="text_output", method="text_response"),
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def build_config(self):
return {
"model": {
"display_name": "Model Name",
"options": [
"ERNIE-Bot",
"ERNIE-Bot-turbo",
"BLOOMZ-7B",
"Llama-2-7b-chat",
"Llama-2-13b-chat",
"Llama-2-70b-chat",
"Qianfan-BLOOMZ-7B-compressed",
"Qianfan-Chinese-Llama-2-7B",
"ChatGLM2-6B-32K",
"AquilaChat-7B",
],
"info": "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint",
"value": "ERNIE-Bot-turbo",
},
"qianfan_ak": {
"display_name": "Qianfan Ak",
"password": True,
"info": "which you could get from https://cloud.baidu.com/product/wenxinworkshop",
},
"qianfan_sk": {
"display_name": "Qianfan Sk",
"password": True,
"info": "which you could get from https://cloud.baidu.com/product/wenxinworkshop",
},
"top_p": {
"display_name": "Top p",
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 0.8,
"advanced": True,
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 0.95,
},
"penalty_score": {
"display_name": "Penalty Score",
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 1.0,
"advanced": True,
},
"endpoint": {
"display_name": "Endpoint",
"info": "Endpoint of the Qianfan LLM, required if custom model used.",
},
"code": {"show": False},
"input_value": {"display_name": "Input", "input_types": ["Text", "Data", "Prompt"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
}
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> BaseLanguageModel:
model = self.model
qianfan_ak = self.qianfan_ak
qianfan_sk = self.qianfan_sk
top_p = self.top_p
temperature = self.temperature
penalty_score = self.penalty_score
endpoint = self.endpoint
def build(
self,
input_value: Text,
qianfan_ak: str,
qianfan_sk: str,
model: str,
top_p: Optional[float] = None,
temperature: Optional[float] = None,
penalty_score: Optional[float] = None,
endpoint: Optional[str] = None,
stream: bool = False,
system_message: Optional[str] = None,
) -> Text:
try:
output = QianfanChatEndpoint( # type: ignore
model=model,
@@ -120,4 +120,4 @@ class QianfanChatEndpointComponent(LCModelComponent):
except Exception as e:
raise ValueError("Could not connect to Baidu Qianfan API.") from e
return self.get_chat_result(output, stream, input_value, system_message)
return output

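The hunk above shows the refactor this commit repeats across all six files: the imperative build_config() dictionary and build(...) method give way to declarative inputs/outputs lists plus text_response()/build_model() methods that read field values from instance attributes. A minimal, self-contained sketch of that shape, using hypothetical stand-ins rather than the real langflow/LangChain classes:

from typing import Any, Optional


class FakeChatModel:
    """Stand-in for QianfanChatEndpoint or any LangChain chat model."""

    def __init__(self, **params: Any) -> None:
        self.params = params


class ComponentSketch:
    # Declarative class-level fields play the role of the old build_config()
    # dict; in langflow these would be StrInput/SecretStrInput entries in `inputs`.
    model: str = "ERNIE-Bot-turbo"
    qianfan_ak: Optional[str] = None
    input_value: str = "Hello"

    def build_model(self) -> FakeChatModel:
        # New style: read attributes instead of receiving build() arguments,
        # and return the model itself (the "Language Model" output).
        return FakeChatModel(model=self.model, ak=self.qianfan_ak)

    def text_response(self) -> str:
        # The "Text" output reuses build_model() instead of duplicating its logic.
        model = self.build_model()
        return f"[{model.params['model']}] reply to: {self.input_value}"


print(ComponentSketch().text_response())  # [ERNIE-Bot-turbo] reply to: Hello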
View file

@@ -1,155 +1,143 @@
from typing import Any, Dict, Optional
from langchain_community.chat_models.litellm import ChatLiteLLM, ChatLiteLLMException
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
from langflow.field_typing import BaseLanguageModel, Text
from langflow.inputs import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, StrInput
from langflow.template import Output
class ChatLiteLLMModelComponent(LCModelComponent):
display_name = "LiteLLM"
description = "Generate text using the `LiteLLM` collection of large language models."
documentation = "https://python.langchain.com/docs/integrations/chat/litellm"
field_order = [
"model",
"api_key",
"provider",
"temperature",
"model_kwargs",
"top_p",
"top_k",
"n",
"max_tokens",
"max_retries",
"verbose",
"stream",
"input_value",
"system_message",
icon = "LiteLLM"
inputs = [
StrInput(name="input_value", display_name="Input", input_types=["Text", "Data", "Prompt"]),
StrInput(
name="model",
display_name="Model name",
advanced=False,
required=True,
info="The name of the model to use. For example, `gpt-3.5-turbo`.",
),
StrInput(
name="api_key",
display_name="API key",
advanced=False,
required=False,
password=True,
),
DropdownInput(
name="provider",
display_name="Provider",
info="The provider of the API key.",
options=[
"OpenAI",
"Azure",
"Anthropic",
"Replicate",
"Cohere",
"OpenRouter",
],
),
FloatInput(
name="temperature",
display_name="Temperature",
advanced=False,
required=False,
default=0.7,
),
DictInput(
name="model_kwargs",
display_name="Model kwargs",
advanced=True,
required=False,
default={},
),
FloatInput(
name="top_p",
display_name="Top p",
advanced=True,
required=False,
),
IntInput(
name="top_k",
display_name="Top k",
advanced=True,
required=False,
),
IntInput(
name="n",
display_name="N",
advanced=True,
required=False,
info="Number of chat completions to generate for each prompt. "
"Note that the API may not return the full n completions if duplicates are generated.",
default=1,
),
IntInput(
name="max_tokens",
display_name="Max tokens",
advanced=False,
default=256,
info="The maximum number of tokens to generate for each chat completion.",
),
IntInput(
name="max_retries",
display_name="Max retries",
advanced=True,
required=False,
default=6,
),
BoolInput(
name="verbose",
display_name="Verbose",
advanced=True,
required=False,
default=False,
),
BoolInput(
name="stream",
display_name="Stream",
info=STREAM_INFO_TEXT,
advanced=True,
),
StrInput(
name="system_message",
display_name="System Message",
info="System message to pass to the model.",
advanced=True,
),
]
def build_config(self):
return {
"model": {
"display_name": "Model name",
"field_type": "str",
"advanced": False,
"required": True,
"info": "The name of the model to use. For example, `gpt-3.5-turbo`.",
},
"api_key": {
"display_name": "API key",
"field_type": "str",
"advanced": False,
"required": False,
"password": True,
},
"provider": {
"display_name": "Provider",
"info": "The provider of the API key.",
"options": [
"OpenAI",
"Azure",
"Anthropic",
"Replicate",
"Cohere",
"OpenRouter",
],
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"advanced": False,
"required": False,
"default": 0.7,
},
"model_kwargs": {
"display_name": "Model kwargs",
"field_type": "dict",
"advanced": True,
"required": False,
"default": {},
},
"top_p": {
"display_name": "Top p",
"field_type": "float",
"advanced": True,
"required": False,
},
"top_k": {
"display_name": "Top k",
"field_type": "int",
"advanced": True,
"required": False,
},
"n": {
"display_name": "N",
"field_type": "int",
"advanced": True,
"required": False,
"info": "Number of chat completions to generate for each prompt. "
"Note that the API may not return the full n completions if duplicates are generated.",
"default": 1,
},
"max_tokens": {
"display_name": "Max tokens",
"advanced": False,
"default": 256,
"info": "The maximum number of tokens to generate for each chat completion.",
},
"max_retries": {
"display_name": "Max retries",
"field_type": "int",
"advanced": True,
"required": False,
"default": 6,
},
"verbose": {
"display_name": "Verbose",
"field_type": "bool",
"advanced": True,
"required": False,
"default": False,
},
"input_value": {"display_name": "Input", "input_types": ["Text", "Data", "Prompt"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
}
outputs = [
Output(display_name="Text", name="text_output", method="text_response"),
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def build(
self,
input_value: Text,
model: str,
provider: str,
api_key: Optional[str] = None,
stream: bool = False,
temperature: Optional[float] = 0.7,
model_kwargs: Optional[Dict[str, Any]] = {},
top_p: Optional[float] = None,
top_k: Optional[int] = None,
n: int = 1,
max_tokens: int = 256,
max_retries: int = 6,
verbose: bool = False,
system_message: Optional[str] = None,
) -> Text:
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> BaseLanguageModel:
try:
import litellm # type: ignore
litellm.drop_params = True
litellm.set_verbose = verbose
litellm.set_verbose = self.verbose
except ImportError:
raise ChatLiteLLMException(
"Could not import litellm python package. " "Please install it with `pip install litellm`"
)
provider_map = {
"OpenAI": "openai_api_key",
"Azure": "azure_api_key",
@@ -158,27 +146,28 @@ class ChatLiteLLMModelComponent(LCModelComponent):
"Cohere": "cohere_api_key",
"OpenRouter": "openrouter_api_key",
}
# Set the API key based on the provider
api_keys: dict[str, Optional[str]] = {v: None for v in provider_map.values()}
if variable_name := provider_map.get(provider):
api_keys[variable_name] = api_key
if variable_name := provider_map.get(self.provider):
api_keys[variable_name] = self.api_key
else:
raise ChatLiteLLMException(
f"Provider {provider} is not supported. Supported providers are: {', '.join(provider_map.keys())}"
f"Provider {self.provider} is not supported. Supported providers are: {', '.join(provider_map.keys())}"
)
output = ChatLiteLLM(
model=model,
model=self.model,
client=None,
streaming=stream,
temperature=temperature,
model_kwargs=model_kwargs if model_kwargs is not None else {},
top_p=top_p,
top_k=top_k,
n=n,
max_tokens=max_tokens,
max_retries=max_retries,
streaming=self.stream,
temperature=self.temperature,
model_kwargs=self.model_kwargs if self.model_kwargs is not None else {},
top_p=self.top_p,
top_k=self.top_k,
n=self.n,
max_tokens=self.max_tokens,
max_retries=self.max_retries,
openai_api_key=api_keys["openai_api_key"],
azure_api_key=api_keys["azure_api_key"],
anthropic_api_key=api_keys["anthropic_api_key"],
@@ -186,4 +175,6 @@ class ChatLiteLLMModelComponent(LCModelComponent):
cohere_api_key=api_keys["cohere_api_key"],
openrouter_api_key=api_keys["openrouter_api_key"],
)
return self.get_chat_result(output, stream, input_value, system_message)
return output

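The provider dispatch in the new build_model() is worth isolating: exactly one keyword argument of ChatLiteLLM receives the key, chosen by the provider field. A standalone sketch of that logic follows; note the "Replicate" map entry falls inside the elided hunk above, so its keyword name here is an assumption following the visible convention:

from typing import Optional

provider_map = {
    "OpenAI": "openai_api_key",
    "Azure": "azure_api_key",
    "Anthropic": "anthropic_api_key",
    "Replicate": "replicate_api_key",  # assumed; this entry is elided by the hunk break
    "Cohere": "cohere_api_key",
    "OpenRouter": "openrouter_api_key",
}


def resolve_api_keys(provider: str, api_key: Optional[str]) -> dict:
    # Every keyword starts unset; only the selected provider's slot is filled.
    api_keys: dict = {v: None for v in provider_map.values()}
    if variable_name := provider_map.get(provider):
        api_keys[variable_name] = api_key
    else:
        raise ValueError(
            f"Provider {provider} is not supported. "
            f"Supported providers are: {', '.join(provider_map.keys())}"
        )
    return api_keys


print(resolve_api_keys("OpenAI", "sk-test")["openai_api_key"])  # sk-test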
View file

@@ -1,102 +1,100 @@
from typing import Optional
from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic.v1 import SecretStr
from langflow.field_typing import Text, RangeSpec
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import BaseLanguageModel, Text
from langflow.inputs import BoolInput, FloatInput, IntInput, SecretStrInput, StrInput, DropdownInput
from langflow.template import Output
class GoogleGenerativeAIComponent(LCModelComponent):
display_name: str = "Google Generative AI"
description: str = "Generate text using Google Generative AI."
icon = "GoogleGenerativeAI"
field_order = [
"google_api_key",
"model",
"max_output_tokens",
"temperature",
"top_k",
"top_p",
"n",
"input_value",
"system_message",
"stream",
inputs = [
SecretStrInput(
name="google_api_key",
display_name="Google API Key",
info="The Google API Key to use for the Google Generative AI.",
),
DropdownInput(
name="model",
display_name="Model",
info="The name of the model to use.",
options=["gemini-1.5-pro", "gemini-1.5-flash"],
value="gemini-1.5-pro",
),
IntInput(
name="max_output_tokens",
display_name="Max Output Tokens",
info="The maximum number of tokens to generate.",
advanced=True,
),
FloatInput(
name="temperature",
display_name="Temperature",
info="Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
value=0.1,
),
IntInput(
name="top_k",
display_name="Top K",
info="Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.",
advanced=True,
),
FloatInput(
name="top_p",
display_name="Top P",
info="The maximum cumulative probability of tokens to consider when sampling.",
advanced=True,
),
IntInput(
name="n",
display_name="N",
info="Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
advanced=True,
),
StrInput(name="input_value", display_name="Input", info="The input to the model.", input_types=["Text", "Data", "Prompt"]),
BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True),
StrInput(
name="system_message",
display_name="System Message",
info="System message to pass to the model.",
advanced=True,
),
]
outputs = [
Output(display_name="Text", name="text_output", method="text_response"),
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def build_config(self):
return {
"google_api_key": {
"display_name": "Google API Key",
"info": "The Google API Key to use for the Google Generative AI.",
},
"max_output_tokens": {
"display_name": "Max Output Tokens",
"info": "The maximum number of tokens to generate.",
"advanced": True,
},
"temperature": {
"display_name": "Temperature",
"info": "Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
},
"top_k": {
"display_name": "Top K",
"info": "Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.",
"rangeSpec": RangeSpec(min=0, max=2, step=0.1),
"advanced": True,
},
"top_p": {
"display_name": "Top P",
"info": "The maximum cumulative probability of tokens to consider when sampling.",
"advanced": True,
},
"n": {
"display_name": "N",
"info": "Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
"advanced": True,
},
"model": {
"display_name": "Model",
"info": "The name of the model to use. Supported examples: gemini-pro",
"options": ["gemini-pro", "gemini-pro-vision"],
},
"code": {
"advanced": True,
},
"input_value": {"display_name": "Input", "info": "The input to the model."},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
}
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> BaseLanguageModel:
google_api_key = self.google_api_key
model = self.model
max_output_tokens = self.max_output_tokens
temperature = self.temperature
top_k = self.top_k
top_p = self.top_p
n = self.n
def build(
self,
google_api_key: str,
model: str,
input_value: Text,
max_output_tokens: Optional[int] = None,
temperature: float = 0.1,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
n: Optional[int] = 1,
stream: bool = False,
system_message: Optional[str] = None,
) -> Text:
output = ChatGoogleGenerativeAI(
model=model,
max_output_tokens=max_output_tokens or None, # type: ignore
max_output_tokens=max_output_tokens or None,
temperature=temperature,
top_k=top_k or None,
top_p=top_p or None, # type: ignore
top_p=top_p or None,
n=n or 1,
google_api_key=SecretStr(google_api_key),
)
return self.get_chat_result(output, stream, input_value, system_message)
return output

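One detail in the Google component above: numeric fields such as max_output_tokens and top_p are passed as `value or None`. An unset IntInput/FloatInput tends to arrive as a falsy zero-value, and the coercion lets the client library fall back to its own default instead of receiving 0. A tiny sketch of the behavior:

from typing import Optional


def normalize(value: Optional[int]) -> Optional[int]:
    # 0 and None both collapse to None, so the client applies its own default.
    return value or None


print(normalize(0))     # None (unset field arrived as 0)
print(normalize(None))  # None
print(normalize(1024))  # 1024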
View file

@@ -1,102 +1,106 @@
from typing import Optional
from langchain_groq import ChatGroq
from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.groq_constants import MODEL_NAMES
from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import BaseLanguageModel, Text
from langflow.template import Input, Output
from langflow.inputs import BoolInput, FloatInput, IntInput, SecretStrInput, StrInput, DropdownInput
from langflow.template import Output
class GroqModelComponent(LCModelComponent):
class GroqModel(LCModelComponent):
display_name: str = "Groq"
description: str = "Generate text using Groq."
icon = "Groq"
inputs = [
Input(
SecretStrInput(
name="groq_api_key",
field_type=str,
display_name="Groq API Key",
info="API key for the Groq API.",
password=True,
),
Input(
StrInput(
name="groq_api_base",
field_type=Optional[str],
display_name="Groq API Base",
advanced=True,
info="Base URL path for API requests, leave blank if not using a proxy or service emulator.",
advanced=True,
),
Input(
IntInput(
name="max_tokens",
field_type=Optional[int],
display_name="Max Output Tokens",
advanced=True,
info="The maximum number of tokens to generate.",
),
Input(
name="temperature",
field_type=float,
display_name="Temperature",
info="Run inference with this temperature. Must be in the closed interval [0.0, 1.0].",
),
Input(
name="n",
field_type=Optional[int],
display_name="N",
advanced=True,
info="Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
),
Input(
FloatInput(
name="temperature",
display_name="Temperature",
info="Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
value=0.1,
),
IntInput(
name="n",
display_name="N",
info="Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
advanced=True,
),
DropdownInput(
name="model_name",
field_type=str,
display_name="Model",
info="The name of the model to use. Supported examples: gemini-pro",
info="The name of the model to use.",
options=MODEL_NAMES,
),
Input(name="input_value", field_type=str, display_name="Input", input_types=["Text", "Data", "Prompt"]),
Input(name="stream", field_type=bool, display_name="Stream", advanced=True, info=STREAM_INFO_TEXT),
Input(
name="system_message",
field_type=Optional[str],
display_name="System Message",
StrInput(
name="input_value",
display_name="Input",
info="The input to the model.",
input_types=["Text", "Data", "Prompt"],
),
BoolInput(
name="stream",
display_name="Stream",
info=STREAM_INFO_TEXT,
advanced=True,
),
StrInput(
name="system_message",
display_name="System Message",
info="System message to pass to the model.",
advanced=True,
),
]
outputs = [
Output(display_name="Text", name="text_output", method="text_response"),
Output(display_name="Language Model", name="model_output", method="model_response"),
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.model_response()
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def model_response(self) -> BaseLanguageModel:
def build_model(self) -> BaseLanguageModel:
groq_api_key = self.groq_api_key
model_name = self.model_name
groq_api_base = self.groq_api_base or None
max_tokens = self.max_tokens
temperature = self.temperature
n = self.n or 1
groq_api_base = self.groq_api_base
n = self.n
stream = self.stream
output = ChatGroq(
model_name=model_name,
max_tokens=max_tokens or None, # type: ignore
max_tokens=max_tokens or None,
temperature=temperature,
groq_api_base=groq_api_base,
n=n,
n=n or 1,
groq_api_key=SecretStr(groq_api_key),
streaming=stream,
)
return output

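As in the other files, the raw key string is wrapped in pydantic.v1.SecretStr before reaching ChatGroq, so it is masked in reprs and logs and only revealed on demand. A quick illustration using the same import path as the component (the key value is made up):

from pydantic.v1 import SecretStr

api_key = SecretStr("gsk-example-not-a-real-key")
print(api_key)                     # **********
print(api_key.get_secret_value())  # gsk-example-not-a-real-key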
View file

@@ -5,7 +5,9 @@ from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
from langflow.field_typing import BaseLanguageModel, Text
from langflow.inputs import BoolInput, DictInput, DropdownInput, SecretStrInput, StrInput
from langflow.template import Output
class HuggingFaceEndpointsComponent(LCModelComponent):
@@ -13,64 +15,54 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
description: str = "Generate text using Hugging Face Inference APIs."
icon = "HuggingFace"
field_order = [
"endpoint_url",
"task",
"huggingfacehub_api_token",
"model_kwargs",
"input_value",
"system_message",
"stream",
inputs = [
StrInput(name="input_value", display_name="Input", input_types=["Text", "Data", "Prompt"]),
SecretStrInput(name="endpoint_url", display_name="Endpoint URL", password=True),
DropdownInput(
name="task",
display_name="Task",
options=["text2text-generation", "text-generation", "summarization"],
),
SecretStrInput(name="huggingfacehub_api_token", display_name="API token", password=True),
DictInput(name="model_kwargs", display_name="Model Keyword Arguments", advanced=True),
BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True),
StrInput(
name="system_message",
display_name="System Message",
info="System message to pass to the model.",
advanced=True,
),
]
def build_config(self):
return {
"endpoint_url": {"display_name": "Endpoint URL", "password": True},
"task": {
"display_name": "Task",
"options": ["text2text-generation", "text-generation", "summarization"],
},
"huggingfacehub_api_token": {"display_name": "API token", "password": True},
"model_kwargs": {
"display_name": "Model Keyword Arguments",
"field_type": "code",
"advanced": True,
},
"code": {"show": False},
"input_value": {"display_name": "Input", "input_types": ["Text", "Data", "Prompt"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
}
outputs = [
Output(display_name="Text", name="text_output", method="text_response"),
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> BaseLanguageModel:
endpoint_url = self.endpoint_url
task = self.task
huggingfacehub_api_token = self.huggingfacehub_api_token
model_kwargs = self.model_kwargs or {}
def build(
self,
input_value: Text,
endpoint_url: str,
model: Optional[str] = None,
task: str = "text2text-generation",
huggingfacehub_api_token: Optional[str] = None,
model_kwargs: Optional[dict] = None,
stream: bool = False,
system_message: Optional[str] = None,
) -> Text:
try:
llm = HuggingFaceEndpoint( # type: ignore
llm = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
task=task,
huggingfacehub_api_token=huggingfacehub_api_token,
model_kwargs=model_kwargs or {},
model=model or "",
model_kwargs=model_kwargs,
)
except Exception as e:
raise ValueError("Could not connect to HuggingFace Endpoints API.") from e
output = ChatHuggingFace(llm=llm)
return self.get_chat_result(output, stream, input_value, system_message)
return output

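Both the old and new versions keep the wrap-and-raise pattern around the HuggingFaceEndpoint constructor: any low-level failure is re-raised as a single ValueError with a stable message, preserving the original error as __cause__. A self-contained sketch (the connect function and its ConnectionError are illustrative stand-ins):

def connect(endpoint_url: str):
    try:
        # Stand-in failure; the real code constructs HuggingFaceEndpoint here.
        raise ConnectionError(f"cannot reach {endpoint_url}")
    except Exception as e:
        raise ValueError("Could not connect to HuggingFace Endpoints API.") from e


try:
    connect("https://example.invalid")
except ValueError as err:
    print(err, "| caused by:", err.__cause__)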
View file

@@ -1,131 +1,103 @@
from typing import Optional
from langchain_mistralai import ChatMistralAI
from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
from langflow.field_typing import BaseLanguageModel, Text
from langflow.inputs import BoolInput, FloatInput, IntInput, SecretStrInput, StrInput, DropdownInput
from langflow.template import Output
class MistralAIModelComponent(LCModelComponent):
display_name = "MistralAI"
description = "Generates text using MistralAI LLMs."
icon = "MistralAI"
field_order = [
"max_tokens",
"model_kwargs",
"model_name",
"mistral_api_base",
"mistral_api_key",
"temperature",
"input_value",
"system_message",
"stream",
inputs = [
StrInput(name="input_value", display_name="Input", input_types=["Text", "Data", "Prompt"]),
IntInput(
name="max_tokens",
display_name="Max Tokens",
advanced=True,
info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
),
DropdownInput(
name="model_name",
display_name="Model Name",
advanced=False,
options=[
"open-mixtral-8x7b",
"open-mixtral-8x22b",
"mistral-small-latest",
"mistral-medium-latest",
"mistral-large-latest",
"codestral-latest",
],
value="codestral-latest",
),
StrInput(
name="mistral_api_base",
display_name="Mistral API Base",
advanced=True,
info=(
"The base URL of the Mistral API. Defaults to https://api.mistral.ai/v1. "
"You can change this to use other APIs like JinaChat, LocalAI and Prem."
),
),
SecretStrInput(
name="mistral_api_key",
display_name="Mistral API Key",
info="The Mistral API Key to use for the Mistral model.",
advanced=False,
),
FloatInput(name="temperature", display_name="Temperature", advanced=False, value=0.1),
BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True),
StrInput(
name="system_message",
display_name="System Message",
info="System message to pass to the model.",
advanced=True,
),
IntInput(name="max_retries", display_name="Max Retries", advanced=True),
IntInput(name="timeout", display_name="Timeout", advanced=True),
IntInput(name="max_concurrent_requests", display_name="Max Concurrent Requests", advanced=True),
FloatInput(name="top_p", display_name="Top P", advanced=True),
IntInput(name="random_seed", display_name="Random Seed", value=1, advanced=True),
BoolInput(name="safe_mode", display_name="Safe Mode", advanced=True),
]
def build_config(self):
return {
"input_value": {"display_name": "Input", "input_types": ["Text", "Data", "Prompt"]},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
},
"model_name": {
"display_name": "Model Name",
"advanced": False,
"options": [
"open-mistral-7b",
"open-mixtral-8x7b",
"open-mixtral-8x22b",
"mistral-small-latest",
"mistral-medium-latest",
"mistral-large-latest",
],
"value": "open-mistral-7b",
},
"mistral_api_base": {
"display_name": "Mistral API Base",
"advanced": True,
"info": (
"The base URL of the Mistral API. Defaults to https://api.mistral.ai.\n\n"
"You can change this to use other APIs like JinaChat, LocalAI and Prem."
),
},
"mistral_api_key": {
"display_name": "Mistral API Key",
"info": "The Mistral API Key to use for the Mistral model.",
"advanced": False,
"password": True,
},
"temperature": {
"display_name": "Temperature",
"advanced": False,
"value": 0.1,
},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",
"advanced": True,
},
"max_retries": {
"display_name": "Max Retries",
"advanced": True,
},
"timeout": {
"display_name": "Timeout",
"advanced": True,
},
"max_concurrent_requests": {
"display_name": "Max Concurrent Requests",
"advanced": True,
},
"top_p": {
"display_name": "Top P",
"advanced": True,
},
"random_seed": {
"display_name": "Random Seed",
"advanced": True,
},
"safe_mode": {
"display_name": "Safe Mode",
"advanced": True,
},
}
outputs = [
Output(display_name="Text", name="text_output", method="text_response"),
Output(display_name="Language Model", name="model_output", method="build_model"),
]
def text_response(self) -> Text:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = self.get_chat_result(output, stream, input_value, system_message)
self.status = result
return result
def build_model(self) -> BaseLanguageModel:
mistral_api_key = self.mistral_api_key
temperature = self.temperature
model_name = self.model_name
max_tokens = self.max_tokens
mistral_api_base = self.mistral_api_base or "https://api.mistral.ai/v1"
max_retries = self.max_retries
timeout = self.timeout
max_concurrent_requests = self.max_concurrent_requests
top_p = self.top_p
random_seed = self.random_seed
safe_mode = self.safe_mode
def build(
self,
input_value: Text,
mistral_api_key: str,
model_name: str,
temperature: float = 0.1,
max_tokens: Optional[int] = 256,
mistral_api_base: Optional[str] = None,
stream: bool = False,
system_message: Optional[str] = None,
max_retries: int = 5,
timeout: int = 120,
max_concurrent_requests: int = 64,
top_p: float = 1,
random_seed: Optional[int] = None,
safe_mode: bool = False,
) -> Text:
if not mistral_api_base:
mistral_api_base = "https://api.mistral.ai"
if mistral_api_key:
api_key = SecretStr(mistral_api_key)
else:
api_key = None
chat_model = ChatMistralAI(
output = ChatMistralAI(
max_tokens=max_tokens or None,
model_name=model_name,
endpoint=mistral_api_base,
@@ -139,4 +111,4 @@ class MistralAIModelComponent(LCModelComponent):
safe_mode=safe_mode,
)
return self.get_chat_result(chat_model, stream, input_value, system_message)
return output
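
Finally, note how the new build_model() collapses the old `if not mistral_api_base:` branch into `self.mistral_api_base or "https://api.mistral.ai/v1"`, also updating the default to the /v1 path. A sketch of the falsy-fallback idiom:

from typing import Optional

DEFAULT_MISTRAL_API_BASE = "https://api.mistral.ai/v1"


def resolve_api_base(field_value: Optional[str]) -> str:
    # "" and None are both falsy, so a blank UI field falls back to the default.
    return field_value or DEFAULT_MISTRAL_API_BASE


print(resolve_api_base(None))                    # https://api.mistral.ai/v1
print(resolve_api_base(""))                      # https://api.mistral.ai/v1
print(resolve_api_base("http://localhost:1234")) # http://localhost:1234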