refactor: remove model_specs

Gabriel Luiz Freitas Almeida 2024-06-22 15:37:34 -03:00
commit 6b7dcaaef6
18 changed files with 0 additions and 1672 deletions

View file

@@ -6,7 +6,6 @@ __all__ = [
"experimental",
"inputs",
"memories",
"model_specs",
"outputs",
"retrievers",
"textsplitters",

View file

@@ -1,62 +0,0 @@
from typing import Optional
from langchain_community.llms.bedrock import Bedrock
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
class AmazonBedrockComponent(CustomComponent):
display_name: str = "Amazon Bedrock"
description: str = "LLM model from Amazon Bedrock."
icon = "Amazon"
def build_config(self):
return {
"model_id": {
"display_name": "Model Id",
"options": [
"ai21.j2-grande-instruct",
"ai21.j2-jumbo-instruct",
"ai21.j2-mid",
"ai21.j2-mid-v1",
"ai21.j2-ultra",
"ai21.j2-ultra-v1",
"anthropic.claude-instant-v1",
"anthropic.claude-v1",
"anthropic.claude-v2",
"cohere.command-text-v14",
],
},
"credentials_profile_name": {"display_name": "Credentials Profile Name"},
"streaming": {"display_name": "Streaming", "field_type": "bool"},
"endpoint_url": {"display_name": "Endpoint URL"},
"region_name": {"display_name": "Region Name"},
"model_kwargs": {"display_name": "Model Kwargs"},
"cache": {"display_name": "Cache"},
"code": {"advanced": True},
}
def build(
self,
model_id: str = "anthropic.claude-instant-v1",
credentials_profile_name: Optional[str] = None,
region_name: Optional[str] = None,
model_kwargs: Optional[dict] = None,
endpoint_url: Optional[str] = None,
streaming: bool = False,
cache: Optional[bool] = None,
) -> LanguageModel:
try:
output = Bedrock(
credentials_profile_name=credentials_profile_name,
model_id=model_id,
region_name=region_name,
model_kwargs=model_kwargs,
endpoint_url=endpoint_url,
streaming=streaming,
cache=cache,
) # type: ignore
except Exception as e:
raise ValueError("Could not connect to AmazonBedrock API.") from e
return output
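
For reference, a minimal sketch of how this removed component could be exercised, assuming the CustomComponent subclass can be instantiated with no arguments; the profile and region values below are illustrative, not from the codebase:

component = AmazonBedrockComponent()
llm = component.build(
    model_id="anthropic.claude-v2",
    credentials_profile_name="default",  # illustrative AWS profile name
    region_name="us-east-1",             # illustrative region
)
print(llm.invoke("Hello from Bedrock"))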

View file

@@ -1,75 +0,0 @@
from typing import Optional
from langchain_anthropic import ChatAnthropic
from pydantic.v1 import SecretStr
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
class ChatAntropicSpecsComponent(CustomComponent):
display_name: str = "Anthropic"
description: str = "Anthropic Chat&Completion large language models."
icon = "Anthropic"
def build_config(self):
return {
"model": {
"display_name": "Model Name",
"options": [
"claude-2.1",
"claude-2.0",
"claude-instant-1.2",
"claude-instant-1",
# Add more models as needed
],
"info": "https://python.langchain.com/docs/integrations/chat/anthropic",
"required": True,
"value": "claude-2.1",
},
"anthropic_api_key": {
"display_name": "Anthropic API Key",
"required": True,
"password": True,
"info": "Your Anthropic API key.",
},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"value": 0.7,
},
"api_endpoint": {
"display_name": "API Endpoint",
"info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
},
"code": {"show": False},
}
def build(
self,
model: str,
anthropic_api_key: Optional[str] = None,
max_tokens: Optional[int] = 1000,
temperature: Optional[float] = None,
api_endpoint: Optional[str] = None,
) -> LanguageModel:
# Set default API endpoint if not provided
if not api_endpoint:
api_endpoint = "https://api.anthropic.com"
try:
output = ChatAnthropic(
model_name=model,
anthropic_api_key=SecretStr(anthropic_api_key) if anthropic_api_key else None,
max_tokens_to_sample=max_tokens, # type: ignore
temperature=temperature,
anthropic_api_url=api_endpoint,
)
except Exception as e:
raise ValueError("Could not connect to Anthropic API.") from e
return output
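
A sketch of the removed component's default-endpoint behavior, under the same no-argument-instantiation assumption; the API key is a placeholder:

component = ChatAntropicSpecsComponent()
llm = component.build(
    model="claude-2.1",
    anthropic_api_key="sk-ant-...",  # placeholder; wrapped in SecretStr inside build()
)
# api_endpoint was omitted, so build() falls back to "https://api.anthropic.com".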

View file

@@ -1,101 +0,0 @@
from typing import Optional
from langchain_openai import AzureChatOpenAI
from pydantic.v1 import SecretStr
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
class AzureChatOpenAISpecsComponent(CustomComponent):
display_name: str = "AzureChatOpenAI"
description: str = "LLM model from Azure OpenAI."
documentation: str = "https://python.langchain.com/docs/integrations/llms/azure_openai"
beta = False
icon = "Azure"
AZURE_OPENAI_MODELS = [
"gpt-35-turbo",
"gpt-35-turbo-16k",
"gpt-35-turbo-instruct",
"gpt-4",
"gpt-4-32k",
"gpt-4-vision",
]
AZURE_OPENAI_API_VERSIONS = [
"2023-03-15-preview",
"2023-05-15",
"2023-06-01-preview",
"2023-07-01-preview",
"2023-08-01-preview",
"2023-09-01-preview",
"2023-12-01-preview",
]
def build_config(self):
return {
"model": {
"display_name": "Model Name",
"value": self.AZURE_OPENAI_MODELS[0],
"options": self.AZURE_OPENAI_MODELS,
"required": True,
},
"azure_endpoint": {
"display_name": "Azure Endpoint",
"required": True,
"info": "Your Azure endpoint, including the resource.. Example: `https://example-resource.azure.openai.com/`",
},
"azure_deployment": {
"display_name": "Deployment Name",
"required": True,
},
"api_version": {
"display_name": "API Version",
"options": self.AZURE_OPENAI_API_VERSIONS,
"value": self.AZURE_OPENAI_API_VERSIONS[-1],
"required": True,
"advanced": True,
},
"api_key": {"display_name": "API Key", "required": True, "password": True},
"temperature": {
"display_name": "Temperature",
"value": 0.7,
"field_type": "float",
"required": False,
},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
},
"code": {"show": False},
}
def build(
self,
model: str,
azure_endpoint: str,
azure_deployment: str,
api_key: str,
api_version: str,
temperature: float = 0.7,
max_tokens: Optional[int] = 1000,
) -> LanguageModel:
if api_key:
azure_api_key = SecretStr(api_key)
else:
azure_api_key = None
try:
llm = AzureChatOpenAI(
model=model,
azure_endpoint=azure_endpoint,
azure_deployment=azure_deployment,
api_version=api_version,
api_key=azure_api_key,
temperature=temperature,
max_tokens=max_tokens or None,
)
except Exception as e:
raise ValueError("Could not connect to AzureOpenAI API.") from e
return llm
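
A sketch of invoking the removed Azure component; the endpoint, deployment, and key values are placeholders. Note that build_config defaults api_version to the newest listed preview version:

component = AzureChatOpenAISpecsComponent()
llm = component.build(
    model="gpt-35-turbo",
    azure_endpoint="https://example-resource.azure.openai.com/",
    azure_deployment="my-deployment",  # placeholder deployment name
    api_key="...",                     # placeholder
    api_version=AzureChatOpenAISpecsComponent.AZURE_OPENAI_API_VERSIONS[-1],
)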

View file

@@ -1,95 +0,0 @@
from typing import Optional
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from pydantic.v1 import SecretStr
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
class QianfanChatEndpointComponent(CustomComponent):
display_name: str = "QianfanChatEndpoint"
description: str = (
"Baidu Qianfan chat models. Get more detail from "
"https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint."
)
def build_config(self):
return {
"model": {
"display_name": "Model Name",
"options": [
"ERNIE-Bot",
"ERNIE-Bot-turbo",
"BLOOMZ-7B",
"Llama-2-7b-chat",
"Llama-2-13b-chat",
"Llama-2-70b-chat",
"Qianfan-BLOOMZ-7B-compressed",
"Qianfan-Chinese-Llama-2-7B",
"ChatGLM2-6B-32K",
"AquilaChat-7B",
],
"info": "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint",
"required": True,
},
"qianfan_ak": {
"display_name": "Qianfan Ak",
"required": True,
"password": True,
"info": "which you could get from https://cloud.baidu.com/product/wenxinworkshop",
},
"qianfan_sk": {
"display_name": "Qianfan Sk",
"required": True,
"password": True,
"info": "which you could get from https://cloud.baidu.com/product/wenxinworkshop",
},
"top_p": {
"display_name": "Top p",
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 0.8,
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 0.95,
},
"penalty_score": {
"display_name": "Penalty Score",
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 1.0,
},
"endpoint": {
"display_name": "Endpoint",
"info": "Endpoint of the Qianfan LLM, required if custom model used.",
},
"code": {"show": False},
}
def build(
self,
model: str = "ERNIE-Bot-turbo",
qianfan_ak: Optional[str] = None,
qianfan_sk: Optional[str] = None,
top_p: Optional[float] = None,
temperature: Optional[float] = None,
penalty_score: Optional[float] = None,
endpoint: Optional[str] = None,
) -> LanguageModel:
try:
output = QianfanChatEndpoint( # type: ignore
model=model,
qianfan_ak=SecretStr(qianfan_ak) if qianfan_ak else None,
qianfan_sk=SecretStr(qianfan_sk) if qianfan_sk else None,
top_p=top_p,
temperature=temperature,
penalty_score=penalty_score,
endpoint=endpoint,
)
except Exception as e:
raise ValueError("Could not connect to Baidu Qianfan API.") from e
return output # type: ignore

View file

@@ -1,94 +0,0 @@
from typing import Optional
from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
class QianfanLLMEndpointComponent(CustomComponent):
display_name: str = "QianfanLLMEndpoint"
description: str = (
"Baidu Qianfan hosted open source or customized models. "
"Get more detail from https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint"
)
def build_config(self):
return {
"model": {
"display_name": "Model Name",
"options": [
"ERNIE-Bot",
"ERNIE-Bot-turbo",
"BLOOMZ-7B",
"Llama-2-7b-chat",
"Llama-2-13b-chat",
"Llama-2-70b-chat",
"Qianfan-BLOOMZ-7B-compressed",
"Qianfan-Chinese-Llama-2-7B",
"ChatGLM2-6B-32K",
"AquilaChat-7B",
],
"info": "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint",
"required": True,
},
"qianfan_ak": {
"display_name": "Qianfan Ak",
"required": True,
"password": True,
"info": "which you could get from https://cloud.baidu.com/product/wenxinworkshop",
},
"qianfan_sk": {
"display_name": "Qianfan Sk",
"required": True,
"password": True,
"info": "which you could get from https://cloud.baidu.com/product/wenxinworkshop",
},
"top_p": {
"display_name": "Top p",
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 0.8,
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 0.95,
},
"penalty_score": {
"display_name": "Penalty Score",
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 1.0,
},
"endpoint": {
"display_name": "Endpoint",
"info": "Endpoint of the Qianfan LLM, required if custom model used.",
},
"code": {"show": False},
}
def build(
self,
model: str = "ERNIE-Bot-turbo",
qianfan_ak: Optional[str] = None,
qianfan_sk: Optional[str] = None,
top_p: Optional[float] = None,
temperature: Optional[float] = None,
penalty_score: Optional[float] = None,
endpoint: Optional[str] = None,
) -> LanguageModel:
try:
output = QianfanLLMEndpoint( # type: ignore
model=model,
qianfan_ak=qianfan_ak,
qianfan_sk=qianfan_sk,
top_p=top_p,
temperature=temperature,
penalty_score=penalty_score,
endpoint=endpoint,
)
except Exception as e:
raise ValueError("Could not connect to Baidu Qianfan API.") from e
return output # type: ignore

View file

@@ -1,165 +0,0 @@
from typing import Any, Dict, Optional
from langchain_community.chat_models.litellm import ChatLiteLLM, ChatLiteLLMException
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
class ChatLiteLLMComponent(CustomComponent):
display_name = "ChatLiteLLM"
description = "`LiteLLM` collection of large language models."
documentation = "https://python.langchain.com/docs/integrations/chat/litellm"
def build_config(self):
return {
"model": {
"display_name": "Model name",
"field_type": "str",
"advanced": False,
"required": True,
"info": "The name of the model to use. For example, `gpt-3.5-turbo`.",
},
"api_key": {
"display_name": "API key",
"field_type": "str",
"advanced": False,
"required": False,
"password": True,
},
"provider": {
"display_name": "Provider",
"info": "The provider of the API key.",
"options": [
"OpenAI",
"Azure",
"Anthropic",
"Replicate",
"Cohere",
"OpenRouter",
],
},
"streaming": {
"display_name": "Streaming",
"field_type": "bool",
"advanced": True,
"required": False,
"default": True,
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"advanced": False,
"required": False,
"default": 0.7,
},
"model_kwargs": {
"display_name": "Model kwargs",
"field_type": "dict",
"advanced": True,
"required": False,
"default": {},
},
"top_p": {
"display_name": "Top p",
"field_type": "float",
"advanced": True,
"required": False,
},
"top_k": {
"display_name": "Top k",
"field_type": "int",
"advanced": True,
"required": False,
},
"n": {
"display_name": "N",
"field_type": "int",
"advanced": True,
"required": False,
"info": "Number of chat completions to generate for each prompt. "
"Note that the API may not return the full n completions if duplicates are generated.",
"default": 1,
},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
},
"max_retries": {
"display_name": "Max retries",
"field_type": "int",
"advanced": True,
"required": False,
"default": 6,
},
"verbose": {
"display_name": "Verbose",
"field_type": "bool",
"advanced": True,
"required": False,
"default": False,
},
}
def build(
self,
model: str,
provider: str,
api_key: Optional[str] = None,
streaming: bool = True,
temperature: Optional[float] = 0.7,
        model_kwargs: Optional[Dict[str, Any]] = None,
top_p: Optional[float] = None,
top_k: Optional[int] = None,
n: int = 1,
max_tokens: int = 256,
max_retries: int = 6,
verbose: bool = False,
) -> LanguageModel:
try:
import litellm # type: ignore
litellm.drop_params = True
litellm.set_verbose = verbose
except ImportError:
            raise ChatLiteLLMException(
                "Could not import litellm python package. Please install it with `pip install litellm`."
            )
provider_map = {
"OpenAI": "openai_api_key",
"Azure": "azure_api_key",
"Anthropic": "anthropic_api_key",
"Replicate": "replicate_api_key",
"Cohere": "cohere_api_key",
"OpenRouter": "openrouter_api_key",
}
# Set the API key based on the provider
api_keys: dict[str, Optional[str]] = {v: None for v in provider_map.values()}
if variable_name := provider_map.get(provider):
api_keys[variable_name] = api_key
else:
raise ChatLiteLLMException(
f"Provider {provider} is not supported. Supported providers are: {', '.join(provider_map.keys())}"
)
LLM = ChatLiteLLM(
model=model,
client=None,
streaming=streaming,
temperature=temperature,
model_kwargs=model_kwargs if model_kwargs is not None else {},
top_p=top_p,
top_k=top_k,
n=n,
max_tokens=max_tokens,
max_retries=max_retries,
openai_api_key=api_keys["openai_api_key"],
azure_api_key=api_keys["azure_api_key"],
anthropic_api_key=api_keys["anthropic_api_key"],
replicate_api_key=api_keys["replicate_api_key"],
cohere_api_key=api_keys["cohere_api_key"],
openrouter_api_key=api_keys["openrouter_api_key"],
)
return LLM
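
A sketch of how the provider routing above behaves; the key is a placeholder, and only the matching provider kwarg is populated while the rest stay None:

component = ChatLiteLLMComponent()
llm = component.build(
    model="gpt-3.5-turbo",
    provider="OpenAI",  # mapped to the openai_api_key kwarg via provider_map
    api_key="sk-...",   # placeholder
)
# An unknown provider raises ChatLiteLLMException before ChatLiteLLM is built.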

View file

@@ -1,87 +0,0 @@
from typing import Optional
from langchain_mistralai import ChatMistralAI
from pydantic.v1 import SecretStr
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
class MistralAIModelComponent(CustomComponent):
display_name: str = "MistralAI"
description: str = "Generate text using MistralAI LLMs."
icon = "MistralAI"
field_order = [
"model",
"mistral_api_key",
"max_tokens",
"temperature",
"mistral_api_base",
]
def build_config(self):
return {
"model": {
"display_name": "Model Name",
"options": [
"open-mistral-7b",
"open-mixtral-8x7b",
"open-mixtral-8x22b",
"mistral-small-latest",
"mistral-medium-latest",
"mistral-large-latest",
],
"info": "Name of the model to use.",
"required": True,
"value": "open-mistral-7b",
},
"mistral_api_key": {
"display_name": "Mistral API Key",
"required": True,
"password": True,
"info": "Your Mistral API key.",
},
"max_tokens": {
"display_name": "Max Tokens",
"field_type": "int",
"advanced": True,
"value": 256,
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"value": 0.1,
},
"mistral_api_base": {
"display_name": "Mistral API Base",
"advanced": True,
"info": "Endpoint of the Mistral API. Defaults to 'https://api.mistral.ai' if not specified.",
},
"code": {"show": False},
}
def build(
self,
model: str,
temperature: float = 0.1,
mistral_api_key: Optional[str] = None,
max_tokens: Optional[int] = None,
mistral_api_base: Optional[str] = None,
) -> LanguageModel:
# Set default API endpoint if not provided
if not mistral_api_base:
mistral_api_base = "https://api.mistral.ai"
try:
output = ChatMistralAI(
model_name=model,
api_key=(SecretStr(mistral_api_key) if mistral_api_key else None),
max_tokens=max_tokens or None,
temperature=temperature,
endpoint=mistral_api_base,
)
except Exception as e:
raise ValueError("Could not connect to Mistral API.") from e
return output

View file

@@ -1,251 +0,0 @@
from typing import Dict, List, Optional
from langchain_community.chat_models import ChatOllama
from langchain_core.language_models.chat_models import BaseChatModel
from langflow.custom import CustomComponent
# Uncomment when a callback component is added to Langflow:
# from langchain.callbacks.manager import CallbackManager
class ChatOllamaComponent(CustomComponent):
display_name = "ChatOllama"
description = "Local LLM for chat with Ollama."
def build_config(self) -> dict:
return {
"base_url": {
"display_name": "Base URL",
"info": "Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.",
},
"model": {
"display_name": "Model Name",
"value": "llama2",
"info": "Refer to https://ollama.ai/library for more models.",
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"value": 0.8,
"info": "Controls the creativity of model responses.",
},
"cache": {
"display_name": "Cache",
"field_type": "bool",
"info": "Enable or disable caching.",
"advanced": True,
"value": False,
},
            ### Uncomment these fields once a callback component is added to Langflow. ###
# "callback_manager": {
# "display_name": "Callback Manager",
# "info": "Optional callback manager for additional functionality.",
# "advanced": True,
# },
# "callbacks": {
# "display_name": "Callbacks",
# "info": "Callbacks to execute during model runtime.",
# "advanced": True,
# },
########################################################################################
"format": {
"display_name": "Format",
"field_type": "str",
"info": "Specify the format of the output (e.g., json).",
"advanced": True,
},
"metadata": {
"display_name": "Metadata",
"info": "Metadata to add to the run trace.",
"advanced": True,
},
"mirostat": {
"display_name": "Mirostat",
"options": ["Disabled", "Mirostat", "Mirostat 2.0"],
"info": "Enable/disable Mirostat sampling for controlling perplexity.",
"value": "Disabled",
"advanced": True,
},
"mirostat_eta": {
"display_name": "Mirostat Eta",
"field_type": "float",
"info": "Learning rate for Mirostat algorithm. (Default: 0.1)",
"advanced": True,
},
"mirostat_tau": {
"display_name": "Mirostat Tau",
"field_type": "float",
"info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)",
"advanced": True,
},
"num_ctx": {
"display_name": "Context Window Size",
"field_type": "int",
"info": "Size of the context window for generating tokens. (Default: 2048)",
"advanced": True,
},
"num_gpu": {
"display_name": "Number of GPUs",
"field_type": "int",
"info": "Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)",
"advanced": True,
},
"num_thread": {
"display_name": "Number of Threads",
"field_type": "int",
"info": "Number of threads to use during computation. (Default: detected for optimal performance)",
"advanced": True,
},
"repeat_last_n": {
"display_name": "Repeat Last N",
"field_type": "int",
"info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
"advanced": True,
},
"repeat_penalty": {
"display_name": "Repeat Penalty",
"field_type": "float",
"info": "Penalty for repetitions in generated text. (Default: 1.1)",
"advanced": True,
},
"tfs_z": {
"display_name": "TFS Z",
"field_type": "float",
"info": "Tail free sampling value. (Default: 1)",
"advanced": True,
},
"timeout": {
"display_name": "Timeout",
"field_type": "int",
"info": "Timeout for the request stream.",
"advanced": True,
},
"top_k": {
"display_name": "Top K",
"field_type": "int",
"info": "Limits token selection to top K. (Default: 40)",
"advanced": True,
},
"top_p": {
"display_name": "Top P",
"field_type": "float",
"info": "Works together with top-k. (Default: 0.9)",
"advanced": True,
},
"verbose": {
"display_name": "Verbose",
"field_type": "bool",
"info": "Whether to print out response text.",
},
"tags": {
"display_name": "Tags",
"field_type": "list",
"info": "Tags to add to the run trace.",
"advanced": True,
},
"stop": {
"display_name": "Stop Tokens",
"field_type": "list",
"info": "List of tokens to signal the model to stop generating text.",
"advanced": True,
},
"system": {
"display_name": "System",
"field_type": "str",
"info": "System to use for generating text.",
"advanced": True,
},
"template": {
"display_name": "Template",
"field_type": "str",
"info": "Template to use for generating text.",
"advanced": True,
},
}
def build(
self,
base_url: Optional[str],
model: str,
mirostat: Optional[str],
mirostat_eta: Optional[float] = None,
mirostat_tau: Optional[float] = None,
        ### Uncomment these parameters once a callback component is added to Langflow. ###
# callback_manager: Optional[CallbackManager] = None,
# callbacks: Optional[List[Callbacks]] = None,
#######################################################################################
repeat_last_n: Optional[int] = None,
verbose: Optional[bool] = None,
cache: Optional[bool] = None,
num_ctx: Optional[int] = None,
num_gpu: Optional[int] = None,
format: Optional[str] = None,
metadata: Optional[Dict] = None,
num_thread: Optional[int] = None,
repeat_penalty: Optional[float] = None,
stop: Optional[List[str]] = None,
system: Optional[str] = None,
tags: Optional[List[str]] = None,
temperature: Optional[float] = None,
template: Optional[str] = None,
tfs_z: Optional[float] = None,
timeout: Optional[int] = None,
top_k: Optional[int] = None,
        top_p: Optional[float] = None,
) -> BaseChatModel:
if not base_url:
base_url = "http://localhost:11434"
# Mapping mirostat settings to their corresponding values
mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}
# Default to 0 for 'Disabled'
mirostat_value = mirostat_options.get(mirostat, 0) # type: ignore
# Set mirostat_eta and mirostat_tau to None if mirostat is disabled
if mirostat_value == 0:
mirostat_eta = None
mirostat_tau = None
# Mapping system settings to their corresponding values
llm_params = {
"base_url": base_url,
"cache": cache,
"model": model,
"mirostat": mirostat_value,
"format": format,
"metadata": metadata,
"tags": tags,
            ## Uncomment these entries once a callback component is added to Langflow. ##
# "callback_manager": callback_manager,
# "callbacks": callbacks,
#####################################################################################
"mirostat_eta": mirostat_eta,
"mirostat_tau": mirostat_tau,
"num_ctx": num_ctx,
"num_gpu": num_gpu,
"num_thread": num_thread,
"repeat_last_n": repeat_last_n,
"repeat_penalty": repeat_penalty,
"temperature": temperature,
"stop": stop,
"system": system,
"template": template,
"tfs_z": tfs_z,
"timeout": timeout,
"top_k": top_k,
"top_p": top_p,
"verbose": verbose,
}
        # Drop any parameters that were left as None
llm_params = {k: v for k, v in llm_params.items() if v is not None}
try:
output = ChatOllama(**llm_params) # type: ignore
except Exception as e:
raise ValueError("Could not initialize Ollama LLM.") from e
return output # type: ignore
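
A sketch of the mirostat mapping and None-pruning above; the model and values are illustrative:

component = ChatOllamaComponent()
chat = component.build(
    base_url=None,            # falls back to http://localhost:11434
    model="llama2",
    mirostat="Mirostat 2.0",  # translated to the integer 2 before the ChatOllama call
    temperature=0.5,
)
# Every parameter left as None is stripped from llm_params, so ChatOllama
# only receives explicitly set options.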

View file

@@ -1,74 +0,0 @@
from typing import Optional
from langchain_openai import ChatOpenAI
from pydantic.v1 import SecretStr
from langflow.base.models.openai_constants import MODEL_NAMES
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel, NestedDict
class ChatOpenAIComponent(CustomComponent):
display_name = "ChatOpenAI"
description = "`OpenAI` Chat large language models API."
icon = "OpenAI"
def build_config(self):
return {
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
},
"model_kwargs": {
"display_name": "Model Kwargs",
"advanced": True,
"required": False,
},
"model_name": {"display_name": "Model Name", "advanced": False, "options": MODEL_NAMES},
"openai_api_base": {
"display_name": "OpenAI API Base",
"advanced": True,
"required": False,
"info": (
"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\n"
"You can change this to use other APIs like JinaChat, LocalAI and Prem."
),
},
"openai_api_key": {
"display_name": "OpenAI API Key",
"advanced": False,
"required": False,
"password": True,
},
"temperature": {
"display_name": "Temperature",
"advanced": False,
"required": False,
"value": 0.7,
},
}
def build(
self,
max_tokens: Optional[int] = 0,
model_kwargs: NestedDict = {},
model_name: str = "gpt-3.5-turbo",
openai_api_base: Optional[str] = None,
openai_api_key: Optional[str] = None,
temperature: float = 0.7,
) -> LanguageModel:
if not openai_api_base:
openai_api_base = "https://api.openai.com/v1"
if openai_api_key:
api_key = SecretStr(openai_api_key)
else:
api_key = None
return ChatOpenAI(
max_tokens=max_tokens or None,
model_kwargs=model_kwargs,
model=model_name,
base_url=openai_api_base,
api_key=api_key,
temperature=temperature,
)
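
A sketch of the 0-means-unlimited convention in build(); the key is a placeholder:

component = ChatOpenAIComponent()
llm = component.build(
    model_name="gpt-3.5-turbo",
    openai_api_key="sk-...",  # placeholder; wrapped in SecretStr inside build()
    max_tokens=0,             # converted to None by `max_tokens or None`, i.e. no cap
)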

View file

@@ -1,86 +0,0 @@
from typing import Optional
from langchain_community.chat_models.vertexai import ChatVertexAI
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
class ChatVertexAIComponent(CustomComponent):
display_name = "ChatVertexAI"
description = "`Vertex AI` Chat large language models API."
icon = "VertexAI"
def build_config(self):
return {
"credentials": {
"display_name": "Credentials",
"field_type": "file",
"file_types": ["json"],
"file_path": None,
},
"examples": {
"display_name": "Examples",
"multiline": True,
},
"location": {
"display_name": "Location",
"value": "us-central1",
},
"max_output_tokens": {
"display_name": "Max Output Tokens",
"value": 128,
"advanced": True,
},
"model_name": {
"display_name": "Model Name",
"value": "chat-bison",
},
"project": {
"display_name": "Project",
},
"temperature": {
"display_name": "Temperature",
"value": 0.0,
},
"top_k": {
"display_name": "Top K",
"value": 40,
"advanced": True,
},
"top_p": {
"display_name": "Top P",
"value": 0.95,
"advanced": True,
},
"verbose": {
"display_name": "Verbose",
"value": False,
"advanced": True,
},
}
def build(
self,
credentials: Optional[str],
project: str,
location: str = "us-central1",
max_output_tokens: int = 128,
model_name: str = "chat-bison",
temperature: float = 0.0,
top_k: int = 40,
top_p: float = 0.95,
verbose: bool = False,
) -> LanguageModel:
return ChatVertexAI(
credentials=credentials,
location=location,
max_output_tokens=max_output_tokens,
model_name=model_name,
project=project,
temperature=temperature,
top_k=top_k,
top_p=top_p,
verbose=verbose,
)

View file

@@ -1,37 +0,0 @@
from typing import Optional
from langchain_cohere import ChatCohere
from langflow.field_typing import LanguageModel
from pydantic.v1 import SecretStr
from langflow.custom import CustomComponent
class CohereComponent(CustomComponent):
display_name = "Cohere"
description = "Cohere large language models."
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"
icon = "Cohere"
def build_config(self):
return {
"cohere_api_key": {"display_name": "Cohere API Key", "type": "password", "password": True},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
},
"temperature": {"display_name": "Temperature", "default": 0.75, "type": "float", "show": True},
}
def build(
self,
cohere_api_key: str,
max_tokens: Optional[int] = 256,
temperature: float = 0.75,
) -> LanguageModel:
if cohere_api_key:
api_key = SecretStr(cohere_api_key)
else:
api_key = None
return ChatCohere(cohere_api_key=api_key, max_tokens=max_tokens or None, temperature=temperature) # type: ignore

View file

@@ -1,74 +0,0 @@
from typing import Optional
from langchain_google_genai import ChatGoogleGenerativeAI # type: ignore
from pydantic.v1.types import SecretStr
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel, RangeSpec
class GoogleGenerativeAIComponent(CustomComponent):
display_name: str = "Google Generative AI"
description: str = "A component that uses Google Generative AI to generate text."
documentation: str = "http://docs.langflow.org/components/custom"
icon = "Google"
def build_config(self):
return {
"google_api_key": {
"display_name": "Google API Key",
"info": "The Google API Key to use for the Google Generative AI.",
},
"max_output_tokens": {
"display_name": "Max Output Tokens",
"info": "The maximum number of tokens to generate.",
},
"temperature": {
"display_name": "Temperature",
"info": "Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
},
"top_k": {
"display_name": "Top K",
"info": "Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.",
"rangeSpec": RangeSpec(min=0, max=2, step=0.1),
"advanced": True,
},
"top_p": {
"display_name": "Top P",
"info": "The maximum cumulative probability of tokens to consider when sampling.",
"advanced": True,
},
"n": {
"display_name": "N",
"info": "Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
"advanced": True,
},
"model": {
"display_name": "Model",
"info": "The name of the model to use. Supported examples: gemini-pro",
"options": ["gemini-pro", "gemini-pro-vision"],
},
"code": {
"advanced": True,
},
}
def build(
self,
google_api_key: str,
model: str,
max_output_tokens: Optional[int] = None,
temperature: float = 0.1,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
n: Optional[int] = 1,
) -> LanguageModel:
return ChatGoogleGenerativeAI(
model=model,
max_output_tokens=max_output_tokens or None, # type: ignore
temperature=temperature,
top_k=top_k or None,
top_p=top_p or None, # type: ignore
n=n or 1,
google_api_key=SecretStr(google_api_key),
)

View file

@@ -1,86 +0,0 @@
from typing import Optional
from langchain_groq import ChatGroq
from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.groq_constants import MODEL_NAMES
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
class GroqModelSpecs(LCModelComponent):
display_name: str = "Groq"
description: str = "Generate text using Groq."
icon = "Groq"
field_order = [
"groq_api_key",
"model",
"max_output_tokens",
"temperature",
"top_k",
"top_p",
"n",
"input_value",
"system_message",
"stream",
]
def build_config(self):
return {
"groq_api_key": {
"display_name": "Groq API Key",
"info": "API key for the Groq API.",
"password": True,
},
"groq_api_base": {
"display_name": "Groq API Base",
"info": "Base URL path for API requests, leave blank if not using a proxy or service emulator.",
"advanced": True,
},
"max_tokens": {
"display_name": "Max Output Tokens",
"info": "The maximum number of tokens to generate.",
"advanced": True,
},
"temperature": {
"display_name": "Temperature",
"info": "Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
},
"n": {
"display_name": "N",
"info": "Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
"advanced": True,
},
"model_name": {
"display_name": "Model",
"info": "The name of the model to use. Supported examples: gemini-pro",
"options": MODEL_NAMES,
},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
"advanced": True,
},
}
def build(
self,
groq_api_key: str,
model_name: str,
groq_api_base: Optional[str] = None,
max_tokens: Optional[int] = None,
temperature: float = 0.1,
n: Optional[int] = 1,
stream: bool = False,
) -> LanguageModel:
return ChatGroq(
model_name=model_name,
max_tokens=max_tokens or None, # type: ignore
temperature=temperature,
groq_api_base=groq_api_base,
n=n or 1,
groq_api_key=SecretStr(groq_api_key),
streaming=stream,
)
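
A sketch of invoking the removed Groq component; the key is a placeholder and the model name is illustrative (real options come from MODEL_NAMES in groq_constants), assuming LCModelComponent subclasses can be instantiated with no arguments:

component = GroqModelSpecs()
llm = component.build(
    groq_api_key="gsk_...",           # placeholder
    model_name="mixtral-8x7b-32768",  # illustrative; pick from MODEL_NAMES
)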

View file

@@ -1,45 +0,0 @@
from typing import Optional
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
class HuggingFaceEndpointsComponent(CustomComponent):
display_name: str = "Hugging Face Inference API"
description: str = "LLM model from Hugging Face Inference API."
icon = "HuggingFace"
def build_config(self):
return {
"endpoint_url": {"display_name": "Endpoint URL", "password": True},
"task": {
"display_name": "Task",
"options": ["text2text-generation", "text-generation", "summarization"],
},
"huggingfacehub_api_token": {"display_name": "API token", "password": True},
"model_kwargs": {
"display_name": "Model Keyword Arguments",
"field_type": "code",
},
"code": {"show": False},
}
def build(
self,
endpoint_url: str,
task: str = "text2text-generation",
huggingfacehub_api_token: Optional[str] = None,
model_kwargs: Optional[dict] = None,
) -> LanguageModel:
try:
output = HuggingFaceEndpoint( # type: ignore
endpoint_url=endpoint_url,
task=task,
huggingfacehub_api_token=huggingfacehub_api_token,
model_kwargs=model_kwargs or {},
)
except Exception as e:
raise ValueError("Could not connect to HuggingFace Endpoints API.") from e
return output
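
A sketch of invoking the removed Hugging Face component; the endpoint URL and token are placeholders:

component = HuggingFaceEndpointsComponent()
llm = component.build(
    endpoint_url="https://api-inference.huggingface.co/models/google/flan-t5-large",  # placeholder URL
    task="text2text-generation",
    huggingfacehub_api_token="hf_...",  # placeholder
)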

View file

@@ -1,158 +0,0 @@
from typing import List, Optional
from langchain_community.llms.ollama import Ollama
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
class OllamaLLM(CustomComponent):
display_name = "Ollama"
description = "Local LLM with Ollama."
def build_config(self) -> dict:
return {
"base_url": {
"display_name": "Base URL",
"info": "Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.",
},
"model": {
"display_name": "Model Name",
"value": "llama2",
"info": "Refer to https://ollama.ai/library for more models.",
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"value": 0.8,
"info": "Controls the creativity of model responses.",
},
"mirostat": {
"display_name": "Mirostat",
"options": ["Disabled", "Mirostat", "Mirostat 2.0"],
"info": "Enable/disable Mirostat sampling for controlling perplexity.",
"value": "Disabled",
"advanced": True,
},
"mirostat_eta": {
"display_name": "Mirostat Eta",
"field_type": "float",
"info": "Learning rate influencing the algorithm's response to feedback.",
"advanced": True,
},
"mirostat_tau": {
"display_name": "Mirostat Tau",
"field_type": "float",
"info": "Controls balance between coherence and diversity.",
"advanced": True,
},
"num_ctx": {
"display_name": "Context Window Size",
"field_type": "int",
"info": "Size of the context window for generating the next token.",
"advanced": True,
},
"num_gpu": {
"display_name": "Number of GPUs",
"field_type": "int",
"info": "Number of GPUs to use for computation.",
"advanced": True,
},
"num_thread": {
"display_name": "Number of Threads",
"field_type": "int",
"info": "Number of threads to use during computation.",
"advanced": True,
},
"repeat_last_n": {
"display_name": "Repeat Last N",
"field_type": "int",
"info": "Sets how far back the model looks to prevent repetition.",
"advanced": True,
},
"repeat_penalty": {
"display_name": "Repeat Penalty",
"field_type": "float",
"info": "Penalty for repetitions in generated text.",
"advanced": True,
},
"stop": {
"display_name": "Stop Tokens",
"info": "List of tokens to signal the model to stop generating text.",
"advanced": True,
},
"tfs_z": {
"display_name": "TFS Z",
"field_type": "float",
"info": "Tail free sampling to reduce impact of less probable tokens.",
"advanced": True,
},
"top_k": {
"display_name": "Top K",
"field_type": "int",
"info": "Limits token selection to top K for reducing nonsense generation.",
"advanced": True,
},
"top_p": {
"display_name": "Top P",
"field_type": "int",
"info": "Works with top-k to control diversity of generated text.",
"advanced": True,
},
}
def build(
self,
base_url: Optional[str],
model: str,
temperature: Optional[float],
mirostat: Optional[str],
mirostat_eta: Optional[float] = None,
mirostat_tau: Optional[float] = None,
num_ctx: Optional[int] = None,
num_gpu: Optional[int] = None,
num_thread: Optional[int] = None,
repeat_last_n: Optional[int] = None,
repeat_penalty: Optional[float] = None,
stop: Optional[List[str]] = None,
tfs_z: Optional[float] = None,
top_k: Optional[int] = None,
        top_p: Optional[float] = None,
) -> LanguageModel:
if not base_url:
base_url = "http://localhost:11434"
# Mapping mirostat settings to their corresponding values
mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}
# Default to 0 for 'Disabled'
mirostat_value = mirostat_options.get(mirostat, 0) # type: ignore
# Set mirostat_eta and mirostat_tau to None if mirostat is disabled
if mirostat_value == 0:
mirostat_eta = None
mirostat_tau = None
try:
llm = Ollama(
base_url=base_url,
model=model,
mirostat=mirostat_value,
mirostat_eta=mirostat_eta,
mirostat_tau=mirostat_tau,
num_ctx=num_ctx,
num_gpu=num_gpu,
num_thread=num_thread,
repeat_last_n=repeat_last_n,
repeat_penalty=repeat_penalty,
temperature=temperature,
stop=stop,
tfs_z=tfs_z,
top_k=top_k,
top_p=top_p,
)
except Exception as e:
raise ValueError("Could not connect to Ollama.") from e
return llm

View file

@@ -1,151 +0,0 @@
from typing import Dict, Optional
from langchain_community.llms.vertexai import VertexAI
from langflow.custom import CustomComponent
from langflow.field_typing import LanguageModel
class VertexAIComponent(CustomComponent):
display_name = "VertexAI"
description = "Google Vertex AI large language models"
icon = "VertexAI"
def build_config(self):
return {
"credentials": {
"display_name": "Credentials",
"field_type": "file",
"file_types": ["json"],
"required": False,
"value": None,
},
"location": {
"display_name": "Location",
"type": "str",
"advanced": True,
"value": "us-central1",
"required": False,
},
"max_output_tokens": {
"display_name": "Max Output Tokens",
"field_type": "int",
"value": 128,
"required": False,
"advanced": True,
},
"max_retries": {
"display_name": "Max Retries",
"type": "int",
"value": 6,
"required": False,
"advanced": True,
},
"metadata": {
"display_name": "Metadata",
"field_type": "dict",
"required": False,
"default": {},
},
"model_name": {
"display_name": "Model Name",
"type": "str",
"value": "text-bison",
"required": False,
},
"n": {
"advanced": True,
"display_name": "N",
"field_type": "int",
"value": 1,
"required": False,
},
"project": {
"display_name": "Project",
"type": "str",
"required": False,
"default": None,
},
"request_parallelism": {
"display_name": "Request Parallelism",
"field_type": "int",
"value": 5,
"required": False,
"advanced": True,
},
"streaming": {
"display_name": "Streaming",
"field_type": "bool",
"value": False,
"required": False,
"advanced": True,
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"value": 0.0,
"required": False,
"advanced": True,
},
"top_k": {"display_name": "Top K", "type": "int", "default": 40, "required": False, "advanced": True},
"top_p": {
"display_name": "Top P",
"field_type": "float",
"value": 0.95,
"required": False,
"advanced": True,
},
"tuned_model_name": {
"display_name": "Tuned Model Name",
"type": "str",
"required": False,
"value": None,
"advanced": True,
},
"verbose": {
"display_name": "Verbose",
"field_type": "bool",
"value": False,
"required": False,
},
"name": {"display_name": "Name", "field_type": "str"},
}
def build(
self,
credentials: Optional[str] = None,
location: str = "us-central1",
max_output_tokens: int = 128,
max_retries: int = 6,
metadata: Dict = {},
model_name: str = "text-bison",
n: int = 1,
name: Optional[str] = None,
project: Optional[str] = None,
request_parallelism: int = 5,
streaming: bool = False,
temperature: float = 0.0,
top_k: int = 40,
top_p: float = 0.95,
tuned_model_name: Optional[str] = None,
verbose: bool = False,
) -> LanguageModel:
return VertexAI(
credentials=credentials,
location=location,
max_output_tokens=max_output_tokens,
max_retries=max_retries,
metadata=metadata,
model_name=model_name,
n=n,
name=name,
project=project,
request_parallelism=request_parallelism,
streaming=streaming,
temperature=temperature,
top_k=top_k,
top_p=top_p,
tuned_model_name=tuned_model_name,
verbose=verbose,
)

View file

@@ -1,30 +0,0 @@
from .AmazonBedrockSpecs import AmazonBedrockComponent
from .BaiduQianfanChatEndpointsSpecs import QianfanChatEndpointComponent
from .BaiduQianfanLLMEndpointsSpecs import QianfanLLMEndpointComponent
from .ChatLiteLLMSpecs import ChatLiteLLMComponent
from .ChatOllamaEndpointSpecs import ChatOllamaComponent
from .ChatOpenAISpecs import ChatOpenAIComponent
from .ChatVertexAISpecs import ChatVertexAIComponent
from .CohereSpecs import CohereComponent
from .GoogleGenerativeAISpecs import GoogleGenerativeAIComponent
from .HuggingFaceEndpointsSpecs import HuggingFaceEndpointsComponent
from .OllamaLLMSpecs import OllamaLLM
from .VertexAISpecs import VertexAIComponent
__all__ = [
"AmazonBedrockComponent",
"AzureChatOpenAISpecsComponent",
"QianfanChatEndpointComponent",
"QianfanLLMEndpointComponent",
"AnthropicLLM",
"ChatLiteLLMComponent",
"ChatOllamaComponent",
"ChatOpenAIComponent",
"ChatVertexAIComponent",
"CohereComponent",
"GoogleGenerativeAIComponent",
"HuggingFaceEndpointsComponent",
"OllamaLLM",
"VertexAIComponent",
]