added the new LLMs in run mode
Gabriel Luiz Freitas Almeida 2024-02-19 20:57:58 -03:00 committed by GitHub
commit 977e72bb9b
13 changed files with 1071 additions and 38 deletions

View file

@@ -2,7 +2,7 @@ from typing import Optional
from langchain_google_genai import ChatGoogleGenerativeAI # type: ignore
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, RangeSpec, TemplateField
from langflow.field_typing import BaseLanguageModel, RangeSpec
from pydantic.v1.types import SecretStr
@@ -13,42 +13,42 @@ class GoogleGenerativeAIComponent(CustomComponent):
def build_config(self):
return {
"google_api_key": TemplateField(
display_name="Google API Key",
info="The Google API Key to use for the Google Generative AI.",
),
"max_output_tokens": TemplateField(
display_name="Max Output Tokens",
info="The maximum number of tokens to generate.",
),
"temperature": TemplateField(
display_name="Temperature",
info="Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
),
"top_k": TemplateField(
display_name="Top K",
info="Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.",
range_spec=RangeSpec(min=0, max=2, step=0.1),
advanced=True,
),
"top_p": TemplateField(
display_name="Top P",
info="The maximum cumulative probability of tokens to consider when sampling.",
advanced=True,
),
"n": TemplateField(
display_name="N",
info="Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
advanced=True,
),
"model": TemplateField(
display_name="Model",
info="The name of the model to use. Supported examples: gemini-pro",
options=["gemini-pro", "gemini-pro-vision"],
),
"code": TemplateField(
advanced=True,
),
"google_api_key": {
"display_name": "Google API Key",
"info": "The Google API Key to use for the Google Generative AI.",
},
"max_output_tokens": {
"display_name": "Max Output Tokens",
"info": "The maximum number of tokens to generate.",
},
"temperature": {
"display_name": "Temperature",
"info": "Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
},
"top_k": {
"display_name": "Top K",
"info": "Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.",
"range_spec": RangeSpec(min=0, max=2, step=0.1),
"advanced": True,
},
"top_p": {
"display_name": "Top P",
"info": "The maximum cumulative probability of tokens to consider when sampling.",
"advanced": True,
},
"n": {
"display_name": "N",
"info": "Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
"advanced": True,
},
"model": {
"display_name": "Model",
"info": "The name of the model to use. Supported examples: gemini-pro",
"options": ["gemini-pro", "gemini-pro-vision"],
},
"code": {
"advanced": True,
},
}
def build(

View file

@@ -0,0 +1,66 @@
from typing import Optional
from langchain_community.chat_models.bedrock import BedrockChat
from langflow.field_typing import Text
from langflow import CustomComponent
class AmazonBedrockComponent(CustomComponent):
display_name: str = "Amazon Bedrock model"
description: str = "Generate text using LLM model from Amazon Bedrock."
def build_config(self):
return {
"model_id": {
"display_name": "Model Id",
"options": [
"ai21.j2-grande-instruct",
"ai21.j2-jumbo-instruct",
"ai21.j2-mid",
"ai21.j2-mid-v1",
"ai21.j2-ultra",
"ai21.j2-ultra-v1",
"anthropic.claude-instant-v1",
"anthropic.claude-v1",
"anthropic.claude-v2",
"cohere.command-text-v14",
],
},
"credentials_profile_name": {"display_name": "Credentials Profile Name"},
"streaming": {"display_name": "Streaming", "field_type": "bool"},
"endpoint_url": {"display_name": "Endpoint URL"},
"region_name": {"display_name": "Region Name"},
"model_kwargs": {"display_name": "Model Kwargs"},
"cache": {"display_name": "Cache"},
"code": {"advanced": True},
"inputs": {"display_name": "Input"},
}
def build(
self,
inputs: str,
model_id: str = "anthropic.claude-instant-v1",
credentials_profile_name: Optional[str] = None,
region_name: Optional[str] = None,
model_kwargs: Optional[dict] = None,
endpoint_url: Optional[str] = None,
streaming: bool = False,
cache: Optional[bool] = None,
) -> Text:
try:
output = BedrockChat(
credentials_profile_name=credentials_profile_name,
model_id=model_id,
region_name=region_name,
model_kwargs=model_kwargs,
endpoint_url=endpoint_url,
streaming=streaming,
cache=cache,
) # type: ignore
except Exception as e:
raise ValueError("Could not connect to AmazonBedrock API.") from e
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
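A minimal usage sketch for the component above, assuming AWS credentials are already configured via boto3; the profile name, region, and prompt are illustrative, not part of the commit:

component = AmazonBedrockComponent()
text = component.build(
    inputs="Write one sentence about Amazon Bedrock.",
    model_id="anthropic.claude-instant-v1",
    credentials_profile_name="default",  # assumed AWS profile name
    region_name="us-east-1",  # assumed region
)
print(text)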

View file

@@ -0,0 +1,81 @@
from typing import Optional
from langchain_community.chat_models.anthropic import ChatAnthropic
from pydantic.v1 import SecretStr
from langflow.field_typing import Text
from langflow import CustomComponent
class AnthropicLLM(CustomComponent):
display_name: str = "Anthropic model"
description: str = "Generate text using Anthropic Chat&Completion large language models."
def build_config(self):
return {
"model": {
"display_name": "Model Name",
"options": [
"claude-2.1",
"claude-2.0",
"claude-instant-1.2",
"claude-instant-1",
# Add more models as needed
],
"info": "https://python.langchain.com/docs/integrations/chat/anthropic",
"required": True,
"value": "claude-2.1",
},
"anthropic_api_key": {
"display_name": "Anthropic API Key",
"required": True,
"password": True,
"info": "Your Anthropic API key.",
},
"max_tokens": {
"display_name": "Max Tokens",
"field_type": "int",
"value": 256,
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"value": 0.7,
},
"api_endpoint": {
"display_name": "API Endpoint",
"info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
},
"code": {"show": False},
"inputs": {"display_name": "Input"},
}
def build(
self,
model: str,
        inputs: str,
anthropic_api_key: Optional[str] = None,
max_tokens: Optional[int] = None,
temperature: Optional[float] = None,
api_endpoint: Optional[str] = None,
) -> Text:
# Set default API endpoint if not provided
if not api_endpoint:
api_endpoint = "https://api.anthropic.com"
try:
output = ChatAnthropic(
model_name=model,
anthropic_api_key=SecretStr(anthropic_api_key) if anthropic_api_key else None,
max_tokens_to_sample=max_tokens, # type: ignore
temperature=temperature,
anthropic_api_url=api_endpoint,
)
except Exception as e:
raise ValueError("Could not connect to Anthropic API.") from e
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
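A hedged usage sketch, assuming an Anthropic API key is exported as the ANTHROPIC_API_KEY environment variable; the prompt is illustrative:

import os

component = AnthropicLLM()
text = component.build(
    model="claude-2.1",
    inputs="Explain tokenization in one sentence.",
    anthropic_api_key=os.environ.get("ANTHROPIC_API_KEY"),
    max_tokens=256,
    temperature=0.7,
)
print(text)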

View file

@@ -0,0 +1,104 @@
from typing import Optional
from langflow import CustomComponent
from langflow.field_typing import Text
from langchain_openai import AzureChatOpenAI
class AzureChatOpenAIComponent(CustomComponent):
display_name: str = "AzureOpenAI model"
description: str = "Generate text using LLM model from Azure OpenAI."
documentation: str = (
"https://python.langchain.com/docs/integrations/llms/azure_openai"
)
beta = False
AZURE_OPENAI_MODELS = [
"gpt-35-turbo",
"gpt-35-turbo-16k",
"gpt-35-turbo-instruct",
"gpt-4",
"gpt-4-32k",
"gpt-4-vision",
]
AZURE_OPENAI_API_VERSIONS = [
"2023-03-15-preview",
"2023-05-15",
"2023-06-01-preview",
"2023-07-01-preview",
"2023-08-01-preview",
"2023-09-01-preview",
"2023-12-01-preview",
]
def build_config(self):
return {
"model": {
"display_name": "Model Name",
"value": self.AZURE_OPENAI_MODELS[0],
"options": self.AZURE_OPENAI_MODELS,
"required": True,
},
"azure_endpoint": {
"display_name": "Azure Endpoint",
"required": True,
"info": "Your Azure endpoint, including the resource.. Example: `https://example-resource.azure.openai.com/`",
},
"azure_deployment": {
"display_name": "Deployment Name",
"required": True,
},
"api_version": {
"display_name": "API Version",
"options": self.AZURE_OPENAI_API_VERSIONS,
"value": self.AZURE_OPENAI_API_VERSIONS[-1],
"required": True,
"advanced": True,
},
"api_key": {"display_name": "API Key", "required": True, "password": True},
"temperature": {
"display_name": "Temperature",
"value": 0.7,
"field_type": "float",
"required": False,
},
"max_tokens": {
"display_name": "Max Tokens",
"value": 1000,
"required": False,
"field_type": "int",
"advanced": True,
"info": "Maximum number of tokens to generate.",
},
"code": {"show": False},
"inputs": {"display_name": "Input"},
"inputs": {"display_name": "Input"},
}
def build(
self,
model: str,
azure_endpoint: str,
inputs: str,
azure_deployment: str,
api_key: str,
api_version: str,
temperature: float = 0.7,
max_tokens: Optional[int] = 1000,
    ) -> Text:
try:
output = AzureChatOpenAI(
model=model,
azure_endpoint=azure_endpoint,
azure_deployment=azure_deployment,
api_version=api_version,
api_key=api_key,
temperature=temperature,
max_tokens=max_tokens,
)
except Exception as e:
raise ValueError("Could not connect to AzureOpenAI API.") from e
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
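A minimal sketch of invoking the component, assuming an Azure OpenAI resource and deployment already exist; the endpoint, deployment name, and key are placeholders:

component = AzureChatOpenAIComponent()
text = component.build(
    model="gpt-35-turbo",
    azure_endpoint="https://example-resource.azure.openai.com/",  # placeholder endpoint
    inputs="Say hello in French.",
    azure_deployment="my-gpt35-deployment",  # placeholder deployment name
    api_key="<azure-openai-key>",  # placeholder key
    api_version="2023-12-01-preview",
)
print(text)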

View file

@@ -0,0 +1,100 @@
from typing import Optional
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from pydantic.v1 import SecretStr
from langflow import CustomComponent
from langflow.field_typing import Text
class QianfanChatEndpointComponent(CustomComponent):
display_name: str = "QianfanChat Model"
description: str = (
"Generate text using Baidu Qianfan chat models. Get more detail from "
"https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint."
)
def build_config(self):
return {
"model": {
"display_name": "Model Name",
"options": [
"ERNIE-Bot",
"ERNIE-Bot-turbo",
"BLOOMZ-7B",
"Llama-2-7b-chat",
"Llama-2-13b-chat",
"Llama-2-70b-chat",
"Qianfan-BLOOMZ-7B-compressed",
"Qianfan-Chinese-Llama-2-7B",
"ChatGLM2-6B-32K",
"AquilaChat-7B",
],
"info": "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint",
"required": True,
},
"qianfan_ak": {
"display_name": "Qianfan Ak",
"required": True,
"password": True,
"info": "which you could get from https://cloud.baidu.com/product/wenxinworkshop",
},
"qianfan_sk": {
"display_name": "Qianfan Sk",
"required": True,
"password": True,
"info": "which you could get from https://cloud.baidu.com/product/wenxinworkshop",
},
"top_p": {
"display_name": "Top p",
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 0.8,
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 0.95,
},
"penalty_score": {
"display_name": "Penalty Score",
"field_type": "float",
"info": "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
"value": 1.0,
},
"endpoint": {
"display_name": "Endpoint",
"info": "Endpoint of the Qianfan LLM, required if custom model used.",
},
"code": {"show": False},
"inputs": {"display_name": "Input"},
}
def build(
self,
inputs: str,
model: str = "ERNIE-Bot-turbo",
qianfan_ak: Optional[str] = None,
qianfan_sk: Optional[str] = None,
top_p: Optional[float] = None,
temperature: Optional[float] = None,
penalty_score: Optional[float] = None,
endpoint: Optional[str] = None,
) -> Text:
try:
output = QianfanChatEndpoint( # type: ignore
model=model,
qianfan_ak=SecretStr(qianfan_ak) if qianfan_ak else None,
qianfan_sk=SecretStr(qianfan_sk) if qianfan_sk else None,
top_p=top_p,
temperature=temperature,
penalty_score=penalty_score,
endpoint=endpoint,
)
except Exception as e:
raise ValueError("Could not connect to Baidu Qianfan API.") from e
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
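A usage sketch, assuming valid Qianfan credentials are at hand; both keys below are placeholders:

component = QianfanChatEndpointComponent()
text = component.build(
    inputs="Introduce ERNIE-Bot in one sentence.",
    model="ERNIE-Bot-turbo",
    qianfan_ak="<qianfan-access-key>",  # placeholder
    qianfan_sk="<qianfan-secret-key>",  # placeholder
    temperature=0.95,
)
print(text)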

View file

@@ -0,0 +1,39 @@
from typing import Dict, Optional
from langchain_community.llms.ctransformers import CTransformers
from langflow.field_typing import Text
from langflow import CustomComponent
class CTransformersComponent(CustomComponent):
display_name = "CTransformers model"
description = "Generate text using CTransformers LLM models"
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"
def build_config(self):
return {
"model": {"display_name": "Model", "required": True},
"model_file": {
"display_name": "Model File",
"required": False,
"field_type": "file",
"file_types": [".bin"],
},
"model_type": {"display_name": "Model Type", "required": True},
"config": {
"display_name": "Config",
"advanced": True,
"required": False,
"field_type": "dict",
"value": '{"top_k":40,"top_p":0.95,"temperature":0.8,"repetition_penalty":1.1,"last_n_tokens":64,"seed":-1,"max_new_tokens":256,"stop":"","stream":"False","reset":"True","batch_size":8,"threads":-1,"context_length":-1,"gpu_layers":0}',
},
"inputs": {"display_name": "Input"},
}
    def build(self, model: str, model_file: str, inputs: str, model_type: str, config: Optional[Dict] = None) -> Text:
output = CTransformers(model=model, model_file=model_file, model_type=model_type, config=config)
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
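A minimal sketch, assuming GGML weights are available locally or on the Hugging Face Hub; the model repo, file name, and config values are illustrative:

component = CTransformersComponent()
text = component.build(
    model="marella/gpt-2-ggml",  # assumed Hub repo with GGML weights
    model_file="ggml-model.bin",  # placeholder weights file
    inputs="Once upon a time",
    model_type="gpt2",
    config={"max_new_tokens": 64, "temperature": 0.8},
)
print(text)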

View file

@@ -0,0 +1,30 @@
from langchain_community.chat_models.cohere import ChatCohere
from langflow import CustomComponent
from langflow.field_typing import Text
class CohereComponent(CustomComponent):
display_name = "Cohere model"
description = "Generate text using Cohere large language models."
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"
def build_config(self):
return {
"cohere_api_key": {"display_name": "Cohere API Key", "type": "password", "password": True},
"max_tokens": {"display_name": "Max Tokens", "default": 256, "type": "int", "show": True},
"temperature": {"display_name": "Temperature", "default": 0.75, "type": "float", "show": True},
"inputs": {"display_name": "Input"},
}
def build(
self,
cohere_api_key: str,
inputs: str,
max_tokens: int = 256,
temperature: float = 0.75,
) -> Text:
output = ChatCohere(cohere_api_key=cohere_api_key, max_tokens=max_tokens, temperature=temperature)
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
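A hedged usage sketch; the API key is a placeholder:

component = CohereComponent()
text = component.build(
    cohere_api_key="<cohere-api-key>",  # placeholder
    inputs="Name three uses for text embeddings.",
    max_tokens=256,
    temperature=0.75,
)
print(text)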

View file

@@ -0,0 +1,79 @@
from typing import Optional
from langchain_google_genai import ChatGoogleGenerativeAI # type: ignore
from langflow import CustomComponent
from langflow.field_typing import RangeSpec
from pydantic.v1.types import SecretStr
from langflow.field_typing import Text
class GoogleGenerativeAIComponent(CustomComponent):
display_name: str = "Google Generative AI model"
description: str = "Generate text using Google Generative AI to generate text."
documentation: str = "http://docs.langflow.org/components/custom"
def build_config(self):
return {
"google_api_key":
{ "display_name":"Google API Key",
"info":"The Google API Key to use for the Google Generative AI.",
} ,
"max_output_tokens":{
"display_name":"Max Output Tokens",
"info":"The maximum number of tokens to generate.",
},
"temperature": {
"display_name":"Temperature",
"info":"Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
},
"top_k": {
"display_name":"Top K",
"info":"Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.",
"range_spec":RangeSpec(min=0, max=2, step=0.1),
"advanced":True,
},
"top_p": {
"display_name":"Top P",
"info":"The maximum cumulative probability of tokens to consider when sampling.",
"advanced":True,
},
"n": {
"display_name":"N",
"info":"Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
"advanced":True,
},
"model": {
"display_name":"Model",
"info":"The name of the model to use. Supported examples: gemini-pro",
"options":["gemini-pro", "gemini-pro-vision"],
},
"code": {
"advanced":True,
},
"inputs": {"display_name": "Input"},
}
def build(
self,
google_api_key: str,
model: str,
        inputs: str,
max_output_tokens: Optional[int] = None,
temperature: float = 0.1,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
n: Optional[int] = 1,
) -> Text:
output = ChatGoogleGenerativeAI(
model=model,
max_output_tokens=max_output_tokens or None, # type: ignore
temperature=temperature,
top_k=top_k or None,
top_p=top_p or None, # type: ignore
n=n or 1,
google_api_key=SecretStr(google_api_key),
)
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
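A minimal sketch, assuming a Google API key with Generative AI access; the key and prompt are placeholders:

component = GoogleGenerativeAIComponent()
text = component.build(
    google_api_key="<google-api-key>",  # placeholder
    model="gemini-pro",
    inputs="What is the capital of Brazil?",
    temperature=0.1,
)
print(text)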

View file

@@ -0,0 +1,50 @@
from typing import Optional
from langflow import CustomComponent
from langchain.llms.huggingface_endpoint import HuggingFaceEndpoint
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langflow.field_typing import Text
class HuggingFaceEndpointsComponent(CustomComponent):
display_name: str = "Hugging Face Inference API models"
description: str = "Generate text using LLM model from Hugging Face Inference API."
def build_config(self):
return {
"endpoint_url": {"display_name": "Endpoint URL", "password": True},
"task": {
"display_name": "Task",
"options": ["text2text-generation", "text-generation", "summarization"],
},
"huggingfacehub_api_token": {"display_name": "API token", "password": True},
"model_kwargs": {
"display_name": "Model Keyword Arguments",
"field_type": "code",
},
"code": {"show": False},
"inputs": {"display_name": "Input"},
}
def build(
self,
inputs: str,
endpoint_url: str,
task: str = "text2text-generation",
huggingfacehub_api_token: Optional[str] = None,
model_kwargs: Optional[dict] = None,
) -> Text:
try:
llm = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
task=task,
huggingfacehub_api_token=huggingfacehub_api_token,
model_kwargs=model_kwargs,
)
except Exception as e:
raise ValueError("Could not connect to HuggingFace Endpoints API.") from e
output = ChatHuggingFace(llm=llm)
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
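A usage sketch, assuming a deployed Hugging Face Inference Endpoint; the endpoint URL and token are placeholders:

component = HuggingFaceEndpointsComponent()
text = component.build(
    inputs="Summarize: LLMs map input tokens to output tokens.",
    endpoint_url="https://example.endpoints.huggingface.cloud",  # placeholder
    task="text-generation",
    huggingfacehub_api_token="<hf-token>",  # placeholder
)
print(text)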

View file

@@ -0,0 +1,136 @@
from typing import Optional, List, Dict, Any
from langflow import CustomComponent
from langchain_community.llms.llamacpp import LlamaCpp
from langflow.field_typing import Text
class LlamaCppComponent(CustomComponent):
display_name = "LlamaCpp model"
description = "Generate text using llama.cpp model."
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"
def build_config(self):
return {
"grammar": {"display_name": "Grammar", "advanced": True},
"cache": {"display_name": "Cache", "advanced": True},
"client": {"display_name": "Client", "advanced": True},
"echo": {"display_name": "Echo", "advanced": True},
"f16_kv": {"display_name": "F16 KV", "advanced": True},
"grammar_path": {"display_name": "Grammar Path", "advanced": True},
"last_n_tokens_size": {"display_name": "Last N Tokens Size", "advanced": True},
"logits_all": {"display_name": "Logits All", "advanced": True},
"logprobs": {"display_name": "Logprobs", "advanced": True},
"lora_base": {"display_name": "Lora Base", "advanced": True},
"lora_path": {"display_name": "Lora Path", "advanced": True},
"max_tokens": {"display_name": "Max Tokens", "advanced": True},
"metadata": {"display_name": "Metadata", "advanced": True},
"model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
"model_path": {
"display_name": "Model Path",
"field_type": "file",
"file_types": [".bin"],
"required": True,
},
"n_batch": {"display_name": "N Batch", "advanced": True},
"n_ctx": {"display_name": "N Ctx", "advanced": True},
"n_gpu_layers": {"display_name": "N GPU Layers", "advanced": True},
"n_parts": {"display_name": "N Parts", "advanced": True},
"n_threads": {"display_name": "N Threads", "advanced": True},
"repeat_penalty": {"display_name": "Repeat Penalty", "advanced": True},
"rope_freq_base": {"display_name": "Rope Freq Base", "advanced": True},
"rope_freq_scale": {"display_name": "Rope Freq Scale", "advanced": True},
"seed": {"display_name": "Seed", "advanced": True},
"stop": {"display_name": "Stop", "advanced": True},
"streaming": {"display_name": "Streaming", "advanced": True},
"suffix": {"display_name": "Suffix", "advanced": True},
"tags": {"display_name": "Tags", "advanced": True},
"temperature": {"display_name": "Temperature"},
"top_k": {"display_name": "Top K", "advanced": True},
"top_p": {"display_name": "Top P", "advanced": True},
"use_mlock": {"display_name": "Use Mlock", "advanced": True},
"use_mmap": {"display_name": "Use Mmap", "advanced": True},
"verbose": {"display_name": "Verbose", "advanced": True},
"vocab_only": {"display_name": "Vocab Only", "advanced": True},
"inputs": {"display_name": "Input"},
}
def build(
self,
model_path: str,
        inputs: str,
grammar: Optional[str] = None,
cache: Optional[bool] = None,
client: Optional[Any] = None,
echo: Optional[bool] = False,
f16_kv: bool = True,
grammar_path: Optional[str] = None,
last_n_tokens_size: Optional[int] = 64,
logits_all: bool = False,
logprobs: Optional[int] = None,
lora_base: Optional[str] = None,
lora_path: Optional[str] = None,
max_tokens: Optional[int] = 256,
metadata: Optional[Dict] = None,
model_kwargs: Dict = {},
n_batch: Optional[int] = 8,
n_ctx: int = 512,
n_gpu_layers: Optional[int] = 1,
n_parts: int = -1,
n_threads: Optional[int] = 1,
repeat_penalty: Optional[float] = 1.1,
rope_freq_base: float = 10000.0,
rope_freq_scale: float = 1.0,
seed: int = -1,
stop: Optional[List[str]] = [],
streaming: bool = True,
suffix: Optional[str] = "",
tags: Optional[List[str]] = [],
temperature: Optional[float] = 0.8,
top_k: Optional[int] = 40,
top_p: Optional[float] = 0.95,
use_mlock: bool = False,
use_mmap: Optional[bool] = True,
verbose: bool = True,
vocab_only: bool = False,
) -> Text:
output = LlamaCpp(
model_path=model_path,
grammar=grammar,
cache=cache,
client=client,
echo=echo,
f16_kv=f16_kv,
grammar_path=grammar_path,
last_n_tokens_size=last_n_tokens_size,
logits_all=logits_all,
logprobs=logprobs,
lora_base=lora_base,
lora_path=lora_path,
max_tokens=max_tokens,
metadata=metadata,
model_kwargs=model_kwargs,
n_batch=n_batch,
n_ctx=n_ctx,
n_gpu_layers=n_gpu_layers,
n_parts=n_parts,
n_threads=n_threads,
repeat_penalty=repeat_penalty,
rope_freq_base=rope_freq_base,
rope_freq_scale=rope_freq_scale,
seed=seed,
stop=stop,
streaming=streaming,
suffix=suffix,
tags=tags,
temperature=temperature,
top_k=top_k,
top_p=top_p,
use_mlock=use_mlock,
use_mmap=use_mmap,
verbose=verbose,
vocab_only=vocab_only,
)
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
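A minimal sketch, assuming a GGML model file exists at the given path (placeholder); only a few of the many knobs are set:

component = LlamaCppComponent()
text = component.build(
    model_path="/models/llama-2-7b.ggmlv3.q4_0.bin",  # placeholder local weights
    inputs="Q: What is 2 + 2? A:",
    temperature=0.8,
    max_tokens=64,
    streaming=False,
)
print(text)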

View file

@@ -0,0 +1,256 @@
from typing import Any, Dict, List, Optional
from langchain_community.chat_models import ChatOllama
from langflow import CustomComponent
from langflow.field_typing import Text
# When a callback component is added to Langflow, uncomment the import below.
# from langchain.callbacks.manager import CallbackManager
class ChatOllamaComponent(CustomComponent):
display_name = "ChatOllama model"
description = "Generate text using Local LLM for chat with Ollama."
def build_config(self) -> dict:
return {
"base_url": {
"display_name": "Base URL",
"info": "Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.",
},
"model": {
"display_name": "Model Name",
"value": "llama2",
"info": "Refer to https://ollama.ai/library for more models.",
},
"temperature": {
"display_name": "Temperature",
"field_type": "float",
"value": 0.8,
"info": "Controls the creativity of model responses.",
},
"cache": {
"display_name": "Cache",
"field_type": "bool",
"info": "Enable or disable caching.",
"advanced": True,
"value": False,
},
            ### When a callback component is added to Langflow, uncomment the fields below. ###
# "callback_manager": {
# "display_name": "Callback Manager",
# "info": "Optional callback manager for additional functionality.",
# "advanced": True,
# },
# "callbacks": {
# "display_name": "Callbacks",
# "info": "Callbacks to execute during model runtime.",
# "advanced": True,
# },
########################################################################################
"format": {
"display_name": "Format",
"field_type": "str",
"info": "Specify the format of the output (e.g., json).",
"advanced": True,
},
"metadata": {
"display_name": "Metadata",
"info": "Metadata to add to the run trace.",
"advanced": True,
},
"mirostat": {
"display_name": "Mirostat",
"options": ["Disabled", "Mirostat", "Mirostat 2.0"],
"info": "Enable/disable Mirostat sampling for controlling perplexity.",
"value": "Disabled",
"advanced": True,
},
"mirostat_eta": {
"display_name": "Mirostat Eta",
"field_type": "float",
"info": "Learning rate for Mirostat algorithm. (Default: 0.1)",
"advanced": True,
},
"mirostat_tau": {
"display_name": "Mirostat Tau",
"field_type": "float",
"info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)",
"advanced": True,
},
"num_ctx": {
"display_name": "Context Window Size",
"field_type": "int",
"info": "Size of the context window for generating tokens. (Default: 2048)",
"advanced": True,
},
"num_gpu": {
"display_name": "Number of GPUs",
"field_type": "int",
"info": "Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)",
"advanced": True,
},
"num_thread": {
"display_name": "Number of Threads",
"field_type": "int",
"info": "Number of threads to use during computation. (Default: detected for optimal performance)",
"advanced": True,
},
"repeat_last_n": {
"display_name": "Repeat Last N",
"field_type": "int",
"info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
"advanced": True,
},
"repeat_penalty": {
"display_name": "Repeat Penalty",
"field_type": "float",
"info": "Penalty for repetitions in generated text. (Default: 1.1)",
"advanced": True,
},
"tfs_z": {
"display_name": "TFS Z",
"field_type": "float",
"info": "Tail free sampling value. (Default: 1)",
"advanced": True,
},
"timeout": {
"display_name": "Timeout",
"field_type": "int",
"info": "Timeout for the request stream.",
"advanced": True,
},
"top_k": {
"display_name": "Top K",
"field_type": "int",
"info": "Limits token selection to top K. (Default: 40)",
"advanced": True,
},
"top_p": {
"display_name": "Top P",
"field_type": "float",
"info": "Works together with top-k. (Default: 0.9)",
"advanced": True,
},
"verbose": {
"display_name": "Verbose",
"field_type": "bool",
"info": "Whether to print out response text.",
},
"tags": {
"display_name": "Tags",
"field_type": "list",
"info": "Tags to add to the run trace.",
"advanced": True,
},
"stop": {
"display_name": "Stop Tokens",
"field_type": "list",
"info": "List of tokens to signal the model to stop generating text.",
"advanced": True,
},
"system": {
"display_name": "System",
"field_type": "str",
"info": "System to use for generating text.",
"advanced": True,
},
"template": {
"display_name": "Template",
"field_type": "str",
"info": "Template to use for generating text.",
"advanced": True,
},
"inputs": {"display_name": "Input"},
}
def build(
self,
base_url: Optional[str],
model: str,
        inputs: str,
mirostat: Optional[str],
mirostat_eta: Optional[float] = None,
mirostat_tau: Optional[float] = None,
        ### When a callback component is added to Langflow, uncomment the parameters below. ###
# callback_manager: Optional[CallbackManager] = None,
# callbacks: Optional[List[Callbacks]] = None,
#######################################################################################
repeat_last_n: Optional[int] = None,
verbose: Optional[bool] = None,
cache: Optional[bool] = None,
num_ctx: Optional[int] = None,
num_gpu: Optional[int] = None,
format: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
num_thread: Optional[int] = None,
repeat_penalty: Optional[float] = None,
stop: Optional[List[str]] = None,
system: Optional[str] = None,
tags: Optional[List[str]] = None,
temperature: Optional[float] = None,
template: Optional[str] = None,
tfs_z: Optional[float] = None,
timeout: Optional[int] = None,
top_k: Optional[int] = None,
        top_p: Optional[float] = None,
) -> Text:
if not base_url:
base_url = "http://localhost:11434"
# Mapping mirostat settings to their corresponding values
mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}
# Default to 0 for 'Disabled'
mirostat_value = mirostat_options.get(mirostat, 0) # type: ignore
# Set mirostat_eta and mirostat_tau to None if mirostat is disabled
if mirostat_value == 0:
mirostat_eta = None
mirostat_tau = None
# Mapping system settings to their corresponding values
llm_params = {
"base_url": base_url,
"cache": cache,
"model": model,
"mirostat": mirostat_value,
"format": format,
"metadata": metadata,
"tags": tags,
            ## When a callback component is added to Langflow, uncomment the entries below. ##
# "callback_manager": callback_manager,
# "callbacks": callbacks,
#####################################################################################
"mirostat_eta": mirostat_eta,
"mirostat_tau": mirostat_tau,
"num_ctx": num_ctx,
"num_gpu": num_gpu,
"num_thread": num_thread,
"repeat_last_n": repeat_last_n,
"repeat_penalty": repeat_penalty,
"temperature": temperature,
"stop": stop,
"system": system,
"template": template,
"tfs_z": tfs_z,
"timeout": timeout,
"top_k": top_k,
"top_p": top_p,
"verbose": verbose,
}
        # Remove None values so that Ollama's own defaults apply
llm_params = {k: v for k, v in llm_params.items() if v is not None}
try:
output = ChatOllama(**llm_params) # type: ignore
except Exception as e:
raise ValueError("Could not initialize Ollama LLM.") from e
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
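A hedged usage sketch, assuming a local Ollama server is running and the llama2 model has been pulled:

component = ChatOllamaComponent()
text = component.build(
    base_url="http://localhost:11434",
    model="llama2",
    inputs="Give me one fun fact about llamas.",
    mirostat="Disabled",
    temperature=0.8,
)
print(text)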

View file

@@ -1,6 +1,6 @@
from typing import Optional
from langchain_community.chat_models.openai import ChatOpenAI
from langchain_openai import ChatOpenAI
from langflow import CustomComponent
from langflow.field_typing import NestedDict, Text

View file

@@ -0,0 +1,92 @@
from typing import List, Optional
from langchain_google_vertexai import ChatVertexAI
from langchain_core.messages.base import BaseMessage
from langflow import CustomComponent
from langflow.field_typing import Text
class ChatVertexAIComponent(CustomComponent):
display_name = "ChatVertexAI model"
description = "Generate text using Vertex AI Chat large language models API."
def build_config(self):
return {
"credentials": {
"display_name": "Credentials",
"field_type": "file",
"file_types": [".json"],
"file_path": None,
},
"examples": {
"display_name": "Examples",
"multiline": True,
},
"location": {
"display_name": "Location",
"value": "us-central1",
},
"max_output_tokens": {
"display_name": "Max Output Tokens",
"value": 128,
"advanced": True,
},
"model_name": {
"display_name": "Model Name",
"value": "chat-bison",
},
"project": {
"display_name": "Project",
},
"temperature": {
"display_name": "Temperature",
"value": 0.0,
},
"top_k": {
"display_name": "Top K",
"value": 40,
"advanced": True,
},
"top_p": {
"display_name": "Top P",
"value": 0.95,
"advanced": True,
},
"verbose": {
"display_name": "Verbose",
"value": False,
"advanced": True,
},
"inputs": {"display_name": "Input"},
}
def build(
self,
        inputs: str,
credentials: Optional[str],
project: str,
examples: Optional[List[BaseMessage]] = [],
location: str = "us-central1",
max_output_tokens: int = 128,
model_name: str = "chat-bison",
temperature: float = 0.0,
top_k: int = 40,
top_p: float = 0.95,
verbose: bool = False,
) -> Text:
output = ChatVertexAI(
credentials=credentials,
examples=examples,
location=location,
max_output_tokens=max_output_tokens,
model_name=model_name,
project=project,
temperature=temperature,
top_k=top_k,
top_p=top_p,
verbose=verbose,
)
message = output.invoke(inputs)
result = message.content if hasattr(message, "content") else message
self.status = result
return result
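A minimal sketch, assuming a GCP project with Vertex AI enabled and a service-account JSON file; the path and project ID are placeholders:

component = ChatVertexAIComponent()
text = component.build(
    inputs="List two Vertex AI features.",
    credentials="/path/to/service-account.json",  # placeholder credentials file
    project="my-gcp-project",  # placeholder project ID
    model_name="chat-bison",
)
print(text)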