Update LLMS references to Models and Specs
commit 4a6e15533e
parent d9d2545d9b
35 changed files with 71 additions and 34 deletions
@@ -1,13 +1,13 @@
 from typing import Optional

 from langchain_community.chat_models.bedrock import BedrockChat
-from langflow.field_typing import Text

 from langflow import CustomComponent
+from langflow.field_typing import Text


 class AmazonBedrockComponent(CustomComponent):
-    display_name: str = "Amazon Bedrock model"
+    display_name: str = "Amazon Bedrock Model"
     description: str = "Generate text using LLM model from Amazon Bedrock."

     def build_config(self):
@@ -2,14 +2,13 @@ from typing import Optional

 from langchain_community.chat_models.anthropic import ChatAnthropic
 from pydantic.v1 import SecretStr

 from langflow import CustomComponent
-from langflow.field_typing import Text


 class AnthropicLLM(CustomComponent):
-    display_name: str = "Anthropic model"
+    display_name: str = "AnthropicModel"
     description: str = "Generate text using Anthropic Chat&Completion large language models."

     def build_config(self):
@@ -67,7 +66,7 @@ class AnthropicLLM(CustomComponent):
         try:
             output = ChatAnthropic(
                 model_name=model,
-                anthropic_api_key=SecretStr(anthropic_api_key) if anthropic_api_key else None,
+                anthropic_api_key=(SecretStr(anthropic_api_key) if anthropic_api_key else None),
                 max_tokens_to_sample=max_tokens,  # type: ignore
                 temperature=temperature,
                 anthropic_api_url=api_endpoint,
@@ -7,7 +7,7 @@ from langflow import CustomComponent


 class AzureChatOpenAIComponent(CustomComponent):
-    display_name: str = "AzureOpenAI model"
+    display_name: str = "AzureOpenAIModel"
     description: str = "Generate text using LLM model from Azure OpenAI."
     documentation: str = "https://python.langchain.com/docs/integrations/llms/azure_openai"
     beta = False
@@ -1,13 +1,13 @@
 from typing import Dict, Optional

 from langchain_community.llms.ctransformers import CTransformers
-from langflow.field_typing import Text

 from langflow import CustomComponent
+from langflow.field_typing import Text


 class CTransformersComponent(CustomComponent):
-    display_name = "CTransformers model"
+    display_name = "CTransformersModel"
     description = "Generate text using CTransformers LLM models"
     documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"
@@ -31,7 +31,14 @@ class CTransformersComponent(CustomComponent):
             "inputs": {"display_name": "Input"},
         }

-    def build(self, model: str, model_file: str, inputs: str, model_type: str, config: Optional[Dict] = None) -> Text:
+    def build(
+        self,
+        model: str,
+        model_file: str,
+        inputs: str,
+        model_type: str,
+        config: Optional[Dict] = None,
+    ) -> Text:
         output = CTransformers(model=model, model_file=model_file, model_type=model_type, config=config)
         message = output.invoke(inputs)
         result = message.content if hasattr(message, "content") else message
@@ -1,18 +1,33 @@
 from langchain_community.chat_models.cohere import ChatCohere

 from langflow import CustomComponent
 from langflow.field_typing import Text


 class CohereComponent(CustomComponent):
-    display_name = "Cohere model"
+    display_name = "CohereModel"
     description = "Generate text using Cohere large language models."
     documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"

     def build_config(self):
         return {
-            "cohere_api_key": {"display_name": "Cohere API Key", "type": "password", "password": True},
-            "max_tokens": {"display_name": "Max Tokens", "default": 256, "type": "int", "show": True},
-            "temperature": {"display_name": "Temperature", "default": 0.75, "type": "float", "show": True},
+            "cohere_api_key": {
+                "display_name": "Cohere API Key",
+                "type": "password",
+                "password": True,
+            },
+            "max_tokens": {
+                "display_name": "Max Tokens",
+                "default": 256,
+                "type": "int",
+                "show": True,
+            },
+            "temperature": {
+                "display_name": "Temperature",
+                "default": 0.75,
+                "type": "float",
+                "show": True,
+            },
             "inputs": {"display_name": "Input"},
         }
@@ -23,8 +38,13 @@ class CohereComponent(CustomComponent):
         max_tokens: int = 256,
         temperature: float = 0.75,
     ) -> Text:
-        output = ChatCohere(cohere_api_key=cohere_api_key, max_tokens=max_tokens, temperature=temperature)
+        output = ChatCohere(
+            cohere_api_key=cohere_api_key,
+            max_tokens=max_tokens,
+            temperature=temperature,
+        )
         message = output.invoke(inputs)
         result = message.content if hasattr(message, "content") else message
         self.status = result
         return result
+        return result
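Several of the rewrapped build methods above share the same normalization idiom: invoke may hand back a chat message object (text in .content) or a plain string, and the hasattr guard covers both cases. A minimal standalone sketch of that idiom, with a hypothetical normalize_output helper and a fake message class used only for illustration (neither exists in langflow or langchain):

from typing import Any


def normalize_output(message: Any) -> str:
    # Chat models typically return a message object carrying text in `.content`;
    # plain LLMs return the string itself, so fall back to the raw value.
    return message.content if hasattr(message, "content") else message


class FakeChatMessage:
    # Stand-in for a chat message object; used only for this illustration.
    def __init__(self, content: str) -> None:
        self.content = content


if __name__ == "__main__":
    print(normalize_output(FakeChatMessage("hello from a chat model")))  # -> hello from a chat model
    print(normalize_output("hello from a plain llm"))                    # -> hello from a plain llm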
@@ -1,14 +1,14 @@
 from typing import Optional

 from langchain_google_genai import ChatGoogleGenerativeAI  # type: ignore
-from langflow import CustomComponent
-from langflow.field_typing import RangeSpec
 from pydantic.v1.types import SecretStr
-from langflow.field_typing import Text

+from langflow import CustomComponent
+from langflow.field_typing import RangeSpec, Text


 class GoogleGenerativeAIComponent(CustomComponent):
-    display_name: str = "Google Generative AI model"
+    display_name: str = "Google Generative AIModel"
     description: str = "Generate text using Google Generative AI to generate text."
     documentation: str = "http://docs.langflow.org/components/custom"
@@ -1,11 +1,13 @@
-from typing import Optional, List, Dict, Any
-from langflow import CustomComponent
+from typing import Any, Dict, List, Optional
+
 from langchain_community.llms.llamacpp import LlamaCpp

+from langflow import CustomComponent
 from langflow.field_typing import Text


 class LlamaCppComponent(CustomComponent):
-    display_name = "LlamaCpp model"
+    display_name = "LlamaCppModel"
     description = "Generate text using llama.cpp model."
     documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"
@@ -17,7 +19,10 @@ class LlamaCppComponent(CustomComponent):
             "echo": {"display_name": "Echo", "advanced": True},
             "f16_kv": {"display_name": "F16 KV", "advanced": True},
             "grammar_path": {"display_name": "Grammar Path", "advanced": True},
-            "last_n_tokens_size": {"display_name": "Last N Tokens Size", "advanced": True},
+            "last_n_tokens_size": {
+                "display_name": "Last N Tokens Size",
+                "advanced": True,
+            },
             "logits_all": {"display_name": "Logits All", "advanced": True},
             "logprobs": {"display_name": "Logprobs", "advanced": True},
             "lora_base": {"display_name": "Lora Base", "advanced": True},
@@ -134,3 +139,5 @@ class LlamaCppComponent(CustomComponent):
         result = message.content if hasattr(message, "content") else message
         self.status = result
         return result
+        self.status = result
+        return result
@@ -12,7 +12,7 @@ from langflow.field_typing import Text


 class ChatOllamaComponent(CustomComponent):
-    display_name = "ChatOllama model"
+    display_name = "ChatOllamaModel"
     description = "Generate text using Local LLM for chat with Ollama."

     def build_config(self) -> dict:
@@ -7,7 +7,7 @@ from langflow.field_typing import Text


 class ChatVertexAIComponent(CustomComponent):
-    display_name = "ChatVertexAI model"
+    display_name = "ChatVertexAIModel"
     description = "Generate text using Vertex AI Chat large language models API."

     def build_config(self):
@@ -67,7 +67,7 @@ class LLMVertex(StatelessVertex):
     class_built_object = None

     def __init__(self, data: Dict, graph, params: Optional[Dict] = None):
-        super().__init__(data, graph=graph, base_type="llms", params=params)
+        super().__init__(data, graph=graph, base_type="models", params=params)
         self.steps: List[Callable] = [self._custom_build]

     async def _custom_build(self, *args, **kwargs):
@@ -35,7 +35,7 @@ def import_by_type(_type: str, name: str) -> Any:
     func_dict = {
         "agents": import_agent,
         "prompts": import_prompt,
-        "llms": {"llm": import_llm, "chat": import_chat_llm},
+        "models": {"llm": import_llm, "chat": import_chat_llm},
         "tools": import_tool,
         "chains": import_chain,
         "toolkits": import_toolkit,
@@ -50,7 +50,7 @@ def import_by_type(_type: str, name: str) -> Any:
         "retrievers": import_retriever,
         "custom_components": import_custom_component,
     }
-    if _type == "llms":
+    if _type == "models":
         key = "chat" if "chat" in name.lower() else "llm"
         loaded_func = func_dict[_type][key]  # type: ignore
     else:
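The two hunks above keep the existing chat-vs-LLM dispatch and only rename the dictionary key from "llms" to "models". A minimal standalone sketch of that selection logic, using hypothetical stub importers in place of langflow's real import_llm / import_chat_llm (the names resolve and the routing rule are the only parts taken from the diff):

from typing import Callable, Dict, Union


def import_chat_llm(name: str) -> str:
    # Hypothetical stub standing in for the chat-model importer.
    return f"chat importer resolved {name}"


def import_llm(name: str) -> str:
    # Hypothetical stub standing in for the plain-LLM importer.
    return f"llm importer resolved {name}"


# Mirrors the renamed mapping: the "models" entry is itself a dict keyed by
# "llm" vs "chat", unlike the single-callable entries for other types.
FUNC_DICT: Dict[str, Union[Callable[[str], str], Dict[str, Callable[[str], str]]]] = {
    "models": {"llm": import_llm, "chat": import_chat_llm},
}


def resolve(_type: str, name: str) -> str:
    # Same rule as the diff: names containing "chat" go to the chat importer.
    if _type == "models":
        key = "chat" if "chat" in name.lower() else "llm"
        return FUNC_DICT[_type][key](name)  # type: ignore[index]
    raise ValueError(f"unsupported type: {_type}")


print(resolve("models", "ChatOpenAI"))  # routed to the chat importer
print(resolve("models", "LlamaCpp"))    # routed to the llm importer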
@@ -19,7 +19,11 @@ from langflow.interface.custom.utils import get_function
 from langflow.interface.custom_lists import CUSTOM_NODES
 from langflow.interface.importing.utils import import_by_type
 from langflow.interface.initialize.llm import initialize_vertexai
-from langflow.interface.initialize.utils import handle_format_kwargs, handle_node_type, handle_partial_variables
+from langflow.interface.initialize.utils import (
+    handle_format_kwargs,
+    handle_node_type,
+    handle_partial_variables,
+)
 from langflow.interface.initialize.vector_store import vecstore_initializer
 from langflow.interface.output_parsers.base import output_parser_creator
 from langflow.interface.retrievers.base import retriever_creator
@@ -105,7 +109,7 @@ async def instantiate_based_on_type(class_object, base_type, node_type, params,
         return instantiate_chains(node_type, class_object, params)
     elif base_type == "output_parsers":
         return instantiate_output_parser(node_type, class_object, params)
-    elif base_type == "llms":
+    elif base_type == "models":
         return instantiate_llm(node_type, class_object, params)
     elif base_type == "retrievers":
         return instantiate_retriever(node_type, class_object, params)
@@ -1,16 +1,16 @@
 from typing import Dict, List, Optional, Type

-from loguru import logger
-
 from langflow.interface.base import LangChainTypeCreator
 from langflow.interface.custom_lists import llm_type_to_cls_dict
 from langflow.services.deps import get_settings_service

 from langflow.template.frontend_node.llms import LLMFrontendNode
+from loguru import logger
+
 from langflow.utils.util import build_template_from_class


 class LLMCreator(LangChainTypeCreator):
-    type_name: str = "llms"
+    type_name: str = "models"

     @property
     def frontend_node_class(self) -> Type[LLMFrontendNode]: