update some description and implement LlamaCpp

This commit is contained in:
anovazzi1 2024-02-16 16:51:59 -03:00
commit 0dbd1763f3
11 changed files with 149 additions and 12 deletions

View file

@ -8,7 +8,7 @@ from langflow import CustomComponent
class AmazonBedrockComponent(CustomComponent):
display_name: str = "Amazon Bedrock model"
description: str = "LLM model from Amazon Bedrock."
description: str = "Generate text using LLM model from Amazon Bedrock."
def build_config(self):
return {

View file

@ -10,7 +10,7 @@ from langflow import CustomComponent
class AnthropicLLM(CustomComponent):
display_name: str = "Anthropic model"
description: str = "Anthropic Chat&Completion large language models."
description: str = "Generate text using Anthropic Chat&Completion large language models."
def build_config(self):
return {

View file

@ -6,7 +6,7 @@ from langchain_openai import AzureChatOpenAI
class AzureChatOpenAIComponent(CustomComponent):
display_name: str = "AzureOpenAI model"
description: str = "LLM model from Azure OpenAI."
description: str = "Generate text using LLM model from Azure OpenAI."
documentation: str = (
"https://python.langchain.com/docs/integrations/llms/azure_openai"
)

View file

@ -10,7 +10,7 @@ from langflow.field_typing import Text
class QianfanChatEndpointComponent(CustomComponent):
display_name: str = "QianfanChat Model"
description: str = (
"Baidu Qianfan chat models. Get more detail from "
"Generate text using Baidu Qianfan chat models. Get more detail from "
"https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint."
)

View file

@ -8,7 +8,7 @@ from langflow import CustomComponent
class CTransformersComponent(CustomComponent):
display_name = "CTransformers model"
description = "C Transformers LLM models"
description = "Generate text using CTransformers LLM models"
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"
def build_config(self):
@ -34,5 +34,6 @@ class CTransformersComponent(CustomComponent):
def build(self, model: str, model_file: str,inputs:str, model_type: str, config: Optional[Dict] = None) -> Text:
output = CTransformers(model=model, model_file=model_file, model_type=model_type, config=config)
message = output.invoke(inputs)
self.status = message
return message
result = message.content if hasattr(message, "content") else message
self.status = result
return result

View file

@ -5,7 +5,7 @@ from langflow.field_typing import Text
class CohereComponent(CustomComponent):
display_name = "Cohere model"
description = "Cohere large language models."
description = "Generate text using Cohere large language models."
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"
def build_config(self):

View file

@ -9,7 +9,7 @@ from langflow.field_typing import Text
class GoogleGenerativeAIComponent(CustomComponent):
display_name: str = "Google Generative AI model"
description: str = "A component that uses Google Generative AI to generate text."
description: str = "Generate text using Google Generative AI."
documentation: str = "http://docs.langflow.org/components/custom"
def build_config(self):

View file

@ -7,7 +7,7 @@ from langflow.field_typing import Text
class HuggingFaceEndpointsComponent(CustomComponent):
display_name: str = "Hugging Face Inference API models"
description: str = "LLM model from Hugging Face Inference API."
description: str = "Generate text using LLM model from Hugging Face Inference API."
def build_config(self):
return {

View file

@ -0,0 +1,136 @@
from typing import Optional, List, Dict, Any
from langflow import CustomComponent
from langchain_community.llms.llamacpp import LlamaCpp
from langflow.field_typing import Text
class LlamaCppComponent(CustomComponent):
    """Langflow component wrapping langchain's LlamaCpp LLM.

    Builds a `LlamaCpp` instance from a local model file and invokes it on
    the given input text, returning the generated text.
    """

    display_name = "LlamaCpp model"
    description = "Generate text using llama.cpp model."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"

    def build_config(self):
        """Declare the UI fields for every LlamaCpp parameter.

        Most knobs are marked `advanced` so the default form stays small;
        only `model_path` (required file), `temperature` and `inputs` are
        shown by default.
        """
        return {
            "grammar": {"display_name": "Grammar", "advanced": True},
            "cache": {"display_name": "Cache", "advanced": True},
            "client": {"display_name": "Client", "advanced": True},
            "echo": {"display_name": "Echo", "advanced": True},
            "f16_kv": {"display_name": "F16 KV", "advanced": True},
            "grammar_path": {"display_name": "Grammar Path", "advanced": True},
            "last_n_tokens_size": {"display_name": "Last N Tokens Size", "advanced": True},
            "logits_all": {"display_name": "Logits All", "advanced": True},
            "logprobs": {"display_name": "Logprobs", "advanced": True},
            "lora_base": {"display_name": "Lora Base", "advanced": True},
            "lora_path": {"display_name": "Lora Path", "advanced": True},
            "max_tokens": {"display_name": "Max Tokens", "advanced": True},
            "metadata": {"display_name": "Metadata", "advanced": True},
            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
            "model_path": {
                "display_name": "Model Path",
                "field_type": "file",
                "file_types": [".bin"],
                "required": True,
            },
            "n_batch": {"display_name": "N Batch", "advanced": True},
            "n_ctx": {"display_name": "N Ctx", "advanced": True},
            "n_gpu_layers": {"display_name": "N GPU Layers", "advanced": True},
            "n_parts": {"display_name": "N Parts", "advanced": True},
            "n_threads": {"display_name": "N Threads", "advanced": True},
            "repeat_penalty": {"display_name": "Repeat Penalty", "advanced": True},
            "rope_freq_base": {"display_name": "Rope Freq Base", "advanced": True},
            "rope_freq_scale": {"display_name": "Rope Freq Scale", "advanced": True},
            "seed": {"display_name": "Seed", "advanced": True},
            "stop": {"display_name": "Stop", "advanced": True},
            "streaming": {"display_name": "Streaming", "advanced": True},
            "suffix": {"display_name": "Suffix", "advanced": True},
            "tags": {"display_name": "Tags", "advanced": True},
            "temperature": {"display_name": "Temperature"},
            "top_k": {"display_name": "Top K", "advanced": True},
            "top_p": {"display_name": "Top P", "advanced": True},
            "use_mlock": {"display_name": "Use Mlock", "advanced": True},
            "use_mmap": {"display_name": "Use Mmap", "advanced": True},
            "verbose": {"display_name": "Verbose", "advanced": True},
            "vocab_only": {"display_name": "Vocab Only", "advanced": True},
            "inputs": {"display_name": "Input"},
        }

    def build(
        self,
        model_path: str,
        inputs: str,
        grammar: Optional[str] = None,
        cache: Optional[bool] = None,
        client: Optional[Any] = None,
        echo: Optional[bool] = False,
        f16_kv: bool = True,
        grammar_path: Optional[str] = None,
        last_n_tokens_size: Optional[int] = 64,
        logits_all: bool = False,
        logprobs: Optional[int] = None,
        lora_base: Optional[str] = None,
        lora_path: Optional[str] = None,
        max_tokens: Optional[int] = 256,
        metadata: Optional[Dict] = None,
        # NOTE: mutable defaults ({} / []) replaced with None sentinels to
        # avoid a single shared object across calls; effective values are
        # restored below before constructing LlamaCpp.
        model_kwargs: Optional[Dict] = None,
        n_batch: Optional[int] = 8,
        n_ctx: int = 512,
        n_gpu_layers: Optional[int] = 1,
        n_parts: int = -1,
        n_threads: Optional[int] = 1,
        repeat_penalty: Optional[float] = 1.1,
        rope_freq_base: float = 10000.0,
        rope_freq_scale: float = 1.0,
        seed: int = -1,
        stop: Optional[List[str]] = None,
        streaming: bool = True,
        suffix: Optional[str] = "",
        tags: Optional[List[str]] = None,
        temperature: Optional[float] = 0.8,
        top_k: Optional[int] = 40,
        top_p: Optional[float] = 0.95,
        use_mlock: bool = False,
        use_mmap: Optional[bool] = True,
        verbose: bool = True,
        vocab_only: bool = False,
    ) -> Text:
        """Instantiate LlamaCpp with the given parameters and run `inputs`.

        Returns the generated text; also stores it on `self.status` so the
        UI can display the last result.
        """
        # Restore the original effective defaults for the None sentinels.
        model_kwargs = {} if model_kwargs is None else model_kwargs
        stop = [] if stop is None else stop
        tags = [] if tags is None else tags

        output = LlamaCpp(
            model_path=model_path,
            grammar=grammar,
            cache=cache,
            client=client,
            echo=echo,
            f16_kv=f16_kv,
            grammar_path=grammar_path,
            last_n_tokens_size=last_n_tokens_size,
            logits_all=logits_all,
            logprobs=logprobs,
            lora_base=lora_base,
            lora_path=lora_path,
            max_tokens=max_tokens,
            metadata=metadata,
            model_kwargs=model_kwargs,
            n_batch=n_batch,
            n_ctx=n_ctx,
            n_gpu_layers=n_gpu_layers,
            n_parts=n_parts,
            n_threads=n_threads,
            repeat_penalty=repeat_penalty,
            rope_freq_base=rope_freq_base,
            rope_freq_scale=rope_freq_scale,
            seed=seed,
            stop=stop,
            streaming=streaming,
            suffix=suffix,
            tags=tags,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            use_mlock=use_mlock,
            use_mmap=use_mmap,
            verbose=verbose,
            vocab_only=vocab_only,
        )
        message = output.invoke(inputs)
        # invoke() may return a message object (chat-style) or a plain str.
        result = message.content if hasattr(message, "content") else message
        self.status = result
        return result

View file

@ -13,7 +13,7 @@ from langflow.field_typing import Text
class ChatOllamaComponent(CustomComponent):
display_name = "ChatOllama model"
description = "Local LLM for chat with Ollama."
description = "Generate text using Local LLM for chat with Ollama."
def build_config(self) -> dict:
return {

View file

@ -8,7 +8,7 @@ from langflow.field_typing import Text
class ChatVertexAIComponent(CustomComponent):
display_name = "ChatVertexAI model"
description = "`Vertex AI` Chat large language models API."
description = "Generate text using Vertex AI Chat large language models API."
def build_config(self):
return {