From 0dbd1763f3bd71651f7108c0a3586baee92068a3 Mon Sep 17 00:00:00 2001
From: anovazzi1
Date: Fri, 16 Feb 2024 16:51:59 -0300
Subject: [PATCH] update some descriptions and implement LlamaCpp

---
 .../components/models/AmazonBedrock.py        |   2 +-
 .../langflow/components/models/Anthropic.py   |   2 +-
 .../langflow/components/models/AzureOpenAI.py |   2 +-
 .../components/models/BaiduQianfanChat.py     |   2 +-
 .../components/models/CTransformers.py        |   7 +-
 .../langflow/components/models/Cohere.py      |   2 +-
 .../components/models/GoogleGenerativeAI.py   |   2 +-
 .../langflow/components/models/HuggingFace.py |   2 +-
 .../langflow/components/models/LlamaCpp.py    | 136 ++++++++++++++++++
 .../langflow/components/models/Ollama.py      |   2 +-
 .../langflow/components/models/VertexAi.py    |   2 +-
 11 files changed, 149 insertions(+), 12 deletions(-)
 create mode 100644 src/backend/langflow/components/models/LlamaCpp.py

diff --git a/src/backend/langflow/components/models/AmazonBedrock.py b/src/backend/langflow/components/models/AmazonBedrock.py
index d679dd501..7c40a3960 100644
--- a/src/backend/langflow/components/models/AmazonBedrock.py
+++ b/src/backend/langflow/components/models/AmazonBedrock.py
@@ -8,7 +8,7 @@ from langflow import CustomComponent

 class AmazonBedrockComponent(CustomComponent):
     display_name: str = "Amazon Bedrock model"
-    description: str = "LLM model from Amazon Bedrock."
+    description: str = "Generate text using LLM models from Amazon Bedrock."

     def build_config(self):
         return {
diff --git a/src/backend/langflow/components/models/Anthropic.py b/src/backend/langflow/components/models/Anthropic.py
index a8b7741c9..b3ec44680 100644
--- a/src/backend/langflow/components/models/Anthropic.py
+++ b/src/backend/langflow/components/models/Anthropic.py
@@ -10,7 +10,7 @@ from langflow import CustomComponent

 class AnthropicLLM(CustomComponent):
     display_name: str = "Anthropic model"
-    description: str = "Anthropic Chat&Completion large language models."
+    description: str = "Generate text using Anthropic Chat & Completion large language models."

     def build_config(self):
         return {
diff --git a/src/backend/langflow/components/models/AzureOpenAI.py b/src/backend/langflow/components/models/AzureOpenAI.py
index 285c7f316..f8c13d8ed 100644
--- a/src/backend/langflow/components/models/AzureOpenAI.py
+++ b/src/backend/langflow/components/models/AzureOpenAI.py
@@ -6,7 +6,7 @@ from langchain_openai import AzureChatOpenAI

 class AzureChatOpenAIComponent(CustomComponent):
     display_name: str = "AzureOpenAI model"
-    description: str = "LLM model from Azure OpenAI."
+    description: str = "Generate text using LLM models from Azure OpenAI."
     documentation: str = (
         "https://python.langchain.com/docs/integrations/llms/azure_openai"
     )
diff --git a/src/backend/langflow/components/models/BaiduQianfanChat.py b/src/backend/langflow/components/models/BaiduQianfanChat.py
index fe705aa51..88051d0e9 100644
--- a/src/backend/langflow/components/models/BaiduQianfanChat.py
+++ b/src/backend/langflow/components/models/BaiduQianfanChat.py
@@ -10,7 +10,7 @@ from langflow.field_typing import Text

 class QianfanChatEndpointComponent(CustomComponent):
     display_name: str = "QianfanChat Model"
     description: str = (
-        "Baidu Qianfan chat models. Get more detail from "
+        "Generate text using Baidu Qianfan chat models. Get more details from "
         "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint."
     )
diff --git a/src/backend/langflow/components/models/CTransformers.py b/src/backend/langflow/components/models/CTransformers.py
index 92354c9ad..ed3c227d5 100644
--- a/src/backend/langflow/components/models/CTransformers.py
+++ b/src/backend/langflow/components/models/CTransformers.py
@@ -8,7 +8,7 @@ from langflow import CustomComponent

 class CTransformersComponent(CustomComponent):
     display_name = "CTransformers model"
-    description = "C Transformers LLM models"
+    description = "Generate text using CTransformers LLM models."
     documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"

     def build_config(self):
@@ -34,5 +34,6 @@ class CTransformersComponent(CustomComponent):
     def build(self, model: str, model_file: str,inputs:str, model_type: str, config: Optional[Dict] = None) -> Text:
         output = CTransformers(model=model, model_file=model_file, model_type=model_type, config=config)
         message = output.invoke(inputs)
-        self.status = message
-        return message
+        result = message.content if hasattr(message, "content") else message
+        self.status = result
+        return result
diff --git a/src/backend/langflow/components/models/Cohere.py b/src/backend/langflow/components/models/Cohere.py
index a10e642b5..3342033b1 100644
--- a/src/backend/langflow/components/models/Cohere.py
+++ b/src/backend/langflow/components/models/Cohere.py
@@ -5,7 +5,7 @@ from langflow.field_typing import Text

 class CohereComponent(CustomComponent):
     display_name = "Cohere model"
-    description = "Cohere large language models."
+    description = "Generate text using Cohere large language models."
     documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"

     def build_config(self):
diff --git a/src/backend/langflow/components/models/GoogleGenerativeAI.py b/src/backend/langflow/components/models/GoogleGenerativeAI.py
index ada76cb1a..50707b38e 100644
--- a/src/backend/langflow/components/models/GoogleGenerativeAI.py
+++ b/src/backend/langflow/components/models/GoogleGenerativeAI.py
@@ -9,7 +9,7 @@ from langflow.field_typing import Text

 class GoogleGenerativeAIComponent(CustomComponent):
     display_name: str = "Google Generative AI model"
-    description: str = "A component that uses Google Generative AI to generate text."
+    description: str = "Generate text using Google Generative AI."
     documentation: str = "http://docs.langflow.org/components/custom"

     def build_config(self):
diff --git a/src/backend/langflow/components/models/HuggingFace.py b/src/backend/langflow/components/models/HuggingFace.py
index e49a60b08..3fa254717 100644
--- a/src/backend/langflow/components/models/HuggingFace.py
+++ b/src/backend/langflow/components/models/HuggingFace.py
@@ -7,7 +7,7 @@ from langflow.field_typing import Text

 class HuggingFaceEndpointsComponent(CustomComponent):
     display_name: str = "Hugging Face Inference API models"
-    description: str = "LLM model from Hugging Face Inference API."
+    description: str = "Generate text using LLM models from the Hugging Face Inference API."

     def build_config(self):
         return {
diff --git a/src/backend/langflow/components/models/LlamaCpp.py b/src/backend/langflow/components/models/LlamaCpp.py
new file mode 100644
index 000000000..cafcdf5eb
--- /dev/null
+++ b/src/backend/langflow/components/models/LlamaCpp.py
@@ -0,0 +1,136 @@
+from typing import Optional, List, Dict, Any
+from langflow import CustomComponent
+from langchain_community.llms.llamacpp import LlamaCpp
+from langflow.field_typing import Text
+
+
+class LlamaCppComponent(CustomComponent):
+    display_name = "LlamaCpp model"
+    description = "Generate text using a llama.cpp model."
+    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"
+
+    def build_config(self):
+        return {
+            "grammar": {"display_name": "Grammar", "advanced": True},
+            "cache": {"display_name": "Cache", "advanced": True},
+            "client": {"display_name": "Client", "advanced": True},
+            "echo": {"display_name": "Echo", "advanced": True},
+            "f16_kv": {"display_name": "F16 KV", "advanced": True},
+            "grammar_path": {"display_name": "Grammar Path", "advanced": True},
+            "last_n_tokens_size": {"display_name": "Last N Tokens Size", "advanced": True},
+            "logits_all": {"display_name": "Logits All", "advanced": True},
+            "logprobs": {"display_name": "Logprobs", "advanced": True},
+            "lora_base": {"display_name": "Lora Base", "advanced": True},
+            "lora_path": {"display_name": "Lora Path", "advanced": True},
+            "max_tokens": {"display_name": "Max Tokens", "advanced": True},
+            "metadata": {"display_name": "Metadata", "advanced": True},
+            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
+            "model_path": {
+                "display_name": "Model Path",
+                "field_type": "file",
+                "file_types": [".bin"],
+                "required": True,
+            },
+            "n_batch": {"display_name": "N Batch", "advanced": True},
+            "n_ctx": {"display_name": "N Ctx", "advanced": True},
+            "n_gpu_layers": {"display_name": "N GPU Layers", "advanced": True},
+            "n_parts": {"display_name": "N Parts", "advanced": True},
+            "n_threads": {"display_name": "N Threads", "advanced": True},
+            "repeat_penalty": {"display_name": "Repeat Penalty", "advanced": True},
+            "rope_freq_base": {"display_name": "Rope Freq Base", "advanced": True},
+            "rope_freq_scale": {"display_name": "Rope Freq Scale", "advanced": True},
+            "seed": {"display_name": "Seed", "advanced": True},
+            "stop": {"display_name": "Stop", "advanced": True},
+            "streaming": {"display_name": "Streaming", "advanced": True},
+            "suffix": {"display_name": "Suffix", "advanced": True},
+            "tags": {"display_name": "Tags", "advanced": True},
+            "temperature": {"display_name": "Temperature"},
+            "top_k": {"display_name": "Top K", "advanced": True},
+            "top_p": {"display_name": "Top P", "advanced": True},
+            "use_mlock": {"display_name": "Use Mlock", "advanced": True},
+            "use_mmap": {"display_name": "Use Mmap", "advanced": True},
+            "verbose": {"display_name": "Verbose", "advanced": True},
+            "vocab_only": {"display_name": "Vocab Only", "advanced": True},
+            "inputs": {"display_name": "Input"},
+        }
+
+    def build(
+        self,
+        model_path: str,
+        inputs: str,
+        grammar: Optional[str] = None,
+        cache: Optional[bool] = None,
+        client: Optional[Any] = None,
+        echo: Optional[bool] = False,
+        f16_kv: bool = True,
+        grammar_path: Optional[str] = None,
+        last_n_tokens_size: Optional[int] = 64,
+        logits_all: bool = False,
+        logprobs: Optional[int] = None,
+        lora_base: Optional[str] = None,
+        lora_path: Optional[str] = None,
+        max_tokens: Optional[int] = 256,
+        metadata: Optional[Dict] = None,
+        model_kwargs: Optional[Dict] = None,
+        n_batch: Optional[int] = 8,
+        n_ctx: int = 512,
+        n_gpu_layers: Optional[int] = 1,
+        n_parts: int = -1,
+        n_threads: Optional[int] = 1,
+        repeat_penalty: Optional[float] = 1.1,
+        rope_freq_base: float = 10000.0,
+        rope_freq_scale: float = 1.0,
+        seed: int = -1,
+        stop: Optional[List[str]] = None,
+        streaming: bool = True,
+        suffix: Optional[str] = "",
+        tags: Optional[List[str]] = None,
+        temperature: Optional[float] = 0.8,
+        top_k: Optional[int] = 40,
+        top_p: Optional[float] = 0.95,
+        use_mlock: bool = False,
+        use_mmap: Optional[bool] = True,
+        verbose: bool = True,
+        vocab_only: bool = False,
+    ) -> Text:
+        output = LlamaCpp(
+            model_path=model_path,
+            grammar=grammar,
+            cache=cache,
+            client=client,
+            echo=echo,
+            f16_kv=f16_kv,
+            grammar_path=grammar_path,
+            last_n_tokens_size=last_n_tokens_size,
+            logits_all=logits_all,
+            logprobs=logprobs,
+            lora_base=lora_base,
+            lora_path=lora_path,
+            max_tokens=max_tokens,
+            metadata=metadata,
+            model_kwargs=model_kwargs or {},
+            n_batch=n_batch,
+            n_ctx=n_ctx,
+            n_gpu_layers=n_gpu_layers,
+            n_parts=n_parts,
+            n_threads=n_threads,
+            repeat_penalty=repeat_penalty,
+            rope_freq_base=rope_freq_base,
+            rope_freq_scale=rope_freq_scale,
+            seed=seed,
+            stop=stop,
+            streaming=streaming,
+            suffix=suffix,
+            tags=tags,
+            temperature=temperature,
+            top_k=top_k,
+            top_p=top_p,
+            use_mlock=use_mlock,
+            use_mmap=use_mmap,
+            verbose=verbose,
+            vocab_only=vocab_only,
+        )
+        message = output.invoke(inputs)
+        result = message.content if hasattr(message, "content") else message
+        self.status = result
+        return result
diff --git a/src/backend/langflow/components/models/Ollama.py b/src/backend/langflow/components/models/Ollama.py
index fa12ac79e..c22860f41 100644
--- a/src/backend/langflow/components/models/Ollama.py
+++ b/src/backend/langflow/components/models/Ollama.py
@@ -13,7 +13,7 @@ from langflow.field_typing import Text

 class ChatOllamaComponent(CustomComponent):
     display_name = "ChatOllama model"
-    description = "Local LLM for chat with Ollama."
+    description = "Generate text using a local LLM for chat with Ollama."

     def build_config(self) -> dict:
         return {
diff --git a/src/backend/langflow/components/models/VertexAi.py b/src/backend/langflow/components/models/VertexAi.py
index 9aa185e2e..2555aee0f 100644
--- a/src/backend/langflow/components/models/VertexAi.py
+++ b/src/backend/langflow/components/models/VertexAi.py
@@ -8,7 +8,7 @@ from langflow.field_typing import Text

 class ChatVertexAIComponent(CustomComponent):
     display_name = "ChatVertexAI model"
-    description = "`Vertex AI` Chat large language models API."
+    description = "Generate text using the Vertex AI Chat large language models API."

     def build_config(self):
         return {
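
Reviewer note: below is a minimal sketch (not part of the patch) of how the
wrapper constructed by LlamaCppComponent.build is expected to behave. It drives
the same langchain_community LlamaCpp class directly; the model path and the
prompt are placeholder assumptions, not files or values shipped with this
change.

    # Sketch only: assumes a local llama.cpp-compatible .bin model at a
    # hypothetical path.
    from langchain_community.llms.llamacpp import LlamaCpp

    llm = LlamaCpp(
        model_path="/path/to/model.bin",  # hypothetical placeholder
        temperature=0.8,
        max_tokens=256,
        n_ctx=512,
    )

    # For a completion-style LLM like LlamaCpp, invoke() returns a plain
    # string, so the hasattr(message, "content") guard in build() falls
    # through to the raw message; the same guard keeps the code correct for
    # chat models, whose invoke() returns a message object with .content.
    print(llm.invoke("List three facts about llamas."))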