Update component descriptions and messages

Rodrigo Nader 2024-03-28 19:33:55 -03:00
commit 754e74ec37
13 changed files with 12 additions and 219 deletions

View file

@@ -7,8 +7,8 @@ from langflow.schema import Record
 class DocumentToRecordComponent(CustomComponent):
-    display_name = "Documents to Records"
-    description = "Convert documents to records."
+    display_name = "Documents To Records"
+    description = "Convert LangChain Documents into Records."
     field_config = {
         "documents": {"display_name": "Documents"},

View file

@@ -7,7 +7,7 @@ from langflow.schema import Record
 class MessageHistoryComponent(CustomComponent):
     display_name = "Message History"
-    description = "Used to retrieve stored messages."
+    description = "Used to retrieve stored chat messages."
     beta: bool = True
     def build_config(self):

View file

@@ -4,9 +4,9 @@ from langflow.interface.custom.custom_component import CustomComponent
 from langflow.schema import Record
-class RecordsAsTextComponent(CustomComponent):
-    display_name = "Records to Text"
-    description = "Converts Records into single piece of text using a template."
+class RecordsToTextComponent(CustomComponent):
+    display_name = "Records To Text"
+    description = "Convert Records into plain text using a pre-defined template."
     def build_config(self):
         return {

View file

@@ -4,7 +4,7 @@ from langflow.schema import Record
 class UpdateRecordComponent(CustomComponent):
     display_name = "Update Record"
-    description = "Updates a record with new data."
+    description = "Updates a Record with new data. Akin to a Python dictionary update."
     def build_config(self):
         return {

View file

@@ -7,7 +7,7 @@ from langflow.schema import Record
 class ChatInput(ChatComponent):
     display_name = "Chat Input"
-    description = "Used to get user input from the chat."
+    description = "Used to capture and send user inputs from the chat."
     icon = "ChatInput"
     def build(

View file

@@ -6,7 +6,7 @@ from langflow.interface.custom.custom_component import CustomComponent
 class PromptComponent(CustomComponent):
     display_name: str = "Prompt"
-    description: str = "A component for creating prompts using templates"
+    description: str = "A component for creating prompt templates using dynamic variables."
     icon = "terminal-square"
     def build_config(self):

View file

@@ -6,7 +6,7 @@ from langflow.field_typing import Text
 class TextInput(TextComponent):
     display_name = "Text Input"
-    description = "Used to pass text input to the next component."
+    description = "Used to capture and send text inputs."
     def build(self, input_value: Optional[str] = "") -> Text:
         return super().build(input_value=input_value)

View file

@@ -1,59 +0,0 @@
-from typing import Dict, Optional
-from langchain_community.llms.ctransformers import CTransformers
-from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import Text
-class CTransformersComponent(LCModelComponent):
-    display_name = "CTransformers"
-    description = "Generate text using CTransformers LLM models"
-    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"
-    def build_config(self):
-        return {
-            "model": {"display_name": "Model", "required": True},
-            "model_file": {
-                "display_name": "Model File",
-                "required": False,
-                "field_type": "file",
-                "file_types": [".bin"],
-            },
-            "model_type": {"display_name": "Model Type", "required": True},
-            "config": {
-                "display_name": "Config",
-                "advanced": True,
-                "required": False,
-                "field_type": "dict",
-                "value": '{"top_k":40,"top_p":0.95,"temperature":0.8,"repetition_penalty":1.1,"last_n_tokens":64,"seed":-1,"max_new_tokens":256,"stop":"","stream":"False","reset":"True","batch_size":8,"threads":-1,"context_length":-1,"gpu_layers":0}',
-            },
-            "input_value": {"display_name": "Input"},
-            "stream": {
-                "display_name": "Stream",
-                "info": "Stream the response from the model.",
-            },
-            "system_message": {
-                "display_name": "System Message",
-                "info": "System message to pass to the model.",
-            },
-        }
-    def build(
-        self,
-        model: str,
-        model_file: str,
-        input_value: Text,
-        model_type: str,
-        stream: bool = False,
-        config: Optional[Dict] = None,
-    ) -> Text:
-        output = CTransformers(
-            client=None,
-            model=model,
-            model_file=model_file,
-            model_type=model_type,
-            config=config,  # noqa
-        )
-        return self.get_result(runnable=output, stream=stream, input_value=input_value)
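
The removed component above was a thin wrapper around LangChain's CTransformers class. A minimal sketch of equivalent direct usage, assuming a CTransformers-compatible GGML model (the model id, config values, and prompt below are illustrative placeholders, not part of this commit):

from langchain_community.llms.ctransformers import CTransformers

# Placeholder model repo; any model supported by the ctransformers library works here.
llm = CTransformers(
    model="marella/gpt-2-ggml",
    model_type="gpt2",
    config={"max_new_tokens": 64, "temperature": 0.8},
)
# Runnable interface: pass a plain prompt string and get the generated text back.
print(llm.invoke("Summarize this commit in one sentence."))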

View file

@@ -1,148 +0,0 @@
-from typing import Any, Dict, List, Optional
-from langchain_community.llms.llamacpp import LlamaCpp
-from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import Text
-class LlamaCppComponent(LCModelComponent):
-    display_name = "LLaMA C++"
-    description = "Generate text using llama.cpp model."
-    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"
-    def build_config(self):
-        return {
-            "grammar": {"display_name": "Grammar", "advanced": True},
-            "cache": {"display_name": "Cache", "advanced": True},
-            "client": {"display_name": "Client", "advanced": True},
-            "echo": {"display_name": "Echo", "advanced": True},
-            "f16_kv": {"display_name": "F16 KV", "advanced": True},
-            "grammar_path": {"display_name": "Grammar Path", "advanced": True},
-            "last_n_tokens_size": {
-                "display_name": "Last N Tokens Size",
-                "advanced": True,
-            },
-            "logits_all": {"display_name": "Logits All", "advanced": True},
-            "logprobs": {"display_name": "Logprobs", "advanced": True},
-            "lora_base": {"display_name": "Lora Base", "advanced": True},
-            "lora_path": {"display_name": "Lora Path", "advanced": True},
-            "max_tokens": {"display_name": "Max Tokens", "advanced": True},
-            "metadata": {"display_name": "Metadata", "advanced": True},
-            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
-            "model_path": {
-                "display_name": "Model Path",
-                "field_type": "file",
-                "file_types": [".bin"],
-                "required": True,
-            },
-            "n_batch": {"display_name": "N Batch", "advanced": True},
-            "n_ctx": {"display_name": "N Ctx", "advanced": True},
-            "n_gpu_layers": {"display_name": "N GPU Layers", "advanced": True},
-            "n_parts": {"display_name": "N Parts", "advanced": True},
-            "n_threads": {"display_name": "N Threads", "advanced": True},
-            "repeat_penalty": {"display_name": "Repeat Penalty", "advanced": True},
-            "rope_freq_base": {"display_name": "Rope Freq Base", "advanced": True},
-            "rope_freq_scale": {"display_name": "Rope Freq Scale", "advanced": True},
-            "seed": {"display_name": "Seed", "advanced": True},
-            "stop": {"display_name": "Stop", "advanced": True},
-            "streaming": {"display_name": "Streaming", "advanced": True},
-            "suffix": {"display_name": "Suffix", "advanced": True},
-            "tags": {"display_name": "Tags", "advanced": True},
-            "temperature": {"display_name": "Temperature"},
-            "top_k": {"display_name": "Top K", "advanced": True},
-            "top_p": {"display_name": "Top P", "advanced": True},
-            "use_mlock": {"display_name": "Use Mlock", "advanced": True},
-            "use_mmap": {"display_name": "Use Mmap", "advanced": True},
-            "verbose": {"display_name": "Verbose", "advanced": True},
-            "vocab_only": {"display_name": "Vocab Only", "advanced": True},
-            "input_value": {"display_name": "Input"},
-            "stream": {
-                "display_name": "Stream",
-                "info": "Stream the response from the model.",
-            },
-            "system_message": {
-                "display_name": "System Message",
-                "info": "System message to pass to the model.",
-            },
-        }
-    def build(
-        self,
-        model_path: str,
-        input_value: Text,
-        grammar: Optional[str] = None,
-        cache: Optional[bool] = None,
-        client: Optional[Any] = None,
-        echo: Optional[bool] = False,
-        f16_kv: bool = True,
-        grammar_path: Optional[str] = None,
-        last_n_tokens_size: Optional[int] = 64,
-        logits_all: bool = False,
-        logprobs: Optional[int] = None,
-        lora_base: Optional[str] = None,
-        lora_path: Optional[str] = None,
-        max_tokens: Optional[int] = 256,
-        metadata: Optional[Dict] = None,
-        model_kwargs: Dict = {},
-        n_batch: Optional[int] = 8,
-        n_ctx: int = 512,
-        n_gpu_layers: Optional[int] = 1,
-        n_parts: int = -1,
-        n_threads: Optional[int] = 1,
-        repeat_penalty: Optional[float] = 1.1,
-        rope_freq_base: float = 10000.0,
-        rope_freq_scale: float = 1.0,
-        seed: int = -1,
-        stop: Optional[List[str]] = [],
-        streaming: bool = True,
-        suffix: Optional[str] = "",
-        tags: Optional[List[str]] = [],
-        temperature: Optional[float] = 0.8,
-        top_k: Optional[int] = 40,
-        top_p: Optional[float] = 0.95,
-        use_mlock: bool = False,
-        use_mmap: Optional[bool] = True,
-        verbose: bool = True,
-        vocab_only: bool = False,
-        stream: bool = False,
-    ) -> Text:
-        output = LlamaCpp(
-            model_path=model_path,
-            grammar=grammar,
-            cache=cache,
-            client=client,
-            echo=echo,
-            f16_kv=f16_kv,
-            grammar_path=grammar_path,
-            last_n_tokens_size=last_n_tokens_size,
-            logits_all=logits_all,
-            logprobs=logprobs,
-            lora_base=lora_base,
-            lora_path=lora_path,
-            max_tokens=max_tokens,
-            metadata=metadata,
-            model_kwargs=model_kwargs,
-            n_batch=n_batch,
-            n_ctx=n_ctx,
-            n_gpu_layers=n_gpu_layers,
-            n_parts=n_parts,
-            n_threads=n_threads,
-            repeat_penalty=repeat_penalty,
-            rope_freq_base=rope_freq_base,
-            rope_freq_scale=rope_freq_scale,
-            seed=seed,
-            stop=stop,
-            streaming=streaming,
-            suffix=suffix,
-            tags=tags,
-            temperature=temperature,
-            top_k=top_k,
-            top_p=top_p,
-            use_mlock=use_mlock,
-            use_mmap=use_mmap,
-            verbose=verbose,
-            vocab_only=vocab_only,
-        )
-        return self.get_result(runnable=output, stream=stream, input_value=input_value)
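
Likewise, the removed LlamaCpp component forwarded its parameters to LangChain's LlamaCpp class. A minimal sketch of calling it directly, assuming a local llama.cpp-compatible model file (the path, sampling values, and prompt are placeholders, not part of this commit):

from langchain_community.llms.llamacpp import LlamaCpp

# Placeholder local model path; point this at any llama.cpp-compatible model file.
llm = LlamaCpp(
    model_path="./models/llama-model.bin",
    n_ctx=512,
    temperature=0.8,
    max_tokens=256,
)
# Same Runnable interface as above: prompt string in, generated text out.
print(llm.invoke("Explain what a unified diff hunk header means."))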

View file

@@ -7,7 +7,7 @@ from langflow.schema import Record
 class ChatOutput(ChatComponent):
     display_name = "Chat Output"
-    description = "Used to send a message to the chat."
+    description = "Used to send a chat message."
     icon = "ChatOutput"
     def build(

View file

@@ -6,7 +6,7 @@ from langflow.field_typing import Text
 class TextOutput(TextComponent):
     display_name = "Text Output"
-    description = "Used to pass text output to the next component."
+    description = "Used to send a text output."
     field_config = {
         "input_value": {"display_name": "Value"},