Update import statements in __init__.py files

This commit is contained in:
Gabriel Luiz Freitas Almeida 2024-03-28 20:16:35 -03:00
commit 742ec9f0d5
12 changed files with 23 additions and 185 deletions

View file

@@ -1,18 +1,17 @@
__all__ = [
"agents",
"chains",
"data",
"documentloaders",
"embeddings",
"experimental",
"helpers",
"inputs",
"memories",
"model_specs",
"models",
"outputs",
"retrievers",
"textsplitters",
"toolkits",
"tools",
"vectorsearch",
"vectorstores",
]

View file

@@ -5,6 +5,7 @@ from .OpenAIConversationalAgent import ConversationalAgent
from .SQLAgent import SQLAgentComponent
from .VectorStoreAgent import VectorStoreAgentComponent
from .VectorStoreRouterAgent import VectorStoreRouterAgentComponent
from .XMLAgent import XMLAgentComponent
__all__ = [
"AgentInitializerComponent",
@@ -14,4 +15,5 @@ __all__ = [
"SQLAgentComponent",
"VectorStoreAgentComponent",
"VectorStoreRouterAgentComponent",
"XMLAgentComponent",
]

View file

@@ -1,7 +1,7 @@
from .APIRequest import APIRequest
from .Directory import DirectoryComponent
from .File import FileComponent
from .FileLoader import FileLoaderComponent
from .URL import URLComponent
__all__ = ["APIRequest", "DirectoryComponent", "FileComponent", "FileLoaderComponent", "URLComponent"]
__all__ = ["APIRequest", "DirectoryComponent", "FileComponent", "URLComponent"]

View file

@@ -1,23 +1,27 @@
from .ClearMessageHistory import ClearMessageHistoryComponent
from .ExtractDataFromRecord import ExtractKeyFromRecordComponent
from .Listen import ListenComponent
from .FlowTool import FlowToolComponent
from .ListFlows import ListFlowsComponent
from .Listen import ListenComponent
from .MergeRecords import MergeRecordsComponent
from .Notify import NotifyComponent
from .PythonFunction import PythonFunctionComponent
from .RunFlow import RunFlowComponent
from .RunnableExecutor import RunnableExecComponent
from .SQLExecutor import SQLExecutorComponent
from .SubFlow import SubFlowComponent
__all__ = [
"ClearMessageHistoryComponent",
"ExtractKeyFromRecordComponent",
"ListenComponent",
"FlowToolComponent",
"ListFlowsComponent",
"ListenComponent",
"MergeRecordsComponent",
"MessageHistoryComponent",
"NotifyComponent",
"PythonFunctionComponent",
"RunFlowComponent",
"RunnableExecComponent",
"SQLExecutorComponent",
"TextToRecordComponent",
"SubFlowComponent",
]

View file

@@ -2,8 +2,6 @@ from .CustomComponent import Component
from .DocumentToRecord import DocumentToRecordComponent
from .IDGenerator import UUIDGeneratorComponent
from .MessageHistory import MessageHistoryComponent
from .PythonFunction import PythonFunctionComponent
from .RecordsAsText import RecordsAsTextComponent
from .TextToRecord import TextToRecordComponent
from .UpdateRecord import UpdateRecordComponent
@@ -13,7 +11,7 @@ __all__ = [
"DocumentToRecordComponent",
"UUIDGeneratorComponent",
"PythonFunctionComponent",
"RecordsAsTextComponent",
"RecordsToTextComponent",
"TextToRecordComponent",
"MessageHistoryComponent",
]

View file

@@ -2,4 +2,4 @@ from .ChatInput import ChatInput
from .Prompt import PromptComponent
from .TextInput import TextInput
__all__ = ["ChatInput", "TextInput", "PromptComponent"]
__all__ = ["ChatInput", "PromptComponent", "TextInput"]

View file

@@ -1,33 +0,0 @@
from typing import Dict, Optional
from langchain_community.llms.ctransformers import CTransformers
from langflow.interface.custom.custom_component import CustomComponent
class CTransformersComponent(CustomComponent):
    """Langflow wrapper around the CTransformers LLM from langchain-community.

    ``build_config`` declares the UI fields; ``build`` turns the collected
    values into a ``CTransformers`` instance.
    """

    display_name = "CTransformers"
    description = "C Transformers LLM models"
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"

    def build_config(self):
        """Describe the fields shown for this component in the UI."""
        # Default runtime/generation settings, forwarded verbatim as the
        # initial value of the "config" dict field.
        default_config = '{"top_k":40,"top_p":0.95,"temperature":0.8,"repetition_penalty":1.1,"last_n_tokens":64,"seed":-1,"max_new_tokens":256,"stop":"","stream":"False","reset":"True","batch_size":8,"threads":-1,"context_length":-1,"gpu_layers":0}'
        model_file_field = {
            "display_name": "Model File",
            "required": False,
            "field_type": "file",
            "file_types": [".bin"],
        }
        config_field = {
            "display_name": "Config",
            "advanced": True,
            "required": False,
            "field_type": "dict",
            "value": default_config,
        }
        return {
            "model": {"display_name": "Model", "required": True},
            "model_file": model_file_field,
            "model_type": {"display_name": "Model Type", "required": True},
            "config": config_field,
        }

    def build(self, model: str, model_file: str, model_type: str, config: Optional[Dict] = None) -> CTransformers:
        """Instantiate the wrapped CTransformers LLM from the field values."""
        llm = CTransformers(model=model, model_file=model_file, model_type=model_type, config=config)  # type: ignore
        return llm

View file

@@ -1,131 +0,0 @@
from typing import Any, Dict, List, Optional
from langchain_community.llms.llamacpp import LlamaCpp
from langflow.interface.custom.custom_component import CustomComponent
class LlamaCppComponent(CustomComponent):
    """Langflow wrapper around the llama.cpp LLM from langchain-community.

    ``build_config`` declares the UI fields (only ``model_path`` is required;
    ``temperature`` is shown by default and the rest are "advanced");
    ``build`` forwards the collected values to ``LlamaCpp``.
    """

    display_name = "LlamaCpp"
    description = "llama.cpp model."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"

    def build_config(self):
        """Describe the fields shown for this component in the UI."""
        return {
            "grammar": {"display_name": "Grammar", "advanced": True},
            "cache": {"display_name": "Cache", "advanced": True},
            "client": {"display_name": "Client", "advanced": True},
            "echo": {"display_name": "Echo", "advanced": True},
            "f16_kv": {"display_name": "F16 KV", "advanced": True},
            "grammar_path": {"display_name": "Grammar Path", "advanced": True},
            "last_n_tokens_size": {"display_name": "Last N Tokens Size", "advanced": True},
            "logits_all": {"display_name": "Logits All", "advanced": True},
            "logprobs": {"display_name": "Logprobs", "advanced": True},
            "lora_base": {"display_name": "Lora Base", "advanced": True},
            "lora_path": {"display_name": "Lora Path", "advanced": True},
            "max_tokens": {"display_name": "Max Tokens", "advanced": True},
            "metadata": {"display_name": "Metadata", "advanced": True},
            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
            "model_path": {
                "display_name": "Model Path",
                "field_type": "file",
                "file_types": [".bin"],
                "required": True,
            },
            "n_batch": {"display_name": "N Batch", "advanced": True},
            "n_ctx": {"display_name": "N Ctx", "advanced": True},
            "n_gpu_layers": {"display_name": "N GPU Layers", "advanced": True},
            "n_parts": {"display_name": "N Parts", "advanced": True},
            "n_threads": {"display_name": "N Threads", "advanced": True},
            "repeat_penalty": {"display_name": "Repeat Penalty", "advanced": True},
            "rope_freq_base": {"display_name": "Rope Freq Base", "advanced": True},
            "rope_freq_scale": {"display_name": "Rope Freq Scale", "advanced": True},
            "seed": {"display_name": "Seed", "advanced": True},
            "stop": {"display_name": "Stop", "advanced": True},
            "streaming": {"display_name": "Streaming", "advanced": True},
            "suffix": {"display_name": "Suffix", "advanced": True},
            "tags": {"display_name": "Tags", "advanced": True},
            "temperature": {"display_name": "Temperature"},
            "top_k": {"display_name": "Top K", "advanced": True},
            "top_p": {"display_name": "Top P", "advanced": True},
            "use_mlock": {"display_name": "Use Mlock", "advanced": True},
            "use_mmap": {"display_name": "Use Mmap", "advanced": True},
            "verbose": {"display_name": "Verbose", "advanced": True},
            "vocab_only": {"display_name": "Vocab Only", "advanced": True},
        }

    def build(
        self,
        model_path: str,
        grammar: Optional[str] = None,
        cache: Optional[bool] = None,
        client: Optional[Any] = None,
        echo: Optional[bool] = False,
        f16_kv: bool = True,
        grammar_path: Optional[str] = None,
        last_n_tokens_size: Optional[int] = 64,
        logits_all: bool = False,
        logprobs: Optional[int] = None,
        lora_base: Optional[str] = None,
        lora_path: Optional[str] = None,
        max_tokens: Optional[int] = 256,
        metadata: Optional[Dict] = None,
        model_kwargs: Optional[Dict] = None,  # was mutable default `{}`
        n_batch: Optional[int] = 8,
        n_ctx: int = 512,
        n_gpu_layers: Optional[int] = 1,
        n_parts: int = -1,
        n_threads: Optional[int] = 1,
        repeat_penalty: Optional[float] = 1.1,
        rope_freq_base: float = 10000.0,
        rope_freq_scale: float = 1.0,
        seed: int = -1,
        stop: Optional[List[str]] = None,  # was mutable default `[]`
        streaming: bool = True,
        suffix: Optional[str] = "",
        tags: Optional[List[str]] = None,  # was mutable default `[]`
        temperature: Optional[float] = 0.8,
        top_k: Optional[int] = 40,
        top_p: Optional[float] = 0.95,
        use_mlock: bool = False,
        use_mmap: Optional[bool] = True,
        verbose: bool = True,
        vocab_only: bool = False,
    ) -> LlamaCpp:
        """Instantiate the wrapped llama.cpp LLM from the field values.

        Returns a configured ``LlamaCpp`` instance; all parameters are passed
        straight through to its constructor.
        """
        # Fix for the classic mutable-default-argument pitfall: the original
        # signature used `{}` / `[]` defaults, which are created once and
        # shared by every call (and thus by every LlamaCpp instance built
        # here). Substitute fresh objects per call instead; omitted arguments
        # see exactly the same values as before.
        model_kwargs = {} if model_kwargs is None else model_kwargs
        stop = [] if stop is None else stop
        tags = [] if tags is None else tags
        return LlamaCpp(
            model_path=model_path,
            grammar=grammar,
            cache=cache,
            client=client,
            echo=echo,
            f16_kv=f16_kv,
            grammar_path=grammar_path,
            last_n_tokens_size=last_n_tokens_size,
            logits_all=logits_all,
            logprobs=logprobs,
            lora_base=lora_base,
            lora_path=lora_path,
            max_tokens=max_tokens,
            metadata=metadata,
            model_kwargs=model_kwargs,
            n_batch=n_batch,
            n_ctx=n_ctx,
            n_gpu_layers=n_gpu_layers,
            n_parts=n_parts,
            n_threads=n_threads,
            repeat_penalty=repeat_penalty,
            rope_freq_base=rope_freq_base,
            rope_freq_scale=rope_freq_scale,
            seed=seed,
            stop=stop,
            streaming=streaming,
            suffix=suffix,
            tags=tags,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            use_mlock=use_mlock,
            use_mmap=use_mmap,
            verbose=verbose,
            vocab_only=vocab_only,
        )

View file

@@ -4,7 +4,6 @@ from .AnthropicSpecs import AnthropicComponent
from .AzureChatOpenAISpecs import AzureChatOpenAISpecsComponent
from .BaiduQianfanChatEndpointsSpecs import QianfanChatEndpointComponent
from .BaiduQianfanLLMEndpointsSpecs import QianfanLLMEndpointComponent
from .CTransformersSpecs import CTransformersComponent
from .ChatAnthropicSpecs import ChatAnthropicComponent
from .ChatLiteLLMSpecs import ChatLiteLLMComponent
from .ChatOllamaEndpointSpecs import ChatOllamaComponent
@@ -13,7 +12,6 @@ from .ChatVertexAISpecs import ChatVertexAIComponent
from .CohereSpecs import CohereComponent
from .GoogleGenerativeAISpecs import GoogleGenerativeAIComponent
from .HuggingFaceEndpointsSpecs import HuggingFaceEndpointsComponent
from .LlamaCppSpecs import LlamaCppComponent
from .OllamaLLMSpecs import OllamaLLM
from .VertexAISpecs import VertexAIComponent
@@ -24,7 +22,6 @@ __all__ = [
"AzureChatOpenAISpecsComponent",
"QianfanChatEndpointComponent",
"QianfanLLMEndpointComponent",
"CTransformersComponent",
"ChatAnthropicComponent",
"ChatLiteLLMComponent",
"ChatOllamaComponent",
@@ -33,7 +30,6 @@ __all__ = [
"CohereComponent",
"GoogleGenerativeAIComponent",
"HuggingFaceEndpointsComponent",
"LlamaCppComponent",
"OllamaLLM",
"VertexAIComponent",
]

View file

@@ -2,11 +2,9 @@ from .AmazonBedrockModel import AmazonBedrockComponent
from .AnthropicModel import AnthropicLLM
from .AzureOpenAIModel import AzureChatOpenAIComponent
from .BaiduQianfanChatModel import QianfanChatEndpointComponent
from .CTransformersModel import CTransformersComponent
from .CohereModel import CohereComponent
from .GoogleGenerativeAIModel import GoogleGenerativeAIComponent
from .HuggingFaceModel import HuggingFaceEndpointsComponent
from .LlamaCppModel import LlamaCppComponent
from .OllamaModel import ChatOllamaComponent
from .OpenAIModel import OpenAIModelComponent
from .VertexAiModel import ChatVertexAIComponent
@@ -16,11 +14,9 @@ __all__ = [
"AnthropicLLM",
"AzureChatOpenAIComponent",
"QianfanChatEndpointComponent",
"CTransformersComponent",
"CohereComponent",
"GoogleGenerativeAIComponent",
"HuggingFaceEndpointsComponent",
"LlamaCppComponent",
"ChatOllamaComponent",
"OpenAIModelComponent",
"ChatVertexAIComponent",

View file

@@ -2,10 +2,12 @@ from .AmazonKendra import AmazonKendraRetrieverComponent
from .MetalRetriever import MetalRetrieverComponent
from .MultiQueryRetriever import MultiQueryRetrieverComponent
from .VectaraSelfQueryRetriver import VectaraSelfQueryRetriverComponent
from .VectorStoreRetriever import VectoStoreRetrieverComponent
__all__ = [
"AmazonKendraRetrieverComponent",
"MetalRetrieverComponent",
"MultiQueryRetrieverComponent",
"VectaraSelfQueryRetriverComponent",
"VectoStoreRetrieverComponent",
]

View file

@@ -0,0 +1,5 @@
from .RetrieverTool import RetrieverToolComponent
from .SearchAPITool import SearchApiToolComponent
from .SearchApi import SearchApi
__all__ = ["RetrieverToolComponent", "SearchApiToolComponent", "SearchApi"]