Update imports and type annotations in several components

Gabriel Luiz Freitas Almeida 2024-02-15 18:23:27 -03:00
commit daf2aec0af
14 changed files with 72 additions and 70 deletions

View file

@@ -1,10 +1,8 @@
from langflow import CustomComponent
from typing import Callable, Union
from langchain.chains import LLMCheckerChain
from typing import Union, Callable
from langflow.field_typing import (
BaseLanguageModel,
Chain,
)
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, Chain
class LLMCheckerChainComponent(CustomComponent):
@@ -21,4 +19,4 @@ class LLMCheckerChainComponent(CustomComponent):
self,
llm: BaseLanguageModel,
) -> Union[Chain, Callable]:
return LLMCheckerChain(llm=llm)
return LLMCheckerChain.from_llm(llm=llm)
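
Note on this change: the bare LLMCheckerChain(llm=...) constructor path is deprecated upstream, and the from_llm factory is the supported way to build the chain's internal sub-chains from a model. A minimal sketch of the new call path, assuming an OpenAI chat model and an OPENAI_API_KEY in the environment:

from langchain.chains import LLMCheckerChain
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(temperature=0)
# from_llm wires up the chain's question/assertion sub-chains from the
# model; this is what the deprecated direct construction used to do
# implicitly via a validator.
chain = LLMCheckerChain.from_llm(llm=llm)
print(chain.run("What type of mammal lays the biggest eggs?"))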

View file

@@ -1,6 +1,8 @@
from langflow import CustomComponent
from typing import Any, Dict, List
from langchain.docstore.document import Document
from typing import Optional, Dict, Any
from langchain.document_loaders.directory import DirectoryLoader
from langflow import CustomComponent
class DirectoryLoaderComponent(CustomComponent):
@@ -23,20 +25,18 @@ class DirectoryLoaderComponent(CustomComponent):
self,
glob: str,
path: str,
load_hidden: Optional[bool] = False,
max_concurrency: Optional[int] = 10,
metadata: Optional[dict] = {},
recursive: Optional[bool] = True,
silent_errors: Optional[bool] = False,
use_multithreading: Optional[bool] = True,
) -> Document:
return Document(
max_concurrency: int = 2,
load_hidden: bool = False,
recursive: bool = True,
silent_errors: bool = False,
use_multithreading: bool = True,
) -> List[Document]:
return DirectoryLoader(
glob=glob,
path=path,
load_hidden=load_hidden,
max_concurrency=max_concurrency,
metadata=metadata,
recursive=recursive,
silent_errors=silent_errors,
use_multithreading=use_multithreading,
)
).load()
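
Note on this change: build() now delegates to the loader and eagerly calls .load(), so the component returns the loaded documents rather than a single hand-assembled Document; the old metadata argument is dropped because DirectoryLoader does not accept one. A minimal sketch, assuming a hypothetical ./docs directory and the default loader's dependencies installed:

from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.directory import DirectoryLoader

loader = DirectoryLoader(
    path="./docs",          # hypothetical directory
    glob="**/*.md",
    load_hidden=False,
    max_concurrency=2,
    recursive=True,
    silent_errors=False,
    use_multithreading=True,
)
documents: List[Document] = loader.load()  # eager load, matching List[Document]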

View file

@@ -1,6 +1,8 @@
from langflow import CustomComponent
from typing import Optional, Dict
from typing import Dict, Optional
from langchain_community.embeddings.huggingface import HuggingFaceInferenceAPIEmbeddings
from langflow import CustomComponent
from pydantic.v1.types import SecretStr
class HuggingFaceInferenceAPIEmbeddingsComponent(CustomComponent):
@@ -29,12 +31,12 @@ class HuggingFaceInferenceAPIEmbeddingsComponent(CustomComponent):
model_kwargs: Optional[Dict] = {},
multi_process: bool = False,
) -> HuggingFaceInferenceAPIEmbeddings:
if api_key:
secret_api_key = SecretStr(api_key)
else:
raise ValueError("API Key is required")
return HuggingFaceInferenceAPIEmbeddings(
api_key=api_key,
api_key=secret_api_key,
api_url=api_url,
model_name=model_name,
cache_folder=cache_folder,
encode_kwargs=encode_kwargs,
model_kwargs=model_kwargs,
multi_process=multi_process,
)
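
Note on this change: the guard-then-wrap pattern is worth isolating. Pydantic's SecretStr keeps the key out of reprs and logs, and raising early gives a clearer error than a failed request. A small sketch with a hypothetical key:

from typing import Optional

from pydantic.v1.types import SecretStr

def to_secret(api_key: Optional[str]) -> SecretStr:
    # Fail fast with a readable message instead of passing None downstream.
    if not api_key:
        raise ValueError("API Key is required")
    return SecretStr(api_key)

key = to_secret("hf_example_key")   # hypothetical key
print(key)                          # **********  (masked in logs and reprs)
print(key.get_secret_value())       # explicit opt-in to read the raw value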

View file

@@ -1,9 +1,9 @@
from typing import Any, Callable, Dict, List, Optional, Union
from langchain_openai.embeddings.base import OpenAIEmbeddings
from langflow import CustomComponent
from langflow.field_typing import NestedDict
from pydantic.v1.types import SecretStr
class OpenAIEmbeddingsComponent(CustomComponent):
@@ -67,7 +67,7 @@ class OpenAIEmbeddingsComponent(CustomComponent):
},
"skip_empty": {"display_name": "Skip Empty", "advanced": True},
"tiktoken_model_name": {"display_name": "TikToken Model Name"},
"tikToken_enable": {"display_name": "TikToken Enable"},
"tikToken_enable": {"display_name": "TikToken Enable", "advanced": True},
}
def build(
@@ -92,14 +92,17 @@ class OpenAIEmbeddingsComponent(CustomComponent):
request_timeout: Optional[float] = None,
show_progress_bar: bool = False,
skip_empty: bool = False,
tikToken_enable: bool = True,
tiktoken_enable: bool = True,
tiktoken_model_name: Optional[str] = None,
) -> Union[OpenAIEmbeddings, Callable]:
# This is to avoid errors with Vector Stores (e.g Chroma)
if disallowed_special == ["all"]:
disallowed_special = "all"
disallowed_special = "all" # type: ignore
api_key = SecretStr(openai_api_key) if openai_api_key else None
return OpenAIEmbeddings(
tiktoken_enabled=tikToken_enable,
tiktoken_enabled=tiktoken_enable,
default_headers=default_headers,
default_query=default_query,
allowed_special=set(allowed_special),
@@ -112,7 +115,7 @@ class OpenAIEmbeddingsComponent(CustomComponent):
model=model,
model_kwargs=model_kwargs,
base_url=openai_api_base,
api_key=openai_api_key,
api_key=api_key,
openai_api_type=openai_api_type,
api_version=openai_api_version,
organization=openai_organization,

View file

@@ -1,4 +1,4 @@
from pydantic import SecretStr
from pydantic.v1.types import SecretStr
from langflow import CustomComponent
from typing import Optional, Union, Callable
from langflow.field_typing import BaseLanguageModel
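
Note on this change: this one-line import swap is the compatibility story behind the SecretStr edits in this commit. LangChain 0.1.x still validates against pydantic v1 models, and pydantic 2 ships a vendored copy of the v1 API for exactly this case. A sketch of the two import paths:

# pydantic 2 re-exports the old API under pydantic.v1; LangChain 0.1.x
# fields are v1 models, so in general only the v1 SecretStr passes
# their validation.
from pydantic.v1.types import SecretStr   # compatible with LangChain's v1 models
# from pydantic import SecretStr          # v2 type; rejected by v1 validators

token = SecretStr("example-token")        # hypothetical value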

View file

@@ -1,9 +1,9 @@
from typing import Optional
from langchain_google_genai import ChatGoogleGenerativeAI # type: ignore
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, RangeSpec, TemplateField
from pydantic.v1.types import SecretStr
class GoogleGenerativeAIComponent(CustomComponent):
@@ -63,10 +63,10 @@ class GoogleGenerativeAIComponent(CustomComponent):
) -> BaseLanguageModel:
return ChatGoogleGenerativeAI(
model=model,
max_output_tokens=max_output_tokens or None,
max_output_tokens=max_output_tokens or None, # type: ignore
temperature=temperature,
top_k=top_k or None,
top_p=top_p or None,
top_p=top_p or None, # type: ignore
n=n or 1,
google_api_key=google_api_key,
google_api_key=SecretStr(google_api_key),
)
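
Note on this change: same pattern for the Gemini wrapper — its google_api_key field is a SecretStr, so the raw string is wrapped at the call site, and the # type: ignore comments quiet mypy where optional ints flow into stricter parameter types. A minimal construction sketch with a hypothetical key:

from langchain_google_genai import ChatGoogleGenerativeAI  # type: ignore
from pydantic.v1.types import SecretStr

llm = ChatGoogleGenerativeAI(
    model="gemini-pro",                        # assumption: any supported model name
    google_api_key=SecretStr("AIza-example"),  # hypothetical key, masked in reprs
    temperature=0.7,
)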

View file

@@ -1,8 +1,7 @@
from langchain_community.agent_toolkits.openapi.toolkit import BaseToolkit, OpenAPIToolkit
from langchain_community.utilities.requests import TextRequestsWrapper
from langflow import CustomComponent
from langflow.field_typing import AgentExecutor
from typing import Callable
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit
class OpenAPIToolkitComponent(CustomComponent):
@@ -19,5 +18,5 @@ class OpenAPIToolkitComponent(CustomComponent):
self,
json_agent: AgentExecutor,
requests_wrapper: TextRequestsWrapper,
) -> Callable:
) -> BaseToolkit:
return OpenAPIToolkit(json_agent=json_agent, requests_wrapper=requests_wrapper)
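
Note on this change: returning BaseToolkit instead of Callable is more than cosmetics; callers can now rely on the toolkit interface without casting. A sketch of what that enables, with json_agent left as a placeholder for the agent executor the component receives:

from typing import List

from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain_community.utilities.requests import TextRequestsWrapper

def tool_names(json_agent, requests_wrapper: TextRequestsWrapper) -> List[str]:
    toolkit = OpenAPIToolkit(json_agent=json_agent, requests_wrapper=requests_wrapper)
    # get_tools() is declared on BaseToolkit, so the narrower return type
    # guarantees it exists on whatever build() hands back.
    return [tool.name for tool in toolkit.get_tools()]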

View file

@@ -1,6 +1,7 @@
from langflow import CustomComponent
from typing import Union, Callable
from typing import Callable, Union
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
from langflow import CustomComponent
class GoogleSearchAPIWrapperComponent(CustomComponent):
@@ -18,4 +19,4 @@ class GoogleSearchAPIWrapperComponent(CustomComponent):
google_api_key: str,
google_cse_id: str,
) -> Union[GoogleSearchAPIWrapper, Callable]:
return GoogleSearchAPIWrapper(google_api_key=google_api_key, google_cse_id=google_cse_id)
return GoogleSearchAPIWrapper(google_api_key=google_api_key, google_cse_id=google_cse_id) # type: ignore
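
For context, a usage sketch of the wrapper being returned; the credentials here are hypothetical, and if the arguments are omitted the wrapper falls back to the GOOGLE_API_KEY and GOOGLE_CSE_ID environment variables:

from langchain_community.utilities.google_search import GoogleSearchAPIWrapper

search = GoogleSearchAPIWrapper(  # type: ignore
    google_api_key="example-api-key",   # hypothetical credentials
    google_cse_id="example-cse-id",
)
print(search.run("langflow"))           # top results as a single string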

View file

@@ -1,9 +1,9 @@
from langflow import CustomComponent
from typing import Dict, Optional
from typing import Dict
# Assuming the existence of GoogleSerperAPIWrapper class in the serper module
# If this class does not exist, you would need to create it or import the appropriate class from another module
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
from langflow import CustomComponent
class GoogleSerperAPIWrapperComponent(CustomComponent):
@@ -42,6 +42,5 @@ class GoogleSerperAPIWrapperComponent(CustomComponent):
def build(
self,
serper_api_key: str,
result_key_for_type: Optional[Dict[str, str]] = None,
) -> GoogleSerperAPIWrapper:
return GoogleSerperAPIWrapper(result_key_for_type=result_key_for_type, serper_api_key=serper_api_key)
return GoogleSerperAPIWrapper(serper_api_key=serper_api_key)

View file

@@ -5,7 +5,6 @@ import pinecone # type: ignore
from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.pinecone import Pinecone
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings
@@ -31,11 +30,11 @@ class PineconeComponent(CustomComponent):
embedding: Embeddings,
pinecone_env: str,
documents: List[Document],
text_key: str = "text",
pool_threads: int = 4,
index_name: Optional[str] = None,
pinecone_api_key: Optional[str] = None,
text_key: Optional[str] = "text",
namespace: Optional[str] = "default",
pool_threads: Optional[int] = None,
) -> Union[VectorStore, Pinecone, BaseRetriever]:
if pinecone_api_key is None or pinecone_env is None:
raise ValueError("Pinecone API Key and Environment are required.")
@@ -43,6 +42,8 @@ class PineconeComponent(CustomComponent):
raise ValueError("Pinecone API Key is required.")
pinecone.init(api_key=pinecone_api_key, environment=pinecone_env) # type: ignore
if not index_name:
raise ValueError("Index Name is required.")
if documents:
return Pinecone.from_documents(
documents=documents,
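
Note on this change: the new index_name guard continues the fail-fast style already used for the API key — a missing value raises immediately with a field name instead of surfacing later as an opaque client error. The pattern in isolation:

from typing import Optional

def require(value: Optional[str], name: str) -> str:
    # Validate inputs before touching the Pinecone client so errors name
    # the missing field rather than failing deep inside the SDK.
    if not value:
        raise ValueError(f"{name} is required.")
    return value

try:
    require(None, "Index Name")
except ValueError as err:
    print(err)  # Index Name is required.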

View file

@@ -36,14 +36,14 @@ class QdrantComponent(CustomComponent):
def build(
self,
embedding: Embeddings,
collection_name: str,
documents: Optional[Document] = None,
api_key: Optional[str] = None,
collection_name: Optional[str] = None,
content_payload_key: str = "page_content",
distance_func: str = "Cosine",
grpc_port: Optional[int] = 6334,
host: Optional[str] = None,
grpc_port: int = 6334,
https: bool = False,
host: Optional[str] = None,
location: Optional[str] = None,
metadata_payload_key: str = "metadata",
path: Optional[str] = None,
@@ -51,7 +51,7 @@ class QdrantComponent(CustomComponent):
prefer_grpc: bool = False,
prefix: Optional[str] = None,
search_kwargs: Optional[NestedDict] = None,
timeout: Optional[float] = None,
timeout: Optional[int] = None,
url: Optional[str] = None,
) -> Union[VectorStore, Qdrant, BaseRetriever]:
if documents is None:
@@ -77,13 +77,11 @@ class QdrantComponent(CustomComponent):
client=client,
collection_name=collection_name,
embeddings=embedding,
search_kwargs=search_kwargs,
distance_func=distance_func,
)
return vs
else:
vs = Qdrant.from_documents(
documents=documents,
documents=documents, # type: ignore
embedding=embedding,
api_key=api_key,
collection_name=collection_name,
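
Note on this change: collection_name loses its Optional default because both code paths need it, and search_kwargs is dropped from the direct Qdrant(...) construction since it is not a field of the vector store. A sketch of the no-documents path, assuming a local Qdrant instance and using the fake embedding class LangChain ships for tests:

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Qdrant
from qdrant_client import QdrantClient

# Attach to an existing collection: the name is now mandatory, so there
# is no silent default to fall back on.
client = QdrantClient(host="localhost", grpc_port=6334, prefer_grpc=False)
store = Qdrant(
    client=client,
    collection_name="my-docs",            # hypothetical collection
    embeddings=FakeEmbeddings(size=768),  # stand-in embedding for the sketch
)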

View file

@@ -5,7 +5,6 @@ from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.redis import Redis
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langflow import CustomComponent
@@ -31,6 +30,7 @@ class RedisComponent(CustomComponent):
"code": {"show": False, "display_name": "Code"},
"documents": {"display_name": "Documents", "is_list": True},
"embedding": {"display_name": "Embedding"},
"schema": {"display_name": "Schema", "file_types": [".yaml"]},
"redis_server_url": {
"display_name": "Redis Server Connection String",
"advanced": False,
@@ -43,6 +43,7 @@ class RedisComponent(CustomComponent):
embedding: Embeddings,
redis_server_url: str,
redis_index_name: str,
schema: Optional[str] = None,
documents: Optional[Document] = None,
) -> Union[VectorStore, BaseRetriever]:
"""
@@ -58,10 +59,12 @@ class RedisComponent(CustomComponent):
- VectorStore: The Vector Store object.
"""
if documents is None:
if schema is None:
raise ValueError("If no documents are provided, a schema must be provided.")
redis_vs = Redis.from_existing_index(
embedding=embedding,
index_name=redis_index_name,
schema=None,
schema=schema,
key_prefix=None,
redis_url=redis_server_url,
)
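
Note on this change: the new schema input exists because Redis.from_existing_index cannot reconstruct documents without the schema the index was written with; passing schema=None, as the old code did, fails for any non-trivial index. A sketch of the existing-index path with a hypothetical YAML schema file:

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores.redis import Redis

store = Redis.from_existing_index(
    embedding=FakeEmbeddings(size=768),  # stand-in embedding for the sketch
    index_name="my-index",               # hypothetical index
    schema="redis_schema.yaml",          # schema the index was created with
    key_prefix=None,
    redis_url="redis://localhost:6379",
)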

View file

@@ -6,7 +6,6 @@ from typing import List, Optional, Union
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores.vectara import Vectara
from langchain_core.vectorstores import VectorStore
from langflow import CustomComponent
from langflow.field_typing import BaseRetriever, Document
@@ -46,7 +45,7 @@ class VectaraComponent(CustomComponent):
if documents is not None:
return Vectara.from_documents(
documents=documents,
documents=documents, # type: ignore
embedding=FakeEmbeddings(size=768),
vectara_customer_id=vectara_customer_id,
vectara_corpus_id=vectara_corpus_id,

View file

@@ -5,7 +5,6 @@ from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.pgvector import PGVector
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langflow import CustomComponent
@@ -63,13 +62,13 @@ class PGVectorComponent(CustomComponent):
collection_name=collection_name,
connection_string=pg_server_url,
)
vector_store = PGVector.from_documents(
embedding=embedding,
documents=documents,
collection_name=collection_name,
connection_string=pg_server_url,
)
else:
vector_store = PGVector.from_documents(
embedding=embedding,
documents=documents, # type: ignore
collection_name=collection_name,
connection_string=pg_server_url,
)
except Exception as e:
raise RuntimeError(f"Failed to build PGVector: {e}")
return vector_store
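
Note on this change: the restructure puts from_documents behind an else, so an existing collection is reused when no documents are supplied and data is only written when there is data to write. The corrected branching, reduced to its shape (parameter names follow the component's build signature):

from langchain_community.vectorstores.pgvector import PGVector

def build_store(embedding, documents, collection_name, pg_server_url):
    if not documents:
        # No input documents: connect to the existing collection.
        return PGVector(
            embedding_function=embedding,
            collection_name=collection_name,
            connection_string=pg_server_url,
        )
    # Otherwise create or extend the collection from the documents.
    return PGVector.from_documents(
        embedding=embedding,
        documents=documents,
        collection_name=collection_name,
        connection_string=pg_server_url,
    )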