apply ruff

This commit is contained in:
Gabriel Luiz Freitas Almeida 2024-06-20 18:21:47 -03:00
commit c811e5f045
23 changed files with 10 additions and 119 deletions

View file

@ -1,9 +1,7 @@
from typing import Optional
from langchain_community.embeddings import BedrockEmbeddings
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
from langflow.io import DropdownInput, Output, SecretStrInput, TextInput
from langflow.io import DropdownInput, Output, TextInput
class AmazonBedrockEmbeddingsComponent(LCModelComponent):

View file

@ -1,4 +1,3 @@
from typing import Optional
from langchain_openai import AzureOpenAIEmbeddings
from pydantic.v1 import SecretStr

View file

@ -2,7 +2,7 @@ from langchain_community.embeddings.cohere import CohereEmbeddings
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, Output, SecretStrInput, TextInput
from langflow.io import DropdownInput, FloatInput, IntInput, Output, SecretStrInput, TextInput
class CohereEmbeddingsComponent(LCModelComponent):

View file

@ -1,5 +1,3 @@
from typing import Dict, Optional
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
from langflow.base.models.model import LCModelComponent

View file

@ -1,11 +1,9 @@
from typing import Dict, Optional
from langchain_community.embeddings.huggingface import HuggingFaceInferenceAPIEmbeddings
from pydantic.v1.types import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
from langflow.io import BoolInput, DictInput, FloatInput, Output, SecretStrInput, TextInput
from langflow.io import BoolInput, DictInput, Output, SecretStrInput, TextInput
class HuggingFaceInferenceAPIEmbeddingsComponent(LCModelComponent):

View file

@ -1,5 +1,3 @@
from typing import Optional
from langchain_community.embeddings import OllamaEmbeddings
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings

View file

@ -1,5 +1,3 @@
from typing import List, Optional
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
from langflow.io import BoolInput, DictInput, FileInput, FloatInput, IntInput, Output, TextInput

View file

@ -1,86 +0,0 @@
from typing import Optional
from langchain_anthropic import ChatAnthropic
from pydantic.v1.types import SecretStr
from langflow.field_typing import LanguageModel
class AnthropicLLM(CustomComponent):
    """Langflow component that builds a ChatAnthropic language model.

    Exposes the Anthropic model name, API key, token limit, temperature,
    and API endpoint as configurable fields, and returns a configured
    ``ChatAnthropic`` instance from ``build``.
    """

    display_name: str = "Anthropic"
    description: str = "Generate text using Anthropic Chat&Completion LLMs."
    icon = "Anthropic"
    field_order = [
        "model",
        "anthropic_api_key",
        "max_tokens",
        "temperature",
        "anthropic_api_url",
    ]

    def build_config(self):
        """Return the field configuration dict consumed by the Langflow UI."""
        model_options = [
            "claude-3-opus-20240229",
            "claude-3-sonnet-20240229",
            "claude-3-haiku-20240307",
            "claude-2.1",
            "claude-2.0",
            "claude-instant-1.2",
            "claude-instant-1",
        ]
        return {
            "model": {
                "display_name": "Model Name",
                "options": model_options,
                "info": "Name of the model to use.",
                "required": True,
                "value": "claude-3-opus-20240229",
            },
            "anthropic_api_key": {
                "display_name": "Anthropic API Key",
                "required": True,
                "password": True,
                "info": "Your Anthropic API key.",
            },
            "max_tokens": {
                "display_name": "Max Tokens",
                "advanced": True,
                "info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
                "value": 0.1,
            },
            "anthropic_api_url": {
                "display_name": "Anthropic API URL",
                "advanced": True,
                "info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
            },
            "code": {"show": False},
        }

    def build(
        self,
        model: str,
        anthropic_api_key: Optional[str] = None,
        max_tokens: Optional[int] = 1000,
        temperature: Optional[float] = None,
        anthropic_api_url: Optional[str] = None,
    ) -> LanguageModel:
        """Instantiate and return a ``ChatAnthropic`` model.

        Raises:
            ValueError: if the ChatAnthropic client cannot be constructed.
        """
        # Fall back to the public Anthropic endpoint when none is given.
        endpoint = anthropic_api_url or "https://api.anthropic.com"
        # Wrap the key only when one was actually provided.
        secret_key = SecretStr(anthropic_api_key) if anthropic_api_key else None
        try:
            chat_model = ChatAnthropic(
                model_name=model,
                anthropic_api_key=secret_key,
                max_tokens_to_sample=max_tokens,  # type: ignore
                temperature=temperature,
                anthropic_api_url=endpoint,
            )
        except Exception as e:
            raise ValueError("Could not connect to Anthropic API.") from e
        return chat_model

View file

@ -2,7 +2,7 @@ from langchain_aws import ChatBedrock
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel, Text
from langflow.field_typing import LanguageModel
from langflow.io import BoolInput, DictInput, DropdownInput, MessageInput, Output, StrInput

View file

@ -3,7 +3,7 @@ from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel, Text
from langflow.field_typing import LanguageModel
from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, SecretStrInput, StrInput

View file

@ -2,7 +2,7 @@ from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel, Text
from langflow.field_typing import LanguageModel
from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, SecretStrInput, StrInput

View file

@ -47,7 +47,7 @@ class RecursiveCharacterTextSplitterComponent(Component):
Split text into chunks of a specified length.
Args:
separators (list[str]): The characters to split on.
separators (list[str] | None): The characters to split on.
chunk_size (int): The maximum length of each chunk.
chunk_overlap (int): The amount of overlap between chunks.
@ -63,9 +63,9 @@ class RecursiveCharacterTextSplitterComponent(Component):
self.separators = [unescape_string(x) for x in self.separators]
# Make sure chunk_size and chunk_overlap are ints
if isinstance(self.chunk_size, str):
if self.chunk_size:
self.chunk_size = int(self.chunk_size)
if isinstance(self.chunk_overlap, str):
if self.chunk_overlap:
self.chunk_overlap = int(self.chunk_overlap)
splitter = RecursiveCharacterTextSplitter(
separators=self.separators,

View file

@ -7,7 +7,6 @@ from langflow.custom import Component
from langflow.helpers.data import docs_to_data
from langflow.io import BoolInput, DropdownInput, HandleInput, IntInput, Output, SecretStrInput, StrInput
from langflow.schema import Data
from langflow.field_typing import Retriever
class CassandraVectorStoreComponent(Component):

View file

@ -1,5 +1,5 @@
from copy import deepcopy
from typing import TYPE_CHECKING, List
from typing import TYPE_CHECKING
from chromadb.config import Settings
from langchain_chroma.vectorstores import Chroma

View file

@ -8,7 +8,6 @@ from langflow.custom import Component
from langflow.helpers.data import docs_to_data
from langflow.io import BoolInput, HandleInput, IntInput, Output, SecretStrInput, StrInput
from langflow.schema import Data
from langflow.field_typing import Retriever
class CouchbaseVectorStoreComponent(Component):

View file

@ -8,8 +8,6 @@ from langflow.helpers.data import docs_to_data
from langflow.io import BoolInput, HandleInput, IntInput, Output, StrInput
from langflow.schema import Data
from langflow.field_typing import Retriever
class MongoVectorStoreComponent(Component):
display_name = "MongoDB Atlas"

View file

@ -4,7 +4,6 @@ from langchain_community.vectorstores import Qdrant
from langchain_core.retrievers import BaseRetriever
from langflow.custom import Component
from langflow.field_typing import Retriever
from langflow.helpers.data import docs_to_data
from langflow.io import BoolInput, DropdownInput, HandleInput, IntInput, Output, SecretStrInput, StrInput
from langflow.schema import Data

View file

@ -5,7 +5,6 @@ from langchain_core.retrievers import BaseRetriever
from supabase.client import Client, create_client
from langflow.custom import Component
from langflow.field_typing import Retriever
from langflow.helpers.data import docs_to_data
from langflow.io import HandleInput, IntInput, Output, StrInput
from langflow.schema import Data

View file

@ -4,7 +4,6 @@ from langchain_community.vectorstores import UpstashVectorStore
from langchain_core.retrievers import BaseRetriever
from langflow.custom import Component
from langflow.field_typing import Retriever
from langflow.helpers.data import docs_to_data
from langflow.io import BoolInput, HandleInput, IntInput, Output, StrInput
from langflow.schema import Data

View file

@ -5,7 +5,6 @@ from langchain_community.vectorstores import Weaviate
from langchain_core.retrievers import BaseRetriever
from langflow.custom import Component
from langflow.field_typing import Retriever
from langflow.helpers.data import docs_to_data
from langflow.io import BoolInput, HandleInput, IntInput, Output, SecretStrInput, StrInput
from langflow.schema import Data

View file

@ -8,8 +8,6 @@ from langflow.helpers.data import docs_to_data
from langflow.io import BoolInput, HandleInput, IntInput, Output, StrInput
from langflow.schema import Data
from langflow.field_typing import Retriever
class PGVectorStoreComponent(Component):
display_name = "PGVector"

View file

@ -6,7 +6,6 @@ from itertools import chain
from typing import TYPE_CHECKING, Dict, Generator, List, Optional, Tuple, Type, Union
from loguru import logger
from langflow.exceptions.component import ComponentBuildException
from langflow.exceptions.component import ComponentBuildException
from langflow.graph.edge.base import ContractEdge
from langflow.graph.graph.constants import lazy_load_vertex_dict

View file

@ -11,7 +11,6 @@ class Template(BaseModel):
type_name: str = Field(serialization_alias="_type")
fields: list[Union[Input, InputTypes]]
def process_fields(
self,
format_field_func: Union[Callable, None] = None,