chore: Remove deprecated vector search components

This commit is contained in:
Gabriel Luiz Freitas Almeida 2024-06-22 13:36:54 -03:00
commit 0999ce5ae5
15 changed files with 0 additions and 1172 deletions

View file

@ -1,148 +0,0 @@
from typing import List, Optional
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.components.vectorstores.AstraDB import AstraVectorStoreComponent
from langflow.field_typing import Embeddings, Text
from langflow.schema import Data
class AstraDBSearchComponent(LCVectorStoreComponent):
    """Searches an existing Astra DB Vector Store for documents similar to an input query."""

    display_name = "Astra DB Search"
    description = "Searches an existing Astra DB Vector Store."
    icon = "AstraDB"
    field_order = ["token", "api_endpoint", "collection_name", "input_value", "embedding"]

    def build_config(self):
        """Return the UI field configuration for this component."""
        return {
            "search_type": {
                "display_name": "Search Type",
                "options": ["Similarity", "MMR"],
            },
            "input_value": {
                "display_name": "Input Value",
                "info": "Input value to search",
            },
            "embedding": {"display_name": "Embedding", "info": "Embedding to use"},
            "collection_name": {
                "display_name": "Collection Name",
                "info": "The name of the collection within Astra DB where the vectors will be stored.",
            },
            "token": {
                "display_name": "Astra DB Application Token",
                "info": "Authentication token for accessing Astra DB.",
                "password": True,
            },
            "api_endpoint": {
                "display_name": "API Endpoint",
                "info": "API endpoint URL for the Astra DB service.",
            },
            "namespace": {
                "display_name": "Namespace",
                "info": "Optional namespace within Astra DB to use for the collection.",
                "advanced": True,
            },
            "metric": {
                "display_name": "Metric",
                "info": "Optional distance metric for vector comparisons in the vector store.",
                "advanced": True,
            },
            "batch_size": {
                "display_name": "Batch Size",
                "info": "Optional number of data to process in a single batch.",
                "advanced": True,
            },
            "bulk_insert_batch_concurrency": {
                "display_name": "Bulk Insert Batch Concurrency",
                "info": "Optional concurrency level for bulk insert operations.",
                "advanced": True,
            },
            "bulk_insert_overwrite_concurrency": {
                "display_name": "Bulk Insert Overwrite Concurrency",
                "info": "Optional concurrency level for bulk insert operations that overwrite existing data.",
                "advanced": True,
            },
            "bulk_delete_concurrency": {
                "display_name": "Bulk Delete Concurrency",
                "info": "Optional concurrency level for bulk delete operations.",
                "advanced": True,
            },
            "setup_mode": {
                "display_name": "Setup Mode",
                "info": "Configuration mode for setting up the vector store, with options like “Sync”, “Async”, or “Off”.",
                "options": ["Sync", "Async", "Off"],
                "advanced": True,
            },
            "pre_delete_collection": {
                "display_name": "Pre Delete Collection",
                "info": "Boolean flag to determine whether to delete the collection before creating a new one.",
                "advanced": True,
            },
            "metadata_indexing_include": {
                "display_name": "Metadata Indexing Include",
                "info": "Optional list of metadata fields to include in the indexing.",
                "advanced": True,
            },
            "metadata_indexing_exclude": {
                "display_name": "Metadata Indexing Exclude",
                "info": "Optional list of metadata fields to exclude from the indexing.",
                "advanced": True,
            },
            "collection_indexing_policy": {
                "display_name": "Collection Indexing Policy",
                "info": "Optional dictionary defining the indexing policy for the collection.",
                "advanced": True,
            },
            "number_of_results": {
                "display_name": "Number of Results",
                "info": "Number of results to return.",
                "advanced": True,
            },
        }

    def build(
        self,
        embedding: Embeddings,
        collection_name: str,
        input_value: Text,
        token: str,
        api_endpoint: str,
        search_type: str = "Similarity",
        number_of_results: int = 4,
        namespace: Optional[str] = None,
        metric: Optional[str] = None,
        batch_size: Optional[int] = None,
        bulk_insert_batch_concurrency: Optional[int] = None,
        bulk_insert_overwrite_concurrency: Optional[int] = None,
        bulk_delete_concurrency: Optional[int] = None,
        setup_mode: str = "Sync",
        pre_delete_collection: bool = False,
        metadata_indexing_include: Optional[List[str]] = None,
        metadata_indexing_exclude: Optional[List[str]] = None,
        collection_indexing_policy: Optional[dict] = None,
    ) -> List[Data]:
        """Build the Astra DB vector store and search it for ``input_value``.

        Args:
            embedding: Embedding model used to vectorize the query.
            collection_name: Astra DB collection holding the vectors.
            input_value: Query text to search for.
            token: Astra DB application token.
            api_endpoint: Astra DB API endpoint URL.
            search_type: "Similarity" or "MMR".
            number_of_results: Number of results to return (``k``).
            Remaining parameters are passed through to the underlying
            AstraVectorStoreComponent unchanged.

        Returns:
            A list of Data records matching the query.

        Raises:
            ValueError: If the collection lacks a 'content' field, i.e. it was
                not ingested through Langflow/LangChain.
        """
        vector_store = AstraVectorStoreComponent().build(
            embedding=embedding,
            collection_name=collection_name,
            token=token,
            api_endpoint=api_endpoint,
            namespace=namespace,
            metric=metric,
            batch_size=batch_size,
            bulk_insert_batch_concurrency=bulk_insert_batch_concurrency,
            bulk_insert_overwrite_concurrency=bulk_insert_overwrite_concurrency,
            bulk_delete_concurrency=bulk_delete_concurrency,
            setup_mode=setup_mode,
            pre_delete_collection=pre_delete_collection,
            metadata_indexing_include=metadata_indexing_include,
            metadata_indexing_exclude=metadata_indexing_exclude,
            collection_indexing_policy=collection_indexing_policy,
        )
        try:
            return self.search_with_vector_store(input_value, search_type, vector_store, k=number_of_results)
        except KeyError as e:
            if "content" in str(e):
                # Chain the KeyError so the original cause is preserved in the traceback.
                raise ValueError(
                    "You should ingest data through Langflow (or LangChain) to query it in Langflow. Your collection does not contain a field name 'content'."
                ) from e
            # Bare raise keeps the original exception and traceback intact.
            raise

View file

@ -1,95 +0,0 @@
from typing import Any, List, Optional, Tuple
from langchain_community.utilities.cassandra import SetupMode
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.components.vectorstores.Cassandra import CassandraVectorStoreComponent
from langflow.field_typing import Embeddings, Text
from langflow.schema import Data
class CassandraSearchComponent(LCVectorStoreComponent):
    """Searches an existing Cassandra (Astra DB) vector table for similar documents."""

    display_name = "Cassandra Search"
    description = "Searches an existing Cassandra Vector Store."
    icon = "Cassandra"
    field_order = ["token", "database_id", "table_name", "input_value", "embedding"]

    def build_config(self):
        """Return the UI field configuration for this component."""
        return {
            "search_type": {
                "display_name": "Search Type",
                "options": ["Similarity", "MMR"],
            },
            "input_value": {
                "display_name": "Input Value",
                "info": "Input value to search",
            },
            "embedding": {"display_name": "Embedding", "info": "Embedding to use"},
            "token": {
                "display_name": "Token",
                "info": "Authentication token for accessing Cassandra on Astra DB.",
                "password": True,
            },
            "database_id": {
                "display_name": "Database ID",
                "info": "The Astra database ID.",
            },
            "table_name": {
                "display_name": "Table Name",
                "info": "The name of the table where vectors will be stored.",
            },
            "keyspace": {
                "display_name": "Keyspace",
                "info": "Optional key space within Astra DB. The keyspace should already be created.",
                "advanced": True,
            },
            "body_index_options": {
                "display_name": "Body Index Options",
                "info": "Optional options used to create the body index.",
                "advanced": True,
            },
            "setup_mode": {
                "display_name": "Setup Mode",
                "info": "Configuration mode for setting up the Cassandra table, with options like 'Sync', 'Async', or 'Off'.",
                "options": ["Sync", "Async", "Off"],
                "advanced": True,
            },
            "number_of_results": {
                "display_name": "Number of Results",
                "info": "Number of results to return.",
                "advanced": True,
            },
        }

    def build(
        self,
        embedding: Embeddings,
        table_name: str,
        input_value: Text,
        token: str,
        database_id: str,
        search_type: str = "similarity",
        number_of_results: int = 4,
        keyspace: Optional[str] = None,
        body_index_options: Optional[List[Tuple[str, Any]]] = None,
        setup_mode: SetupMode = SetupMode.SYNC,
    ) -> List[Data]:
        """Build the Cassandra vector store and search it for ``input_value``.

        Args:
            embedding: Embedding model used to vectorize the query.
            table_name: Cassandra table holding the vectors.
            input_value: Query text to search for.
            token: Astra DB authentication token.
            database_id: Astra database identifier.
            search_type: Search strategy ("similarity" or "MMR").
            number_of_results: Number of results to return (``k``).
            keyspace: Optional pre-existing keyspace.
            body_index_options: Optional body-index creation options.
            setup_mode: Table setup mode (Sync/Async/Off).

        Returns:
            A list of Data records matching the query.

        Raises:
            ValueError: If the table lacks a 'content' field, i.e. it was not
                ingested through Langflow/LangChain.
        """
        vector_store = CassandraVectorStoreComponent().build(
            embedding=embedding,
            table_name=table_name,
            token=token,
            database_id=database_id,
            keyspace=keyspace,
            body_index_options=body_index_options,
            setup_mode=setup_mode,
        )
        try:
            return self.search_with_vector_store(input_value, search_type, vector_store, k=number_of_results)
        except KeyError as e:
            if "content" in str(e):
                # Chain the KeyError so the original cause is preserved in the traceback.
                raise ValueError(
                    "You should ingest data through Langflow (or LangChain) to query it in Langflow. Your collection does not contain a field name 'content'."
                ) from e
            # Bare raise keeps the original exception and traceback intact.
            raise

View file

@ -1,116 +0,0 @@
from typing import List, Optional
import chromadb
from chromadb.config import Settings
from langchain_chroma import Chroma
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.field_typing import Embeddings, Text
from langflow.schema import Data
class ChromaSearchComponent(LCVectorStoreComponent):
    """Searches a Chroma collection (local or remote server) for similar documents."""

    display_name: str = "Chroma Search"
    description: str = "Search a Chroma collection for similar documents."
    icon = "Chroma"

    def build_config(self):
        """
        Builds the configuration for the component.

        Returns:
        - dict: A dictionary containing the configuration options for the component.
        """
        return {
            "input_value": {"display_name": "Input"},
            "search_type": {
                "display_name": "Search Type",
                "options": ["Similarity", "MMR"],
            },
            "collection_name": {"display_name": "Collection Name", "value": "langflow"},
            # "persist": {"display_name": "Persist"},
            "index_directory": {"display_name": "Index Directory"},
            "code": {"show": False, "display_name": "Code"},
            "embedding": {
                "display_name": "Embedding",
                "info": "Embedding model to vectorize inputs (make sure to use same as index)",
            },
            "chroma_server_cors_allow_origins": {
                "display_name": "Server CORS Allow Origins",
                "advanced": True,
            },
            "chroma_server_host": {"display_name": "Server Host", "advanced": True},
            "chroma_server_http_port": {"display_name": "Server HTTP Port", "advanced": True},
            "chroma_server_grpc_port": {
                "display_name": "Server gRPC Port",
                "advanced": True,
            },
            "chroma_server_ssl_enabled": {
                "display_name": "Server SSL Enabled",
                "advanced": True,
            },
            "number_of_results": {
                "display_name": "Number of Results",
                "info": "Number of results to return.",
                "advanced": True,
            },
        }

    def build(
        self,
        input_value: Text,
        search_type: str,
        collection_name: str,
        embedding: Embeddings,
        chroma_server_ssl_enabled: bool,
        number_of_results: int = 4,
        index_directory: Optional[str] = None,
        # None instead of a mutable [] default: a shared list default would be
        # reused across calls. The body already normalizes None via `or []`.
        chroma_server_cors_allow_origins: Optional[List[str]] = None,
        chroma_server_host: Optional[str] = None,
        chroma_server_http_port: Optional[int] = None,
        chroma_server_grpc_port: Optional[int] = None,
    ) -> List[Data]:
        """
        Builds the Vector Store or BaseRetriever object.

        Args:
        - input_value (Text): The input value.
        - search_type (str): The type of search.
        - collection_name (str): The name of the collection.
        - embedding (Embeddings): The embeddings to use for the Vector Store.
        - chroma_server_ssl_enabled (bool): Whether to enable SSL for the Chroma server.
        - number_of_results (int, optional): The number of results to retrieve. Defaults to 4.
        - index_directory (str, optional): The directory to persist the Vector Store to. Defaults to None.
        - chroma_server_cors_allow_origins (List[str], optional): The CORS allow origins for the Chroma server. Defaults to None (treated as []).
        - chroma_server_host (str, optional): The host for the Chroma server. Defaults to None.
        - chroma_server_http_port (int, optional): The HTTP port for the Chroma server. Defaults to None.
        - chroma_server_grpc_port (int, optional): The gRPC port for the Chroma server. Defaults to None.

        Returns:
        - List[Data]: The list of data.
        """
        # Only build server settings / an HTTP client when a remote host is given;
        # otherwise Chroma runs against the local persist directory.
        chroma_settings = None
        client = None
        if chroma_server_host is not None:
            chroma_settings = Settings(
                chroma_server_cors_allow_origins=chroma_server_cors_allow_origins or [],
                chroma_server_host=chroma_server_host,
                chroma_server_http_port=chroma_server_http_port or None,
                chroma_server_grpc_port=chroma_server_grpc_port or None,
                chroma_server_ssl_enabled=chroma_server_ssl_enabled,
            )
            client = chromadb.HttpClient(settings=chroma_settings)
        if index_directory:
            index_directory = self.resolve_path(index_directory)
        vector_store = Chroma(
            embedding_function=embedding,
            collection_name=collection_name,
            persist_directory=index_directory or None,
            client=client,
        )
        return self.search_with_vector_store(input_value, search_type, vector_store, k=number_of_results)

View file

@ -1,69 +0,0 @@
from typing import List
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.components.vectorstores.Couchbase import CouchbaseVectorStoreComponent
from langflow.field_typing import Embeddings, Text
from langflow.schema import Data
class CouchbaseSearchComponent(LCVectorStoreComponent):
    """Performs a similarity search against an existing Couchbase vector store."""

    display_name = "Couchbase Search"
    description = "Search a Couchbase Vector Store for similar documents."
    documentation = "https://python.langchain.com/docs/integrations/vectorstores/couchbase"
    icon = "Couchbase"
    field_order = [
        "couchbase_connection_string",
        "couchbase_username",
        "couchbase_password",
        "bucket_name",
        "scope_name",
        "collection_name",
        "index_name",
    ]

    def build_config(self):
        """Describe the input fields shown in the UI for this component."""
        results_field = {
            "display_name": "Number of Results",
            "info": "Number of results to return.",
            "advanced": True,
        }
        return {
            "input_value": {"display_name": "Input"},
            "embedding": {"display_name": "Embedding"},
            "couchbase_connection_string": {"display_name": "Couchbase Cluster connection string", "required": True},
            "couchbase_username": {"display_name": "Couchbase username", "required": True},
            "couchbase_password": {"display_name": "Couchbase password", "password": True, "required": True},
            "bucket_name": {"display_name": "Bucket Name", "required": True},
            "scope_name": {"display_name": "Scope Name", "required": True},
            "collection_name": {"display_name": "Collection Name", "required": True},
            "index_name": {"display_name": "Index Name", "required": True},
            "number_of_results": results_field,
        }

    def build(  # type: ignore[override]
        self,
        input_value: Text,
        embedding: Embeddings,
        number_of_results: int = 4,
        bucket_name: str = "",
        scope_name: str = "",
        collection_name: str = "",
        index_name: str = "",
        couchbase_connection_string: str = "",
        couchbase_username: str = "",
        couchbase_password: str = "",
    ) -> List[Data]:
        """Build the Couchbase vector store via its store component, then run a similarity search."""
        store = CouchbaseVectorStoreComponent().build(
            couchbase_connection_string=couchbase_connection_string,
            couchbase_username=couchbase_username,
            couchbase_password=couchbase_password,
            bucket_name=bucket_name,
            scope_name=scope_name,
            collection_name=collection_name,
            embedding=embedding,
            index_name=index_name,
        )
        if not store:
            raise ValueError("Failed to create Couchbase Vector Store")
        return self.search_with_vector_store(
            vector_store=store,
            input_value=input_value,
            search_type="similarity",
            k=number_of_results,
        )

View file

@ -1,48 +0,0 @@
from typing import List
from langchain_community.vectorstores.faiss import FAISS
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.field_typing import Embeddings, Text
from langflow.schema import Data
class FAISSSearchComponent(LCVectorStoreComponent):
    """Loads a saved FAISS index from disk and searches it for similar documents."""

    display_name = "FAISS Search"
    description = "Search a FAISS Vector Store for similar documents."
    documentation = "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss"

    def build_config(self):
        """Describe the input fields shown in the UI for this component."""
        return {
            "embedding": {"display_name": "Embedding"},
            "folder_path": {
                "display_name": "Folder Path",
                "info": "Path to save the FAISS index. It will be relative to where Langflow is running.",
            },
            "input_value": {"display_name": "Input"},
            "index_name": {"display_name": "Index Name"},
            "number_of_results": {
                "display_name": "Number of Results",
                "info": "Number of results to return.",
                "advanced": True,
            },
        }

    def build(
        self,
        input_value: Text,
        embedding: Embeddings,
        folder_path: str,
        number_of_results: int = 4,
        index_name: str = "langflow_index",
    ) -> List[Data]:
        """Load the FAISS index under ``folder_path`` and run a similarity search."""
        if not folder_path:
            raise ValueError("Folder path is required to save the FAISS index.")
        resolved = self.resolve_path(folder_path)
        store = FAISS.load_local(folder_path=Text(resolved), embeddings=embedding, index_name=index_name)
        if not store:
            raise ValueError("Failed to load the FAISS index.")
        return self.search_with_vector_store(
            vector_store=store,
            input_value=input_value,
            search_type="similarity",
            k=number_of_results,
        )

View file

@ -1,57 +0,0 @@
from typing import List, Optional
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.components.vectorstores.MongoDBAtlasVector import MongoVectorStoreComponent
from langflow.field_typing import Embeddings, NestedDict, Text
from langflow.schema import Data
class MongoDBAtlasSearchComponent(LCVectorStoreComponent):
    """Searches an existing MongoDB Atlas vector store for similar documents."""

    display_name = "MongoDB Atlas Search"
    description = "Search a MongoDB Atlas Vector Store for similar documents."

    def build_config(self):
        """Describe the input fields shown in the UI for this component."""
        return {
            "search_type": {
                "display_name": "Search Type",
                "options": ["Similarity", "MMR"],
            },
            "input_value": {"display_name": "Input"},
            "embedding": {"display_name": "Embedding"},
            "collection_name": {"display_name": "Collection Name"},
            "db_name": {"display_name": "Database Name"},
            "index_name": {"display_name": "Index Name"},
            "mongodb_atlas_cluster_uri": {"display_name": "MongoDB Atlas Cluster URI"},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
            "number_of_results": {
                "display_name": "Number of Results",
                "info": "Number of results to return.",
                "advanced": True,
            },
        }

    def build(  # type: ignore[override]
        self,
        input_value: Text,
        search_type: str,
        embedding: Embeddings,
        number_of_results: int = 4,
        collection_name: str = "",
        db_name: str = "",
        index_name: str = "",
        mongodb_atlas_cluster_uri: str = "",
        search_kwargs: Optional[NestedDict] = None,
    ) -> List[Data]:
        """Build the Mongo Atlas vector store and run the requested search."""
        # NOTE(review): normalized but never forwarded to the search call below —
        # kept as-is to preserve existing behavior; confirm whether it should be passed on.
        search_kwargs = search_kwargs or {}
        store = MongoVectorStoreComponent().build(
            mongodb_atlas_cluster_uri=mongodb_atlas_cluster_uri,
            collection_name=collection_name,
            db_name=db_name,
            embedding=embedding,
            index_name=index_name,
        )
        if not store:
            raise ValueError("Failed to create MongoDB Atlas Vector Store")
        return self.search_with_vector_store(
            vector_store=store,
            input_value=input_value,
            search_type=search_type,
            k=number_of_results,
        )

View file

@ -1,95 +0,0 @@
from typing import List, Optional
from langchain_pinecone._utilities import DistanceStrategy
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.components.vectorstores.Pinecone import PineconeVectorStoreComponent
from langflow.field_typing import Embeddings, Text
from langflow.field_typing.constants import NestedDict
from langflow.schema import Data
class PineconeSearchComponent(PineconeVectorStoreComponent, LCVectorStoreComponent):
    """Searches an existing Pinecone index for documents similar to the input."""

    display_name = "Pinecone Search"
    description = "Search a Pinecone Vector Store for similar documents."
    icon = "Pinecone"
    field_order = ["index_name", "namespace", "distance_strategy", "pinecone_api_key", "input_value", "embedding"]

    def build_config(self):
        """Describe the input fields, deriving distance options from the DistanceStrategy enum."""
        # Title-cased enum values (e.g. "Euclidean Distance") for display.
        strategies = [member.value.title().replace("_", " ") for member in DistanceStrategy]
        return {
            "search_type": {
                "display_name": "Search Type",
                "options": ["Similarity", "MMR"],
            },
            "input_value": {"display_name": "Input"},
            "embedding": {"display_name": "Embedding"},
            "index_name": {"display_name": "Index Name"},
            "namespace": {"display_name": "Namespace", "info": "Namespace for the index."},
            "distance_strategy": {
                "display_name": "Distance Strategy",
                "options": strategies,
                "advanced": True,
                "value": strategies[0],
            },
            "pinecone_api_key": {
                "display_name": "Pinecone API Key",
                "default": "",
                "password": True,
            },
            "pool_threads": {
                "display_name": "Pool Threads",
                "default": 1,
                "advanced": True,
            },
            "number_of_results": {
                "display_name": "Number of Results",
                "info": "Number of results to return.",
                "advanced": True,
            },
            "text_key": {
                "display_name": "Text Key",
                "info": "Key in the record to use as text.",
                "advanced": True,
            },
        }

    def build(  # type: ignore[override]
        self,
        input_value: Text,
        embedding: Embeddings,
        distance_strategy: str,
        text_key: str = "text",
        number_of_results: int = 4,
        pool_threads: int = 4,
        index_name: Optional[str] = None,
        pinecone_api_key: Optional[str] = None,
        namespace: Optional[str] = "default",
        search_type: str = "similarity",
        search_kwargs: Optional[NestedDict] = None,
    ) -> List[Data]:  # type: ignore[override]
        """Build the Pinecone store via the parent component, then search it."""
        store = super().build(
            embedding=embedding,
            distance_strategy=distance_strategy,
            inputs=[],
            text_key=text_key,
            pool_threads=pool_threads,
            index_name=index_name,
            pinecone_api_key=pinecone_api_key,
            namespace=namespace,
        )
        if not store:
            raise ValueError("Failed to load the Pinecone index.")
        extra_kwargs = search_kwargs if search_kwargs is not None else {}
        return self.search_with_vector_store(
            vector_store=store,
            input_value=input_value,
            search_type=search_type,
            k=number_of_results,
            **extra_kwargs,
        )

View file

@ -1,103 +0,0 @@
from typing import List, Optional
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.components.vectorstores.Qdrant import QdrantVectorStoreComponent
from langflow.field_typing import Embeddings, NestedDict, Text
from langflow.schema import Data
class QdrantSearchComponent(QdrantVectorStoreComponent, LCVectorStoreComponent):
    """Searches an existing Qdrant collection for documents similar to the input."""

    display_name = "Qdrant Search"
    description = "Construct Qdrant wrapper from a list of texts."
    icon = "Qdrant"

    def build_config(self):
        """Describe the input fields shown in the UI for this component."""
        return {
            "search_type": {
                "display_name": "Search Type",
                "options": ["Similarity", "MMR"],
            },
            "input_value": {"display_name": "Input"},
            "embedding": {"display_name": "Embedding"},
            "api_key": {"display_name": "API Key", "password": True, "advanced": True},
            "collection_name": {"display_name": "Collection Name"},
            "content_payload_key": {
                "display_name": "Content Payload Key",
                "advanced": True,
            },
            "distance_func": {"display_name": "Distance Function", "advanced": True},
            "grpc_port": {"display_name": "gRPC Port", "advanced": True},
            "host": {"display_name": "Host", "advanced": True},
            "https": {"display_name": "HTTPS", "advanced": True},
            "location": {"display_name": "Location", "advanced": True},
            "metadata_payload_key": {
                "display_name": "Metadata Payload Key",
                "advanced": True,
            },
            "path": {"display_name": "Path", "advanced": True},
            "port": {"display_name": "Port", "advanced": True},
            "prefer_grpc": {"display_name": "Prefer gRPC", "advanced": True},
            "prefix": {"display_name": "Prefix", "advanced": True},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
            "timeout": {"display_name": "Timeout", "advanced": True},
            "url": {"display_name": "URL", "advanced": True},
            "number_of_results": {
                "display_name": "Number of Results",
                "info": "Number of results to return.",
                "advanced": True,
            },
        }

    def build(  # type: ignore[override]
        self,
        input_value: Text,
        embedding: Embeddings,
        collection_name: str,
        number_of_results: int = 4,
        search_type: str = "similarity",
        api_key: Optional[str] = None,
        content_payload_key: str = "page_content",
        distance_func: str = "Cosine",
        grpc_port: int = 6334,
        https: bool = False,
        host: Optional[str] = None,
        location: Optional[str] = None,
        metadata_payload_key: str = "metadata",
        path: Optional[str] = None,
        port: Optional[int] = 6333,
        prefer_grpc: bool = False,
        prefix: Optional[str] = None,
        search_kwargs: Optional[NestedDict] = None,
        timeout: Optional[int] = None,
        url: Optional[str] = None,
    ) -> List[Data]:  # type: ignore[override]
        """Build the Qdrant store via the parent component, then search it."""
        store = super().build(
            embedding=embedding,
            collection_name=collection_name,
            api_key=api_key,
            content_payload_key=content_payload_key,
            distance_func=distance_func,
            grpc_port=grpc_port,
            https=https,
            host=host,
            location=location,
            metadata_payload_key=metadata_payload_key,
            path=path,
            port=port,
            prefer_grpc=prefer_grpc,
            prefix=prefix,
            timeout=timeout,
            url=url,
        )
        if not store:
            raise ValueError("Failed to load the Qdrant index.")
        extra_kwargs = search_kwargs if search_kwargs is not None else {}
        return self.search_with_vector_store(
            vector_store=store,
            input_value=input_value,
            search_type=search_type,
            k=number_of_results,
            **extra_kwargs,
        )

View file

@ -1,82 +0,0 @@
from typing import List, Optional
from langchain_core.embeddings import Embeddings
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.components.vectorstores.Redis import RedisVectorStoreComponent
from langflow.field_typing import Text
from langflow.schema import Data
class RedisSearchComponent(RedisVectorStoreComponent, LCVectorStoreComponent):
    """Searches an existing Redis vector index for documents similar to the input."""

    display_name: str = "Redis Search"
    description: str = "Search a Redis Vector Store for similar documents."
    documentation = "https://python.langchain.com/docs/integrations/vectorstores/redis"

    def build_config(self):
        """Describe the input fields shown in the UI for this component."""
        return {
            "search_type": {
                "display_name": "Search Type",
                "options": ["Similarity", "MMR"],
            },
            "input_value": {"display_name": "Input"},
            "index_name": {"display_name": "Index Name", "value": "your_index"},
            "code": {"show": False, "display_name": "Code"},
            "embedding": {"display_name": "Embedding"},
            "schema": {"display_name": "Schema", "file_types": [".yaml"]},
            "redis_server_url": {
                "display_name": "Redis Server Connection String",
                "advanced": False,
            },
            "redis_index_name": {"display_name": "Redis Index", "advanced": False},
            "number_of_results": {
                "display_name": "Number of Results",
                "info": "Number of results to return.",
                "advanced": True,
            },
        }

    def build(  # type: ignore[override]
        self,
        input_value: Text,
        search_type: str,
        embedding: Embeddings,
        redis_server_url: str,
        redis_index_name: str,
        number_of_results: int = 4,
        schema: Optional[str] = None,
    ) -> List[Data]:
        """Build the Redis vector store via the parent component, then search it.

        Args:
            input_value: Query text to search for.
            search_type: Search strategy ("Similarity" or "MMR").
            embedding: Embedding model used to vectorize the query.
            redis_server_url: Connection string for the Redis server.
            redis_index_name: Name of the Redis index to query.
            number_of_results: Number of results to return (``k``).
            schema: Optional YAML schema file for the index.

        Returns:
            The list of matching Data records.
        """
        store = super().build(
            embedding=embedding,
            redis_server_url=redis_server_url,
            redis_index_name=redis_index_name,
            schema=schema,
        )
        if not store:
            raise ValueError("Failed to load the Redis index.")
        return self.search_with_vector_store(
            input_value=input_value,
            search_type=search_type,
            vector_store=store,
            k=number_of_results,
        )

View file

@ -1,54 +0,0 @@
from typing import List
from langchain_community.vectorstores.supabase import SupabaseVectorStore
from supabase.client import Client, create_client
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.field_typing import Embeddings, Text
from langflow.schema import Data
class SupabaseSearchComponent(LCVectorStoreComponent):
    """Searches an existing Supabase vector table for similar documents."""

    display_name = "Supabase Search"
    description = "Search a Supabase Vector Store for similar documents."
    icon = "Supabase"

    def build_config(self):
        """Describe the input fields shown in the UI for this component."""
        return {
            "search_type": {
                "display_name": "Search Type",
                "options": ["Similarity", "MMR"],
            },
            "input_value": {"display_name": "Input"},
            "embedding": {"display_name": "Embedding"},
            "query_name": {"display_name": "Query Name"},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
            "supabase_service_key": {"display_name": "Supabase Service Key"},
            "supabase_url": {"display_name": "Supabase URL"},
            "table_name": {"display_name": "Table Name", "advanced": True},
            "number_of_results": {
                "display_name": "Number of Results",
                "info": "Number of results to return.",
                "advanced": True,
            },
        }

    def build(
        self,
        input_value: Text,
        search_type: str,
        embedding: Embeddings,
        number_of_results: int = 4,
        query_name: str = "",
        supabase_service_key: str = "",
        supabase_url: str = "",
        table_name: str = "",
    ) -> List[Data]:
        """Connect to Supabase, wrap the table as a vector store, and search it."""
        client: Client = create_client(supabase_url, supabase_key=supabase_service_key)
        store = SupabaseVectorStore(
            client=client,
            embedding=embedding,
            table_name=table_name,
            query_name=query_name,
        )
        return self.search_with_vector_store(input_value, search_type, store, k=number_of_results)

View file

@ -1,79 +0,0 @@
from typing import List, Optional
from langchain_core.embeddings import Embeddings
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.components.vectorstores.Upstash import UpstashVectorStoreComponent
from langflow.field_typing import Text
from langflow.schema import Data
class UpstashSearchComponent(UpstashVectorStoreComponent, LCVectorStoreComponent):
    """Searches an existing Upstash vector index for documents similar to the input."""

    display_name: str = "Upstash Search"
    description: str = "Search an Upstash Vector Store for similar documents."

    def build_config(self):
        """Describe the input fields shown in the UI for this component."""
        return {
            "search_type": {
                "display_name": "Search Type",
                "options": ["Similarity", "MMR"],
            },
            "input_value": {"display_name": "Input"},
            "inputs": {"display_name": "Input", "input_types": ["Document", "Data"]},
            "embedding": {
                "display_name": "Embedding",
                "input_types": ["Embeddings"],
                "info": "To use Upstash's embeddings, don't provide an embedding.",
            },
            "index_url": {
                "display_name": "Index URL",
                "info": "The URL of the Upstash index.",
            },
            "index_token": {
                "display_name": "Index Token",
                "info": "The token for the Upstash index.",
            },
            "number_of_results": {
                "display_name": "Number of Results",
                "info": "Number of results to return.",
                "advanced": True,
            },
            "text_key": {
                "display_name": "Text Key",
                "info": "The key in the record to use as text.",
                "advanced": True,
            },
        }

    def build(  # type: ignore[override]
        self,
        input_value: Text,
        search_type: str,
        text_key: str = "text",
        index_url: Optional[str] = None,
        index_token: Optional[str] = None,
        embedding: Optional[Embeddings] = None,
        number_of_results: int = 4,
    ) -> List[Data]:
        """Build the Upstash store via the parent component, then search it.

        The embedding is optional: when omitted, Upstash's own embeddings are used.
        """
        store = super().build(
            embedding=embedding,
            text_key=text_key,
            index_url=index_url,
            index_token=index_token,
        )
        if not store:
            raise ValueError("Failed to load the Upstash Vector Store.")
        return self.search_with_vector_store(
            input_value=input_value,
            search_type=search_type,
            vector_store=store,
            k=number_of_results,
        )

View file

@ -1,66 +0,0 @@
from typing import List
from langchain_community.vectorstores.vectara import Vectara
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.components.vectorstores.Vectara import VectaraVectorStoreComponent
from langflow.field_typing import Text
from langflow.schema import Data
class VectaraSearchComponent(VectaraVectorStoreComponent, LCVectorStoreComponent):
    """Searches an existing Vectara corpus for documents similar to the input."""

    display_name: str = "Vectara Search"
    description: str = "Search a Vectara Vector Store for similar documents."
    documentation = "https://python.langchain.com/docs/integrations/vectorstores/vectara"
    icon = "Vectara"
    field_config = {
        "search_type": {
            "display_name": "Search Type",
            "options": ["Similarity", "MMR"],
        },
        "input_value": {"display_name": "Input"},
        "vectara_customer_id": {
            "display_name": "Vectara Customer ID",
        },
        "vectara_corpus_id": {
            "display_name": "Vectara Corpus ID",
        },
        "vectara_api_key": {
            "display_name": "Vectara API Key",
            "password": True,
        },
        "files_url": {
            "display_name": "Files Url",
            "info": "Make vectara object using url of files (optional)",
        },
        "number_of_results": {
            "display_name": "Number of Results",
            "info": "Number of results to return.",
            "advanced": True,
        },
    }

    def build(  # type: ignore[override]
        self,
        input_value: Text,
        search_type: str,
        vectara_customer_id: str,
        vectara_corpus_id: str,
        vectara_api_key: str,
        number_of_results: int = 4,
    ) -> List[Data]:
        """Connect to the Vectara corpus with the given credentials and search it."""
        store = Vectara(
            vectara_customer_id=vectara_customer_id,
            vectara_corpus_id=vectara_corpus_id,
            vectara_api_key=vectara_api_key,
            # Identifies Langflow as the calling application to Vectara.
            source="Langflow",
        )
        if not store:
            raise ValueError("Failed to create Vectara Vector Store")
        return self.search_with_vector_store(
            vector_store=store,
            input_value=input_value,
            search_type=search_type,
            k=number_of_results,
        )

View file

@ -1,86 +0,0 @@
from typing import List, Optional
from langchain_core.embeddings import Embeddings
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.components.vectorstores.Weaviate import WeaviateVectorStoreComponent
from langflow.field_typing import Text
from langflow.schema import Data
class WeaviateSearchVectorStore(WeaviateVectorStoreComponent, LCVectorStoreComponent):
    """Searches an existing Weaviate index for documents similar to the input."""

    display_name: str = "Weaviate Search"
    description: str = "Search a Weaviate Vector Store for similar documents."
    documentation = "https://python.langchain.com/docs/integrations/vectorstores/weaviate"
    icon = "Weaviate"
    field_config = {
        "search_type": {
            "display_name": "Search Type",
            "options": ["Similarity", "MMR"],
        },
        "input_value": {"display_name": "Input"},
        "url": {"display_name": "Weaviate URL", "value": "http://localhost:8080"},
        "api_key": {
            "display_name": "API Key",
            "password": True,
            "required": False,
        },
        "index_name": {
            "display_name": "Index name",
            "required": False,
        },
        "text_key": {
            "display_name": "Text Key",
            "required": False,
            "advanced": True,
            "value": "text",
        },
        "embedding": {"display_name": "Embedding"},
        "attributes": {
            "display_name": "Attributes",
            "required": False,
            "is_list": True,
            "field_type": "str",
            "advanced": True,
        },
        "search_by_text": {
            "display_name": "Search By Text",
            "field_type": "bool",
            "advanced": True,
        },
        "number_of_results": {
            "display_name": "Number of Results",
            "info": "Number of results to return.",
            "advanced": True,
        },
    }

    def build(  # type: ignore[override]
        self,
        input_value: Text,
        search_type: str,
        url: str,
        index_name: str,
        number_of_results: int = 4,
        search_by_text: bool = False,
        api_key: Optional[str] = None,
        text_key: str = "text",
        embedding: Optional[Embeddings] = None,
        attributes: Optional[list] = None,
    ) -> List[Data]:
        """Build the Weaviate store via the parent component, then search it."""
        store = super().build(
            url=url,
            api_key=api_key,
            index_name=index_name,
            text_key=text_key,
            embedding=embedding,
            attributes=attributes,
            search_by_text=search_by_text,
        )
        if not store:
            raise ValueError("Failed to load the Weaviate index.")
        return self.search_with_vector_store(
            vector_store=store,
            input_value=input_value,
            search_type=search_type,
            k=number_of_results,
        )

View file

@ -1,74 +0,0 @@
from typing import List
from langchain_core.embeddings import Embeddings
from langflow.base.vectorstores.model import LCVectorStoreComponent
from langflow.components.vectorstores.pgvector import PGVectorStoreComponent
from langflow.field_typing import Text
from langflow.schema import Data
class PGVectorSearchComponent(PGVectorStoreComponent, LCVectorStoreComponent):
    """Searches an existing PGVector (PostgreSQL) vector store for similar documents."""

    display_name: str = "PGVector Search"
    description: str = "Search a PGVector Store for similar documents."
    documentation = "https://python.langchain.com/docs/integrations/vectorstores/pgvector"

    def build_config(self):
        """
        Builds the configuration for the component.

        Returns:
        - dict: A dictionary containing the configuration options for the component.
        """
        return {
            "code": {"show": False},
            "embedding": {"display_name": "Embedding"},
            "search_type": {
                "display_name": "Search Type",
                "options": ["Similarity", "MMR"],
            },
            "pg_server_url": {
                "display_name": "PostgreSQL Server Connection String",
                "advanced": False,
            },
            "collection_name": {"display_name": "Table", "advanced": False},
            "input_value": {"display_name": "Input"},
            "number_of_results": {
                "display_name": "Number of Results",
                "info": "Number of results to return.",
                "advanced": True,
            },
        }

    def build(  # type: ignore[override]
        self,
        input_value: Text,
        embedding: Embeddings,
        search_type: str,
        pg_server_url: str,
        collection_name: str,
        number_of_results: int = 4,
    ) -> List[Data]:
        """
        Builds the Vector Store or BaseRetriever object.

        Args:
        - input_value (Text): The input value to search for.
        - embedding (Embeddings): The embeddings to use for the Vector Store.
        - search_type (str): The type of search ("Similarity" or "MMR").
        - pg_server_url (str): The URL for the PG server.
        - collection_name (str): The name of the PG table.
        - number_of_results (int, optional): Number of results to return. Defaults to 4.

        Returns:
        - List[Data]: The list of matching records.

        Raises:
        - RuntimeError: If building the underlying PGVector store fails.
        """
        try:
            vector_store = super().build(
                embedding=embedding,
                pg_server_url=pg_server_url,
                collection_name=collection_name,
            )
        except Exception as e:
            # Chain the original exception so the root cause stays in the traceback.
            raise RuntimeError(f"Failed to build PGVector: {e}") from e
        return self.search_with_vector_store(
            input_value=input_value, search_type=search_type, vector_store=vector_store, k=number_of_results
        )