Add broken components

This commit is contained in:
Gabriel Luiz Freitas Almeida 2024-01-09 00:30:23 -03:00
commit 4852b845b4
66 changed files with 2529 additions and 0 deletions

View file

@ -0,0 +1,22 @@
from langflow import CustomComponent
from langchain.field_typing import BaseLanguageModel, AgentExecutor
class CSVAgentComponent(CustomComponent):
    """Langflow component that builds a CSV question-answering agent.

    The original implementation instantiated ``AgentExecutor(llm=..., path=...)``
    directly, which is invalid: ``AgentExecutor`` requires an agent and tools.
    Use langchain's dedicated CSV-agent factory instead.
    """

    display_name = "CSVAgent"
    description = "Construct a CSV agent from a CSV and tools."
    documentation = "https://python.langchain.com/docs/modules/agents/toolkits/csv"

    def build_config(self):
        """Expose an LLM input and a CSV file picker in the UI."""
        return {
            "llm": {"display_name": "LLM", "type": BaseLanguageModel},
            "path": {"display_name": "Path", "type": "file", "suffixes": [".csv"], "file_types": ["csv"]},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        path: str,
    ) -> AgentExecutor:
        """Return an AgentExecutor that answers questions about the CSV at *path*."""
        # Imported lazily so this module stays importable when the toolkit
        # extras are absent.
        from langchain.agents.agent_toolkits import create_csv_agent

        return create_csv_agent(llm=llm, path=path)

View file

@ -0,0 +1,25 @@
from langflow import CustomComponent
from langchain.agents import AgentExecutor
from typing import Callable
from langflow.field_typing import (
BaseLanguageModel,
BaseToolkit,
)
class JsonAgentComponent(CustomComponent):
    """Langflow component that builds a JSON-exploring agent.

    Fix: ``AgentExecutor(llm=..., toolkit=...)`` is not a valid construction
    (AgentExecutor needs an agent and tools); use ``create_json_agent``.
    """

    display_name = "JsonAgent"
    description = "Construct a json agent from an LLM and tools."

    def build_config(self):
        """Expose the LLM and toolkit inputs in the UI."""
        return {
            "llm": {"display_name": "LLM"},
            "toolkit": {"display_name": "Toolkit"},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        toolkit: BaseToolkit,
    ) -> Callable:
        """Return an AgentExecutor over *toolkit* driven by *llm*."""
        from langchain.agents.agent_toolkits import create_json_agent

        return create_json_agent(llm=llm, toolkit=toolkit)

View file

@ -0,0 +1,25 @@
from langflow import CustomComponent
from typing import Union, Callable
from langchain.agents import AgentExecutor
from langflow.field_typing import BaseLanguageModel
class SQLAgentComponent(CustomComponent):
    """Langflow component that builds a SQL-querying agent.

    Fix: the original returned ``SQLAgent(...)``, an undefined name that would
    raise NameError at runtime. Build the agent with langchain's
    ``create_sql_agent`` factory and a ``SQLDatabaseToolkit``.
    """

    display_name = "SQLAgent"
    description = "Construct an SQL agent from an LLM and tools."

    def build_config(self):
        """Expose the LLM and database-URI inputs in the UI."""
        return {
            "llm": {"display_name": "LLM"},
            "database_uri": {"display_name": "Database URI"},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        database_uri: str,
    ) -> Union[AgentExecutor, Callable]:
        """Return an AgentExecutor bound to the database at *database_uri*."""
        from langchain.agents.agent_toolkits import SQLDatabaseToolkit, create_sql_agent
        from langchain.sql_database import SQLDatabase

        db = SQLDatabase.from_uri(database_uri)
        toolkit = SQLDatabaseToolkit(db=db, llm=llm)
        return create_sql_agent(llm=llm, toolkit=toolkit)

View file

@ -0,0 +1,22 @@
from langflow import CustomComponent
from langchain.agents import AgentExecutor
from typing import Union, Callable
from langflow.field_typing import BaseLanguageModel, VectorStore
class VectorStoreAgentComponent(CustomComponent):
    """Langflow component that builds an agent over a vector store.

    Fix: ``AgentExecutor(llm=..., vectorstore=...)`` is invalid; use the
    ``create_vectorstore_agent`` factory with a ``VectorStoreToolkit``.
    """

    display_name = "VectorStoreAgent"
    description = "Construct an agent from a Vector Store."

    def build_config(self):
        """Expose the LLM and vector-store-info inputs in the UI."""
        return {
            "llm": {"display_name": "LLM"},
            "vectorstoreinfo": {"display_name": "Vector Store Info"},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        vectorstoreinfo: VectorStore,
    ) -> Union[AgentExecutor, Callable]:
        """Return an AgentExecutor that queries the given vector store."""
        # NOTE(review): the parameter is annotated VectorStore but the toolkit
        # expects a VectorStoreInfo, matching the "Vector Store Info" label —
        # confirm the annotation against the caller.
        from langchain.agents.agent_toolkits import VectorStoreToolkit, create_vectorstore_agent

        toolkit = VectorStoreToolkit(vectorstore_info=vectorstoreinfo, llm=llm)
        return create_vectorstore_agent(llm=llm, toolkit=toolkit)

View file

@ -0,0 +1,23 @@
from langflow import CustomComponent
from langchain.llms import BaseLanguageModel
from langchain.vectorstores import VectorStoreRouterToolkit
from langchain.agents import AgentExecutor
from typing import Callable
class VectorStoreRouterAgentComponent(CustomComponent):
    """Langflow component that builds a router agent over several vector stores.

    Fix: ``AgentExecutor(llm=..., toolkit=...)`` is invalid; use the
    ``create_vectorstore_router_agent`` factory.
    """

    display_name = "VectorStoreRouterAgent"
    description = "Construct an agent from a Vector Store Router."

    def build_config(self):
        """Expose the LLM and router-toolkit inputs in the UI."""
        return {
            "llm": {"display_name": "LLM"},
            "vectorstoreroutertoolkit": {"display_name": "Vector Store Router Toolkit"},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        vectorstoreroutertoolkit: VectorStoreRouterToolkit
    ) -> Callable:
        """Return an AgentExecutor that routes queries across vector stores."""
        from langchain.agents.agent_toolkits import create_vectorstore_router_agent

        return create_vectorstore_router_agent(llm=llm, toolkit=vectorstoreroutertoolkit)

View file

@ -0,0 +1,29 @@
from langflow import CustomComponent
from langchain.agents import ZeroShotAgent
from typing import List, Optional
from langflow.field_typing import (
BaseLanguageModel,
BaseTool,
)
class ZeroShotAgentComponent(CustomComponent):
    """Langflow component that builds a zero-shot ReAct agent.

    Fix: ``ZeroShotAgent(llm=..., tools=...)`` is not the construction API —
    the class is built from an LLM and tools via ``from_llm_and_tools``,
    which assembles the prompt from *prefix* and *suffix*.
    """

    display_name = "ZeroShotAgent"
    description = "Construct an agent from an LLM and tools."

    def build_config(self):
        """Expose LLM, tools, and prompt prefix/suffix inputs in the UI."""
        return {
            "llm": {"display_name": "LLM"},
            "tools": {"display_name": "Tools"},
            "prefix": {"display_name": "Prefix", "multiline": True},
            "suffix": {"display_name": "Suffix", "multiline": True},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        tools: List[BaseTool],
        prefix: Optional[str] = "Answer the following questions as best you can. You have access to the following tools:",
        suffix: Optional[str] = "Begin!\n\nQuestion: {input}\nThought:{agent_scratchpad}",
    ) -> ZeroShotAgent:
        """Return a ZeroShotAgent using *tools* with the given prompt framing."""
        return ZeroShotAgent.from_llm_and_tools(llm=llm, tools=tools, prefix=prefix, suffix=suffix)

View file

@ -0,0 +1,37 @@
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, Chain
from typing import Union, Callable
class CombineDocsChainComponent(CustomComponent):
    """Langflow component that loads a question-answering combine-docs chain.

    Fix: the original called a module-level placeholder ``load_qa_chain`` that
    returned ``None``; delegate to langchain's real factory instead. The valid
    chain types are kept in one place so the UI options and the validation
    cannot drift apart.
    """

    display_name = "CombineDocsChain"
    description = "Load question answering chain."

    # Chain types supported by langchain's load_qa_chain factory.
    CHAIN_TYPES = ("stuff", "map_reduce", "map_rerank", "refine")

    def build_config(self):
        """Expose the LLM input and a chain-type dropdown in the UI."""
        return {
            "llm": {"display_name": "LLM"},
            "chain_type": {
                "display_name": "Chain Type",
                "options": list(self.CHAIN_TYPES),
            },
        }

    def build(
        self,
        llm: BaseLanguageModel,
        chain_type: str,
    ) -> Union[Chain, Callable]:
        """Return the QA chain for *chain_type*; raise ValueError if unknown."""
        if chain_type not in self.CHAIN_TYPES:
            raise ValueError(f"Invalid chain_type: {chain_type}")
        # Local import shadows the placeholder defined later in this module.
        from langchain.chains.question_answering import load_qa_chain

        return load_qa_chain(llm=llm, chain_type=chain_type)
def load_qa_chain(llm: BaseLanguageModel, chain_type: str) -> Union[Chain, Callable]:
    """Return a question-answering chain for *chain_type* driven by *llm*.

    The original placeholder returned ``None``; delegate to langchain's real
    implementation instead.
    """
    from langchain.chains import question_answering

    # Attribute access avoids shadowing this function's own name.
    return question_answering.load_qa_chain(llm=llm, chain_type=chain_type)

View file

@ -0,0 +1,24 @@
from langflow import CustomComponent
from langchain.chains import LLMCheckerChain
from typing import Union, Callable
from langflow.field_typing import (
BaseLanguageModel,
Chain,
)
class LLMCheckerChainComponent(CustomComponent):
    """Langflow component that builds an LLMCheckerChain.

    Fix: ``LLMCheckerChain(llm=llm)`` bypasses the class's factory, which is
    responsible for assembling the internal assertion-checking sub-chain;
    use ``LLMCheckerChain.from_llm`` instead.
    """

    display_name = "LLMCheckerChain"
    description = ""
    documentation = "https://python.langchain.com/docs/modules/chains/additional/llm_checker"

    def build_config(self):
        """Expose the LLM input in the UI."""
        return {
            "llm": {"display_name": "LLM"},
        }

    def build(
        self,
        llm: BaseLanguageModel,
    ) -> Union[Chain, Callable]:
        """Return an LLMCheckerChain driven by *llm*."""
        return LLMCheckerChain.from_llm(llm=llm)

View file

@ -0,0 +1,32 @@
from langflow import CustomComponent
from langchain.chains import LLMChain
from typing import Optional
from langflow.field_typing import (
BaseLanguageModel,
BaseMemory,
)
class LLMMathChainComponent(CustomComponent):
    """Langflow component labelled "LLMMathChain" that builds a chain.

    NOTE(review): despite the display name, build() constructs a plain
    ``LLMChain`` and passes the *llm_chain* argument (itself an LLMChain) as
    ``prompt=``, which is not a prompt template — this looks broken. The
    intended class was presumably ``LLMMathChain`` (which accepts an
    ``llm_chain`` field); confirm before changing, since the declared
    ``-> LLMChain`` return type drives langflow's connection typing.
    """

    display_name = "LLMMathChain"
    description = "Chain that interprets a prompt and executes python code to do math."
    documentation = "https://python.langchain.com/docs/modules/chains/additional/llm_math"

    def build_config(self):
        """Expose LLM, chain, memory, and key-name inputs in the UI."""
        return {
            "llm": {"display_name": "LLM"},
            "llm_chain": {"display_name": "LLM Chain"},
            "memory": {"display_name": "Memory"},
            "input_key": {"display_name": "Input Key"},
            "output_key": {"display_name": "Output Key"},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        llm_chain: LLMChain,
        input_key: str,
        output_key: str,
        memory: Optional[BaseMemory] = None,
    ) -> LLMChain:
        # NOTE(review): prompt=llm_chain passes a chain where a prompt template
        # is expected — see class docstring.
        return LLMChain(llm=llm, prompt=llm_chain, input_key=input_key, output_key=output_key, memory=memory)

View file

@ -0,0 +1,41 @@
from langflow import CustomComponent
from langchain.chains import BaseRetrievalQA
from typing import Optional, Union, Callable
from langflow.field_typing import (
BaseCombineDocumentsChain,
BaseMemory,
BaseRetriever,
)
class RetrievalQAComponent(CustomComponent):
    """Langflow component for question-answering against an index.

    Fix: ``BaseRetrievalQA`` is the abstract base class and cannot be
    instantiated; build the concrete ``RetrievalQA`` chain instead (which
    satisfies the declared BaseRetrievalQA return type).
    """

    display_name = "RetrievalQA"
    description = "Chain for question-answering against an index."

    def build_config(self):
        """Expose the chain, retriever, memory, and key/flag inputs in the UI."""
        return {
            "combine_documents_chain": {"display_name": "Combine Documents Chain"},
            "retriever": {"display_name": "Retriever"},
            "memory": {"display_name": "Memory", "required": False},
            "input_key": {"display_name": "Input Key"},
            "output_key": {"display_name": "Output Key"},
            "return_source_documents": {"display_name": "Return Source Documents"},
        }

    def build(
        self,
        combine_documents_chain: BaseCombineDocumentsChain,
        retriever: BaseRetriever,
        memory: Optional[BaseMemory] = None,
        input_key: str = "query",
        output_key: str = "result",
        return_source_documents: bool = True,
    ) -> Union[BaseRetrievalQA, Callable]:
        """Return a RetrievalQA chain over *retriever* using the given sub-chain."""
        from langchain.chains import RetrievalQA

        return RetrievalQA(
            combine_documents_chain=combine_documents_chain,
            retriever=retriever,
            memory=memory,
            input_key=input_key,
            output_key=output_key,
            return_source_documents=return_source_documents,
        )

View file

@ -0,0 +1,35 @@
from langflow import CustomComponent
from langchain.chains import RetrievalQAWithSourcesChain
from typing import Optional
from langflow.field_typing import (
BaseMemory,
BaseRetriever,
Chain,
)
class RetrievalQAWithSourcesChainComponent(CustomComponent):
    """Langflow component wrapping langchain's RetrievalQAWithSourcesChain.

    Builds a question-answering chain that also reports which source
    documents supported each answer.
    """

    display_name = "RetrievalQAWithSourcesChain"
    description = "Question-answering with sources over an index."

    def build_config(self):
        """Describe the UI fields: sub-chain, retriever, memory, and flag."""
        config = {}
        config["combine_documents_chain"] = {"display_name": "Combine Documents Chain"}
        config["retriever"] = {"display_name": "Retriever"}
        config["memory"] = {"display_name": "Memory", "optional": True}
        config["return_source_documents"] = {
            "display_name": "Return Source Documents",
            "default": True,
            "advanced": True,
        }
        return config

    def build(
        self,
        combine_documents_chain: Chain,
        retriever: BaseRetriever,
        memory: Optional[BaseMemory] = None,
        return_source_documents: Optional[bool] = True,
    ) -> RetrievalQAWithSourcesChain:
        """Return the assembled RetrievalQAWithSourcesChain."""
        chain_kwargs = {
            "combine_documents_chain": combine_documents_chain,
            "retriever": retriever,
            "memory": memory,
            "return_source_documents": return_source_documents,
        }
        return RetrievalQAWithSourcesChain(**chain_kwargs)

View file

@ -0,0 +1,33 @@
from langflow import CustomComponent
from langchain.chains import Chain
from typing import Callable, Union
from langflow.field_typing import (
BasePromptTemplate,
BaseLanguageModel,
)
# Placeholder SQLDatabase class. In practice, replace this with the actual class or import it if available.
class SQLDatabase:
    """Stand-in for langchain's SQLDatabase, used only as a type annotation here."""

    pass
class SQLDatabaseChainComponent(CustomComponent):
    """Langflow component intended to build a SQL-database chain.

    NOTE(review): build() instantiates the generic ``Chain`` base class with
    ``db``/``llm``/``prompt`` kwargs, which is not a working construction —
    the in-code comments acknowledge it is a placeholder. The intended class
    is presumably ``SQLDatabaseChain`` (``from_llm(llm, db, prompt=prompt)``);
    confirm where that class lives in the pinned langchain version before
    wiring it in.
    """

    display_name = "SQLDatabaseChain"
    description = ""

    def build_config(self):
        """Expose the database, LLM, and prompt inputs in the UI."""
        return {
            "db": {"display_name": "Database"},
            "llm": {"display_name": "LLM"},
            "prompt": {"display_name": "Prompt"},
        }

    def build(
        self,
        db: SQLDatabase,
        llm: BaseLanguageModel,
        prompt: BasePromptTemplate,
    ) -> Union[Chain, Callable]:
        # Assuming there's a specific chain for SQLDatabase in the langchain library:
        # Replace `Chain` with the specific chain class that interfaces with the SQLDatabase.
        return Chain(db=db, llm=llm, prompt=prompt)

View file

@ -0,0 +1,31 @@
from langflow import CustomComponent
from langchain.field_typing import Document
from typing import Optional, Dict
class AZLyricsLoaderComponent(CustomComponent):
    """Langflow component that loads AZLyrics web pages into Documents.

    Fix: the original returned ``AZLyricsLoader(...)`` without importing it
    (NameError) and passed ``metadata``, which the loader does not accept.
    Load the page and merge *metadata* into each resulting Document.
    """

    display_name = "AZLyricsLoader"
    description = "Load `AZLyrics` webpages."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/azlyrics"

    def build_config(self):
        """Expose metadata and web-page inputs in the UI."""
        return {
            "metadata": {
                "display_name": "Metadata",
                "type": "dict",
                "default": {},
                "show": True
            },
            "web_path": {
                "display_name": "Web Page",
                "type": "str",
                "required": True,
                "show": True
            },
        }

    def build(self, metadata: Optional[Dict] = None, web_path: str = "") -> Document:
        """Load *web_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import AZLyricsLoader

        docs = AZLyricsLoader(web_path=web_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,31 @@
from langflow import CustomComponent
from langchain.field_typing import Document
from typing import Optional, Dict
class AirbyteJSONLoaderComponent(CustomComponent):
    """Langflow component that loads local Airbyte JSON files into Documents.

    Fix: ``AirbyteJSONLoader`` was referenced but never imported (NameError),
    and the loader takes no ``metadata`` kwarg; merge metadata after loading.
    """

    display_name = "AirbyteJSONLoader"
    description = "Load local `Airbyte` json files."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/airbyte_json"

    def build_config(self):
        """Expose a JSON file picker and optional metadata in the UI."""
        return {
            "file_path": {
                "display_name": "File Path",
                "type": "file",
                "fileTypes": ["json"],
                "required": True,
            },
            "metadata": {
                "display_name": "Metadata",
                "type": "dict",
                "required": False,
            },
        }

    def build(self, file_path: str, metadata: Optional[Dict] = None) -> Document:
        """Load *file_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import AirbyteJSONLoader

        docs = AirbyteJSONLoader(file_path=file_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,33 @@
from langflow import CustomComponent
from langchain.field_typing import Document
from typing import Optional, Dict
class BSHTMLLoaderComponent(CustomComponent):
    """Langflow component that loads HTML files via BeautifulSoup.

    Fix: the original raised NotImplementedError; langchain provides
    ``BSHTMLLoader``, so load the file and merge *metadata* into the result.
    """

    display_name = "BSHTMLLoader"
    description = "Load `HTML` files and parse them with `beautiful soup`."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/how_to/html"

    def build_config(self):
        """Expose an HTML file picker and optional metadata in the UI."""
        return {
            "file_path": {
                "display_name": "File Path",
                "required": True,
                "show": True,
                "type": "file",
                "suffixes": [".html"],
                "file_types": ["html"],
            },
            "metadata": {
                "display_name": "Metadata",
                "required": False,
                "show": True,
                "type": "dict",
            },
        }

    def build(self, file_path: str, metadata: Optional[Dict] = None) -> Document:
        """Parse *file_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import BSHTMLLoader

        docs = BSHTMLLoader(file_path=file_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,30 @@
from langchain import CustomComponent
from typing import Optional, Dict, List
from langchain.loaders import CSVLoader
from langchain.documents import Document
class CSVLoaderComponent(CustomComponent):
    """Langflow component that loads a CSV file into a list of Documents.

    Fix: langchain's ``CSVLoader`` does not accept a ``metadata`` kwarg;
    attach the caller-supplied metadata to each Document after loading.
    """

    display_name = "CSVLoader"
    description = "Load a `CSV` file into a list of Documents."

    def build_config(self):
        """Expose a CSV file picker and optional metadata in the UI."""
        return {
            "file_path": {
                "display_name": "File Path",
                "required": True,
                "suffixes": [".csv"],
                "file_types": ["csv"],
            },
            "metadata": {
                "display_name": "Metadata",
                "required": False,
            },
        }

    def build(
        self,
        file_path: str,
        metadata: Optional[Dict[str, str]] = None,
    ) -> List[Document]:
        """Load one Document per CSV row, each tagged with *metadata*."""
        from langchain.document_loaders import CSVLoader

        docs = CSVLoader(file_path=file_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,33 @@
from langflow import CustomComponent
from langchain.documents import Document
from typing import Optional, Dict
from langchain.field_typing import TemplateField
class CoNLLULoaderComponent(CustomComponent):
    """Langflow component that loads CoNLL-U files into Documents.

    Fix: the original returned a bare ``Document(file_path=...)`` instead of
    actually parsing the file; use langchain's ``CoNLLULoader``.
    """

    display_name = "CoNLLULoader"
    description = "Load `CoNLL-U` files."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/conll-u"

    def build_config(self):
        """Expose a .conllu file picker and optional metadata in the UI."""
        return {
            "file_path": TemplateField(
                display_name="File Path",
                required=True,
                type="file",
                file_types=["conllu"],
                suffixes=['.conllu'],
            ),
            "metadata": TemplateField(
                display_name="Metadata",
                required=False,
                type="dict",
            ),
        }

    def build(self, file_path: str, metadata: Optional[Dict[str, str]] = None) -> Document:
        """Parse *file_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import CoNLLULoader

        docs = CoNLLULoader(file_path=file_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,24 @@
from langflow import CustomComponent
from langchain.document_loaders import Document
from typing import Optional, Dict
class CollegeConfidentialLoaderComponent(CustomComponent):
    """Langflow component that loads College Confidential web pages.

    Fix: ``CollegeConfidentialLoader`` was referenced but never imported
    (NameError), and the loader takes no ``metadata`` kwarg.
    """

    display_name = "CollegeConfidentialLoader"
    description = "Load `College Confidential` webpages."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/college_confidential"

    def build_config(self):
        """Expose metadata and web-page inputs in the UI."""
        return {
            "metadata": {"display_name": "Metadata", "default": {}},
            "web_path": {"display_name": "Web Page", "required": True},
        }

    def build(
        self,
        web_path: str,
        metadata: Optional[Dict] = None,
    ) -> Document:
        """Load *web_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import CollegeConfidentialLoader

        docs = CollegeConfidentialLoader(web_path=web_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,42 @@
from langflow import CustomComponent
from langchain.data_connections import Document
from typing import Optional, Dict, Any
class DirectoryLoaderComponent(CustomComponent):
    """Langflow component that loads documents from a local directory.

    Fix: the original constructed a ``Document`` with loader-style kwargs
    (glob, path, ...), which is meaningless; use langchain's
    ``DirectoryLoader`` and merge *metadata* into the loaded Documents.
    """

    display_name = "DirectoryLoader"
    description = "Load from a directory."

    def build_config(self) -> Dict[str, Any]:
        """Expose directory-loading options in the UI."""
        return {
            "glob": {"display_name": "Glob Pattern", "default": "**/*.txt"},
            "load_hidden": {"display_name": "Load Hidden Files", "default": False, "advanced": True},
            "max_concurrency": {"display_name": "Max Concurrency", "default": 10, "advanced": True},
            "metadata": {"display_name": "Metadata", "default": {}},
            "path": {"display_name": "Local Directory"},
            "recursive": {"display_name": "Recursive", "default": True, "advanced": True},
            "silent_errors": {"display_name": "Silent Errors", "default": False, "advanced": True},
            "use_multithreading": {"display_name": "Use Multithreading", "default": True, "advanced": True},
        }

    def build(
        self,
        glob: str,
        path: str,
        load_hidden: Optional[bool] = False,
        max_concurrency: Optional[int] = 10,
        metadata: Optional[Dict[str, Any]] = None,
        recursive: Optional[bool] = True,
        silent_errors: Optional[bool] = False,
        use_multithreading: Optional[bool] = True,
    ) -> Document:
        """Load every file under *path* matching *glob*; tag with *metadata*."""
        from langchain.document_loaders import DirectoryLoader

        docs = DirectoryLoader(
            path,
            glob=glob,
            load_hidden=load_hidden,
            max_concurrency=max_concurrency,
            recursive=recursive,
            silent_errors=silent_errors,
            use_multithreading=use_multithreading,
        ).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,32 @@
from langflow import CustomComponent
from langchain.field_typing import Document
from typing import Optional, Dict
class EverNoteLoaderComponent(CustomComponent):
    """Langflow component that loads EverNote export files into Documents.

    Fix: ``EverNoteLoader`` was referenced but never imported (NameError),
    and the loader takes no ``metadata`` kwarg.
    """

    display_name = "EverNoteLoader"
    description = "Load from `EverNote`."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/evernote"

    def build_config(self):
        """Expose an .xml export file picker and optional metadata in the UI."""
        return {
            "file_path": {
                "display_name": "File Path",
                "required": True,
                "suffixes": [".xml"],
                "show": True,
                "type": "file",
                "file_types": ["xml"],
            },
            "metadata": {
                "display_name": "Metadata",
                "required": False,
                "show": True,
                "type": "dict",
            },
        }

    def build(self, file_path: str, metadata: Optional[Dict] = None) -> Document:
        """Load *file_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import EverNoteLoader

        docs = EverNoteLoader(file_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,30 @@
from langflow import CustomComponent
from langchain.documents import Document
from typing import Optional, Dict
class FacebookChatLoaderComponent(CustomComponent):
    """Langflow component that loads a Facebook Chat JSON dump into Documents.

    Fix: ``FacebookChatLoader`` was referenced but never imported (NameError);
    the real loader takes the file path as ``path`` and has no ``metadata``
    kwarg.
    """

    display_name = "FacebookChatLoader"
    description = "Load `Facebook Chat` messages directory dump."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/facebook_chat"

    def build_config(self):
        """Expose a JSON file picker and optional metadata in the UI."""
        return {
            "file_path": {
                "display_name": "File Path",
                "required": True,
                "suffixes": [".json"],
                "file_types": ["json"],
            },
            "metadata": {
                "display_name": "Metadata",
                "required": False,
            },
        }

    def build(self, file_path: str, metadata: Optional[Dict] = None) -> Document:
        """Load *file_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import FacebookChatLoader

        docs = FacebookChatLoader(path=file_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,26 @@
from langflow import CustomComponent
from langchain.field_typing import Document
from typing import Optional, Dict
class GitbookLoaderComponent(CustomComponent):
    """Langflow component that loads GitBook pages into Documents.

    Fix: ``GitbookLoader`` was referenced but never imported (NameError),
    and the loader takes no ``metadata`` kwarg.
    """

    display_name = "GitbookLoader"
    description = "Load `GitBook` data."

    def build_config(self):
        """Expose metadata and web-page inputs in the UI."""
        return {
            "metadata": {
                "display_name": "Metadata",
                "default": {},
            },
            "web_page": {
                "display_name": "Web Page",
                "required": True,
            },
        }

    def build(self, metadata: Optional[Dict] = None, web_page: str = "") -> Document:
        """Load *web_page* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import GitbookLoader

        docs = GitbookLoader(web_page=web_page).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,31 @@
from langchain import CustomComponent
from langchain.document_loaders import BaseLoader
from typing import Optional, Dict
class HNLoaderComponent(CustomComponent):
    """Langflow component that builds a Hacker News loader.

    Fix: the original returned ``HackerNewsLoader(...)``, an undefined name;
    langchain's class is ``HNLoader``. The declared return type is
    ``BaseLoader``, so the loader instance itself is returned.
    """

    display_name = "HNLoader"
    description = "Load `Hacker News` data."

    def build_config(self):
        """Expose metadata and web-page inputs in the UI."""
        return {
            "metadata": {
                "display_name": "Metadata",
                "default": {},
                "required": False
            },
            "web_path": {
                "display_name": "Web Page",
                "required": True
            },
        }

    def build(
        self,
        web_path: str,
        metadata: Optional[Dict] = None,
    ) -> BaseLoader:
        """Return an HNLoader for *web_path*."""
        from langchain.document_loaders import HNLoader

        # NOTE(review): HNLoader takes no metadata argument; since a loader
        # (not loaded Documents) is returned here, *metadata* cannot be
        # attached at this stage and is intentionally unused.
        return HNLoader(web_path=web_path)

View file

@ -0,0 +1,20 @@
from langflow import CustomComponent
from langchain.field_typing import Document
from typing import Optional, Dict
class IFixitLoaderComponent(CustomComponent):
    """Langflow component that loads iFixit pages into Documents.

    Fix: ``IFixitLoader`` was referenced but never imported (NameError),
    and the loader takes no ``metadata`` kwarg.
    """

    display_name = "IFixitLoader"
    description = "Load `iFixit` repair guides, device wikis and answers."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/ifixit"

    def build_config(self):
        """Expose metadata and web-page inputs in the UI."""
        return {
            "metadata": {"display_name": "Metadata", "type": "dict", "default": {}},
            "web_path": {"display_name": "Web Page", "type": "str"},
        }

    def build(self, web_path: str, metadata: Optional[Dict] = None) -> Document:
        """Load *web_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import IFixitLoader

        docs = IFixitLoader(web_path=web_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,23 @@
from langflow import CustomComponent
from langchain.field_typing import Document
from typing import Dict, Optional
class IMSDbLoaderComponent(CustomComponent):
    """Langflow component that loads IMSDb (movie script) pages into Documents.

    Fix: ``IMSDbLoader`` was referenced but never imported (NameError),
    and the loader takes no ``metadata`` kwarg.
    """

    display_name = "IMSDbLoader"
    description = "Load `IMSDb` webpages."

    def build_config(self):
        """Expose metadata and web-page inputs in the UI."""
        return {
            "metadata": {"display_name": "Metadata", "type": "dict"},
            "web_path": {"display_name": "Web Page", "type": "str"},
        }

    def build(
        self,
        metadata: Optional[Dict] = None,
        web_path: str = "",
    ) -> Document:
        """Load *web_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import IMSDbLoader

        docs = IMSDbLoader(web_path=web_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,23 @@
from langflow import CustomComponent
from langchain.documents import Document
from typing import Optional, Dict
class PyPDFDirectoryLoaderComponent(CustomComponent):
    """Langflow component that loads all PDFs in a directory via pypdf.

    Fix: ``PyPDFDirectoryLoader`` was referenced but never imported
    (NameError), and the loader takes no ``metadata`` kwarg.
    """

    display_name = "PyPDFDirectoryLoader"
    description = "Load a directory with `PDF` files using `pypdf` and chunks at character level."

    def build_config(self):
        """Expose metadata and directory-path inputs in the UI."""
        return {
            "metadata": {"display_name": "Metadata", "required": False},
            "path": {"display_name": "Local directory", "required": True},
        }

    def build(
        self,
        path: str,
        metadata: Optional[Dict] = None,
    ) -> Document:
        """Load every PDF under *path*; tag Documents with *metadata*."""
        from langchain.document_loaders import PyPDFDirectoryLoader

        docs = PyPDFDirectoryLoader(path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,31 @@
from langflow import CustomComponent
from langchain.document_loaders import BaseLoader
from typing import Optional, Dict
class PyPDFLoaderComponent(CustomComponent):
    """Langflow component that builds a pypdf-based PDF loader.

    Fix: ``PyPDFLoader`` was referenced but never imported (NameError), and
    the loader takes no ``metadata`` kwarg. The declared return type is
    ``BaseLoader``, so the loader instance itself is returned.
    """

    display_name = "PyPDFLoader"
    description = "Load PDF using pypdf into list of documents"
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/how_to/pdf"

    def build_config(self):
        """Expose a PDF file picker and optional metadata in the UI."""
        return {
            "file_path": {
                "display_name": "File Path",
                "required": True,
                "type": "file",
                "fileTypes": ["pdf"],
                "show": True,
            },
            "metadata": {
                "display_name": "Metadata",
                "required": False,
                "type": "dict",
                "show": True,
            }
        }

    def build(self, file_path: str, metadata: Optional[Dict] = None) -> BaseLoader:
        """Return a PyPDFLoader for *file_path*."""
        from langchain.document_loaders import PyPDFLoader

        # NOTE(review): since a loader (not loaded Documents) is returned,
        # *metadata* cannot be attached here and is intentionally unused.
        return PyPDFLoader(file_path=file_path)

View file

@ -0,0 +1,21 @@
from langflow import CustomComponent
from langchain.field_typing import Document
from typing import Dict, Optional
class ReadTheDocsLoaderComponent(CustomComponent):
    """Langflow component that loads a ReadTheDocs documentation dump.

    Fix: the original returned a bare ``Document(path=...)`` instead of
    parsing the directory; use langchain's ``ReadTheDocsLoader``.
    """

    display_name = "ReadTheDocsLoader"
    description = "Load `ReadTheDocs` documentation directory."

    def build_config(self):
        """Expose metadata and directory-path inputs in the UI."""
        return {
            "metadata": {"display_name": "Metadata", "default": {}},
            "path": {"display_name": "Local directory", "required": True},
        }

    def build(
        self,
        path: str,
        metadata: Optional[Dict] = None,
    ) -> Document:
        """Load the docs under *path*; tag Documents with *metadata*."""
        from langchain.document_loaders import ReadTheDocsLoader

        docs = ReadTheDocsLoader(path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,25 @@
from langflow import CustomComponent
from langchain.documents import Document
from typing import Optional, Dict
class SRTLoaderComponent(CustomComponent):
    """Langflow component that loads .srt subtitle files into Documents.

    Fix: the original returned a bare ``Document(file_path=...)`` instead of
    parsing the file; use langchain's ``SRTLoader``.
    """

    display_name = "SRTLoader"
    description = "Load `.srt` (subtitle) files."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/subtitle"

    def build_config(self):
        """Expose an .srt file picker and optional metadata in the UI."""
        return {
            "file_path": {
                "display_name": "File Path",
                "required": True,
                "fileTypes": ["srt"],
            },
            "metadata": {
                "display_name": "Metadata",
                "required": False,
            },
        }

    def build(self, file_path: str, metadata: Optional[Dict] = None) -> Document:
        """Load *file_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import SRTLoader

        docs = SRTLoader(file_path=file_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,26 @@
from langflow import CustomComponent
from typing import Optional, Dict
class SlackDirectoryLoaderComponent(CustomComponent):
    """Langflow component that loads a Slack export zip into Documents.

    Fix: ``SlackDirectoryLoader`` was referenced but never imported
    (NameError), and the loader takes no ``metadata`` kwarg.
    """

    display_name = "SlackDirectoryLoader"
    description = "Load from a `Slack` directory dump."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/slack"

    def build_config(self):
        """Expose zip-path, metadata, and workspace-URL inputs in the UI."""
        return {
            "zip_path": {"display_name": "Path to zip file"},
            "metadata": {"display_name": "Metadata"},
            "workspace_url": {"display_name": "Workspace URL"},
        }

    def build(
        self,
        zip_path: str,
        metadata: Optional[Dict] = None,
        workspace_url: Optional[str] = None,
    ) -> 'Document':
        """Load the export at *zip_path*; tag Documents with *metadata*."""
        from langchain.document_loaders import SlackDirectoryLoader

        docs = SlackDirectoryLoader(zip_path=zip_path, workspace_url=workspace_url).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,28 @@
from langflow import CustomComponent
from langchain.data_connections import Document
from typing import Optional, Dict
class TextLoaderComponent(CustomComponent):
    """Langflow component that loads a plain-text file into Documents.

    Fix: the original returned a bare ``Document(file_path=...)`` instead of
    reading the file; use langchain's ``TextLoader``.
    """

    display_name = "TextLoader"
    description = "Load text file."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/"

    def build_config(self):
        """Expose a .txt file picker and optional metadata in the UI."""
        return {
            "file_path": {
                "display_name": "File Path",
                "required": True,
                "type": "file",
                "suffixes": [".txt"],
            },
            "metadata": {
                "display_name": "Metadata",
                "required": False,
                "type": "dict",
                "default": {},
            },
        }

    def build(self, file_path: str, metadata: Optional[Dict] = None) -> Document:
        """Read *file_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import TextLoader

        docs = TextLoader(file_path=file_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,21 @@
from langchain import CustomComponent
from langchain.field_typing import Document
from typing import Optional, Dict
class UnstructuredHTMLLoaderComponent(CustomComponent):
    """Langflow component that loads HTML files via Unstructured.

    Fix: ``UnstructuredHTMLLoader`` was referenced but never imported
    (NameError), and the loader takes no ``metadata`` kwarg.
    """

    display_name = "UnstructuredHTMLLoader"
    description = "Load `HTML` files using `Unstructured`."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/how_to/html"

    def build_config(self):
        """Expose an HTML file picker and optional metadata in the UI."""
        return {
            "file_path": {"display_name": "File Path", "type": "file", "fileTypes": ["html"]},
            "metadata": {"display_name": "Metadata"},
        }

    def build(self, file_path: str, metadata: Optional[Dict] = None) -> Document:
        """Load *file_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import UnstructuredHTMLLoader

        docs = UnstructuredHTMLLoader(file_path=file_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,32 @@
from langflow import CustomComponent
from langchain.document_loaders import Document
from typing import Optional, Dict
class UnstructuredPowerPointLoaderComponent(CustomComponent):
    """Langflow component that loads PowerPoint files via Unstructured.

    Fix: the original called ``self.get_loader_class()``, a method that does
    not exist (AttributeError); use langchain's
    ``UnstructuredPowerPointLoader`` directly.
    """

    display_name = "UnstructuredPowerPointLoader"
    description = "Load `Microsoft PowerPoint` files using `Unstructured`."

    def build_config(self):
        """Expose a pptx/ppt file picker and optional metadata in the UI."""
        return {
            "file_path": {
                "display_name": "File Path",
                "type": "file",
                "fileTypes": ["pptx", "ppt"],
            },
            "metadata": {
                "display_name": "Metadata",
                "type": "dict",
            },
        }

    def build(
        self,
        file_path: str,
        metadata: Optional[Dict] = None,
    ) -> Document:
        """Load *file_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import UnstructuredPowerPointLoader

        docs = UnstructuredPowerPointLoader(file_path=file_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,28 @@
from langchain import CustomComponent
from langchain.field_typing import Document
from typing import Optional, Dict
class UnstructuredWordDocumentLoaderComponent(CustomComponent):
    """Langflow component that loads Word documents via Unstructured.

    Fix: the original returned a bare ``Document(file_path=...)`` instead of
    parsing the file; use langchain's ``UnstructuredWordDocumentLoader``.
    """

    display_name = "UnstructuredWordDocumentLoader"
    description = "Load `Microsoft Word` file using `Unstructured`."
    documentation = "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/microsoft_word"

    def build_config(self):
        """Expose a docx/doc file picker and optional metadata in the UI."""
        return {
            "file_path": {
                "display_name": "File Path",
                "required": True,
                "type": "file",
                "suffixes": [".docx", ".doc"],
            },
            "metadata": {
                "display_name": "Metadata",
                "required": False,
                "type": "dict"
            },
        }

    def build(self, file_path: str, metadata: Optional[Dict] = None) -> Document:
        """Load *file_path* and return the Documents, tagged with *metadata*."""
        from langchain.document_loaders import UnstructuredWordDocumentLoader

        docs = UnstructuredWordDocumentLoader(file_path=file_path).load()
        if metadata:
            for doc in docs:
                doc.metadata.update(metadata)
        return docs

View file

@ -0,0 +1,34 @@
from langflow import CustomComponent
from langchain.embeddings import CohereEmbeddings
from typing import Optional, Any
class CohereEmbeddingsComponent(CustomComponent):
    """Langflow component wrapping langchain's CohereEmbeddings model."""

    display_name = "CohereEmbeddings"
    description = "Cohere embedding models."

    def build_config(self):
        """Describe the UI fields; advanced ones stay collapsed by default."""
        config = {
            "async_client": {"display_name": "Async Client", "advanced": True},
            "client": {"display_name": "Client", "advanced": True},
            "cohere_api_key": {"display_name": "Cohere API Key"},
            "model": {"display_name": "Model", "default": "embed-english-v2.0", "advanced": True},
            "truncate": {"display_name": "Truncate", "advanced": True},
        }
        return config

    def build(
        self,
        async_client: Optional[Any] = None,
        client: Optional[Any] = None,
        cohere_api_key: Optional[str] = None,
        model: str = "embed-english-v2.0",
        truncate: Optional[str] = None,
    ) -> CohereEmbeddings:
        """Return a configured CohereEmbeddings instance."""
        embedding_kwargs = {
            "async_client": async_client,
            "client": client,
            "cohere_api_key": cohere_api_key,
            "model": model,
            "truncate": truncate,
        }
        return CohereEmbeddings(**embedding_kwargs)

View file

@ -0,0 +1,37 @@
from langflow import CustomComponent
from typing import Optional, Any, Dict
from langchain.field_typing import Embeddings
class HuggingFaceEmbeddingsComponent(CustomComponent):
    """Langflow component for HuggingFace sentence-transformers embeddings.

    Fix: the original instantiated ``Embeddings`` — the abstract interface
    type from langflow.field_typing — which cannot hold these parameters;
    build the concrete ``HuggingFaceEmbeddings`` model instead.
    """

    display_name = "HuggingFaceEmbeddings"
    description = "HuggingFace sentence_transformers embedding models."
    documentation = "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/sentence_transformers"

    def build_config(self):
        """Expose model-name plus advanced client/kwargs inputs in the UI."""
        return {
            "cache_folder": {"display_name": "Cache Folder", "advanced": True},
            "client": {"display_name": "Client", "advanced": True},
            "encode_kwargs": {"display_name": "Encode Kwargs", "advanced": True},
            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
            "model_name": {"display_name": "Model Name"},
            "multi_process": {"display_name": "Multi Process", "advanced": True},
        }

    def build(
        self,
        cache_folder: Optional[str] = None,
        client: Optional[Any] = None,
        encode_kwargs: Optional[Dict] = None,
        model_kwargs: Optional[Dict] = None,
        model_name: str = "sentence-transformers/all-mpnet-base-v2",
        multi_process: bool = False,
    ) -> Embeddings:
        """Return a configured HuggingFaceEmbeddings instance."""
        from langchain.embeddings import HuggingFaceEmbeddings

        return HuggingFaceEmbeddings(
            cache_folder=cache_folder,
            client=client,
            encode_kwargs=encode_kwargs or {},
            model_kwargs=model_kwargs or {},
            model_name=model_name,
            multi_process=multi_process,
        )

View file

@ -0,0 +1,75 @@
from langflow import CustomComponent
from typing import Optional, Set, Dict, Any, Union, Callable
from langchain.embeddings import OpenAIEmbeddings
class OpenAIEmbeddingsComponent(CustomComponent):
    """Langflow component that exposes OpenAI embedding models."""

    display_name = "OpenAIEmbeddings"
    description = "OpenAI embedding models"

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "allowed_special": {"display_name": "Allowed Special", "advanced": True},
            "disallowed_special": {"display_name": "Disallowed Special", "advanced": True},
            "chunk_size": {"display_name": "Chunk Size", "advanced": True},
            "client": {"display_name": "Client", "advanced": True},
            "deployment": {"display_name": "Deployment", "advanced": True},
            "embedding_ctx_length": {"display_name": "Embedding Context Length", "advanced": True},
            "max_retries": {"display_name": "Max Retries", "advanced": True},
            "model": {"display_name": "Model", "advanced": True},
            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
            "openai_api_base": {"display_name": "OpenAI API Base", "advanced": True},
            "openai_api_key": {"display_name": "OpenAI API Key"},
            "openai_api_type": {"display_name": "OpenAI API Type", "advanced": True},
            "openai_api_version": {"display_name": "OpenAI API Version", "advanced": True},
            "openai_organization": {"display_name": "OpenAI Organization", "advanced": True},
            "openai_proxy": {"display_name": "OpenAI Proxy", "advanced": True},
            "request_timeout": {"display_name": "Request Timeout", "advanced": True},
            "show_progress_bar": {"display_name": "Show Progress Bar", "advanced": True},
            "skip_empty": {"display_name": "Skip Empty", "advanced": True},
            "tiktoken_model_name": {"display_name": "TikToken Model Name"},
        }

    def build(
        self,
        allowed_special: Optional[Set[str]] = None,
        disallowed_special: str = "all",
        chunk_size: Optional[int] = 1000,
        client: Optional[Any] = None,
        deployment: str = "text-embedding-ada-002",
        embedding_ctx_length: Optional[int] = 8191,
        max_retries: Optional[int] = 6,
        model: str = "text-embedding-ada-002",
        model_kwargs: Optional[Dict[str, Any]] = None,
        openai_api_base: Optional[str] = None,
        openai_api_key: Optional[str] = '',
        openai_api_type: Optional[str] = None,
        openai_api_version: Optional[str] = None,
        openai_organization: Optional[str] = None,
        openai_proxy: Optional[str] = None,
        request_timeout: Optional[float] = None,
        show_progress_bar: Optional[bool] = False,
        skip_empty: Optional[bool] = False,
        tiktoken_model_name: Optional[str] = None,
    ) -> Union[OpenAIEmbeddings, Callable]:
        """Build an ``OpenAIEmbeddings`` instance from the given settings.

        Fix: the original declared a mutable default argument
        (``allowed_special: Optional[Set[str]] = set()``), which is shared
        across all calls; ``None`` is used as the sentinel instead and a
        fresh empty set is substituted per call, preserving behavior.
        """
        if allowed_special is None:
            allowed_special = set()
        return OpenAIEmbeddings(
            allowed_special=allowed_special,
            disallowed_special=disallowed_special,
            chunk_size=chunk_size,
            client=client,
            deployment=deployment,
            embedding_ctx_length=embedding_ctx_length,
            max_retries=max_retries,
            model=model,
            model_kwargs=model_kwargs,
            openai_api_base=openai_api_base,
            openai_api_key=openai_api_key,
            openai_api_type=openai_api_type,
            openai_api_version=openai_api_version,
            openai_organization=openai_organization,
            openai_proxy=openai_proxy,
            request_timeout=request_timeout,
            show_progress_bar=show_progress_bar,
            skip_empty=skip_empty,
            tiktoken_model_name=tiktoken_model_name,
        )

View file

@ -0,0 +1,60 @@
from langflow import CustomComponent
from langchain.embeddings import VertexAIEmbeddings
from typing import Optional, List
class VertexAIEmbeddingsComponent(CustomComponent):
    """Langflow component for Google Cloud VertexAI embedding models."""

    display_name = "VertexAIEmbeddings"
    description = "Google Cloud VertexAI embedding models."

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "client": {"display_name": "Client", "advanced": True},
            "credentials": {"display_name": "Credentials", "default": '', "file_types": ['json']},
            "location": {"display_name": "Location", "default": 'us-central1', "advanced": True},
            "max_output_tokens": {"display_name": "Max Output Tokens", "default": 128},
            "max_retries": {"display_name": "Max Retries", "default": 6, "advanced": True},
            "model_name": {"display_name": "Model Name", "default": 'textembedding-gecko'},
            "n": {"display_name": "N", "default": 1, "advanced": True},
            "project": {"display_name": "Project", "advanced": True},
            "request_parallelism": {"display_name": "Request Parallelism", "default": 5, "advanced": True},
            "stop": {"display_name": "Stop", "advanced": True},
            "streaming": {"display_name": "Streaming", "default": False, "advanced": True},
            "temperature": {"display_name": "Temperature", "default": 0.0},
            "top_k": {"display_name": "Top K", "default": 40, "advanced": True},
            "top_p": {"display_name": "Top P", "default": 0.95, "advanced": True},
        }

    def build(
        self,
        client: Optional[str] = None,
        credentials: Optional[str] = None,
        location: str = 'us-central1',
        max_output_tokens: int = 128,
        max_retries: int = 6,
        model_name: str = 'textembedding-gecko',
        n: int = 1,
        project: Optional[str] = None,
        request_parallelism: int = 5,
        stop: Optional[List[str]] = None,
        streaming: bool = False,
        temperature: float = 0.0,
        top_k: int = 40,
        top_p: float = 0.95,
    ) -> VertexAIEmbeddings:
        """Create a ``VertexAIEmbeddings`` instance from the given settings."""
        # Every argument is forwarded to the langchain wrapper unchanged;
        # collecting them in one mapping keeps the constructor call compact.
        embedding_kwargs = {
            "client": client,
            "credentials": credentials,
            "location": location,
            "max_output_tokens": max_output_tokens,
            "max_retries": max_retries,
            "model_name": model_name,
            "n": n,
            "project": project,
            "request_parallelism": request_parallelism,
            "stop": stop,
            "streaming": streaming,
            "temperature": temperature,
            "top_k": top_k,
            "top_p": top_p,
        }
        return VertexAIEmbeddings(**embedding_kwargs)

View file

@ -0,0 +1,61 @@
from langflow import CustomComponent
from pydantic import SecretStr
from typing import Optional, Dict, Any
from langchain.field_typing import BaseLanguageModel
class AnthropicComponent(CustomComponent):
    """Langflow component exposing Anthropic large language models."""

    display_name = "Anthropic"
    description = "Anthropic large language models."

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "anthropic_api_key": {
                "display_name": "Anthropic API Key",
                "type": SecretStr,
            },
            "anthropic_api_url": {
                "display_name": "Anthropic API URL",
                "type": str,
            },
            "model_kwargs": {
                "display_name": "Model Kwargs",
                "type": Dict[str, Any],
                "advanced": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "type": float,
            },
        }

    def build(
        self,
        anthropic_api_key: Optional[SecretStr],
        anthropic_api_url: Optional[str],
        model_kwargs: Optional[Dict[str, Any]],
        temperature: Optional[float] = None,
    ) -> BaseLanguageModel:
        """Build langchain's ``Anthropic`` LLM wrapper.

        Fix: the original returned a hand-rolled placeholder subclass of
        ``BaseLanguageModel`` that only echoed a canned string; the real
        ``Anthropic`` wrapper lives in ``langchain.llms``.
        """
        # Imported lazily so this component can load even when the
        # anthropic extra is not installed.
        from langchain.llms import Anthropic

        return Anthropic(
            anthropic_api_key=anthropic_api_key,
            anthropic_api_url=anthropic_api_url,
            # The wrapper's model_kwargs field defaults to an empty dict,
            # so never forward a raw None.
            model_kwargs=model_kwargs or {},
            temperature=temperature,
        )

View file

@ -0,0 +1,56 @@
from langflow import CustomComponent
from langchain.llms import BaseLanguageModel
from typing import Optional, Dict
class CTransformersComponent(CustomComponent):
    """Langflow component for C Transformers (GGML) language models."""

    display_name = "CTransformers"
    description = "C Transformers LLM models"
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "model": {"display_name": "Model", "required": True},
            "model_file": {"display_name": "Model File", "required": False},
            "model_type": {"display_name": "Model Type", "required": False},
            "config": {"display_name": "Config", "advanced": True, "required": False},
        }

    def build(
        self,
        model: str,
        model_file: Optional[str] = None,
        model_type: Optional[str] = None,
        config: Optional[Dict] = None,
    ) -> BaseLanguageModel:
        """Build a ``CTransformers`` LLM.

        Fix: ``CTransformers`` was never imported, so the original raised
        ``NameError``; the class ships in ``langchain.llms``.
        """
        # Default generation settings mirroring the ctransformers library.
        default_config = {
            "top_k": 40,
            "top_p": 0.95,
            "temperature": 0.8,
            "repetition_penalty": 1.1,
            "last_n_tokens": 64,
            "seed": -1,
            "max_new_tokens": 256,
            "stop": None,
            "stream": False,
            "reset": True,
            "batch_size": 8,
            "threads": -1,
            "context_length": -1,
            "gpu_layers": 0
        }
        # A user-supplied config overrides the defaults key-by-key.
        if config:
            default_config.update(config)
        # Imported lazily so the component can load without the
        # ctransformers extra installed.
        from langchain.llms import CTransformers

        return CTransformers(model=model, model_file=model_file, model_type=model_type, config=default_config)

View file

@ -0,0 +1,47 @@
from langflow import CustomComponent
from langchain.tools import SecretStr
from typing import Optional, Dict, Union, Callable
from langflow.field_typing import BaseLanguageModel
class ChatAnthropicComponent(CustomComponent):
    """Langflow component for Anthropic chat models."""

    display_name = "ChatAnthropic"
    description = "`Anthropic` chat large language models."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/chat/integrations/anthropic"

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "anthropic_api_key": {
                "display_name": "Anthropic API Key",
                "type": SecretStr,
            },
            "anthropic_api_url": {
                "display_name": "Anthropic API URL",
                "type": str,
            },
            "model_kwargs": {
                "display_name": "Model Kwargs",
                "type": Dict[str, Union[str, int, float, bool]],
                "advanced": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "type": float,
            },
        }

    def build(
        self,
        anthropic_api_key: Optional[SecretStr] = None,
        anthropic_api_url: Optional[str] = None,
        model_kwargs: Optional[Dict[str, Union[str, int, float, bool]]] = None,
        temperature: Optional[float] = None,
    ) -> Union[BaseLanguageModel, Callable]:
        """Build a ``ChatAnthropic`` chat model.

        Fix: the original imported from the nonexistent module path
        ``langchain.model_io.models.chat.integrations``; the public class
        lives in ``langchain.chat_models``. ``model_kwargs`` is also never
        forwarded as a raw ``None``, since the field defaults to a dict.
        """
        from langchain.chat_models import ChatAnthropic

        return ChatAnthropic(
            # Unwrap the SecretStr so the wrapper receives a plain string.
            anthropic_api_key=anthropic_api_key.get_secret_value() if anthropic_api_key else None,
            anthropic_api_url=anthropic_api_url,
            model_kwargs=model_kwargs or {},
            temperature=temperature,
        )

View file

@ -0,0 +1,82 @@
from langflow import CustomComponent
from langchain.llms import BaseLLM
from typing import Optional, Dict, Union, Any
from langchain.field_typing import BaseLanguageModel
class ChatOpenAIComponent(CustomComponent):
    """Langflow component for OpenAI chat models."""

    display_name = "ChatOpenAI"
    description = "`OpenAI` Chat large language models API."

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "max_tokens": {
                "display_name": "Max Tokens",
                "type": "int",
                "advanced": False,
                "required": False,
            },
            "model_kwargs": {
                "display_name": "Model Kwargs",
                "type": "dict",
                "advanced": True,
                "required": False,
            },
            "model_name": {
                "display_name": "Model Name",
                "type": "str",
                "advanced": False,
                "required": False,
                "options": [
                    "gpt-4-1106-preview",
                    "gpt-4",
                    "gpt-4-32k",
                    "gpt-3.5-turbo",
                    "gpt-3.5-turbo-16k",
                ],
            },
            "openai_api_base": {
                "display_name": "OpenAI API Base",
                "type": "str",
                "advanced": False,
                "required": False,
                "info": (
                    "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\n"
                    "You can change this to use other APIs like JinaChat, LocalAI and Prem."
                ),
            },
            "openai_api_key": {
                "display_name": "OpenAI API Key",
                "type": "str",
                "advanced": False,
                "required": False,
            },
            "temperature": {
                "display_name": "Temperature",
                "type": "float",
                "advanced": False,
                "required": False,
                "default": 0.7,
            },
        }

    def build(
        self,
        max_tokens: Optional[int] = None,
        model_kwargs: Optional[Dict[str, Any]] = None,
        model_name: Optional[str] = "gpt-4-1106-preview",
        openai_api_base: Optional[str] = None,
        openai_api_key: Optional[str] = None,
        temperature: float = 0.7,
    ) -> Union[BaseLanguageModel, BaseLLM]:
        """Build a ``ChatOpenAI`` chat model.

        Fix: ``ChatOpenAI`` was never imported, so the original raised
        ``NameError``; the class ships in ``langchain.chat_models``.
        """
        from langchain.chat_models import ChatOpenAI

        return ChatOpenAI(
            max_tokens=max_tokens,
            # model_kwargs defaults to a dict on the wrapper; never pass None.
            model_kwargs=model_kwargs or {},
            model_name=model_name,
            openai_api_base=openai_api_base,
            openai_api_key=openai_api_key,
            temperature=temperature,
        )

View file

@ -0,0 +1,84 @@
from langflow import CustomComponent
from typing import List
from langchain.messages import BaseMessage
class ChatVertexAIComponent(CustomComponent):
    """Langflow component for Google Vertex AI chat models."""

    display_name = "ChatVertexAI"
    description = "`Vertex AI` Chat large language models API."

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "credentials": {
                "display_name": "Credentials",
                "type": "file",
                "fileTypes": ["json"],
                "file_path": None,
            },
            "examples": {
                "display_name": "Examples",
                "multiline": True,
            },
            "location": {
                "display_name": "Location",
                "default": "us-central1",
            },
            "max_output_tokens": {
                "display_name": "Max Output Tokens",
                "default": 128,
                "advanced": True,
            },
            "model_name": {
                "display_name": "Model Name",
                "default": "chat-bison",
            },
            "project": {
                "display_name": "Project",
            },
            "temperature": {
                "display_name": "Temperature",
                "default": 0.0,
            },
            "top_k": {
                "display_name": "Top K",
                "default": 40,
                "advanced": True,
            },
            "top_p": {
                "display_name": "Top P",
                "default": 0.95,
                "advanced": True,
            },
            "verbose": {
                "display_name": "Verbose",
                "default": False,
                "advanced": True,
            },
        }

    def build(
        self,
        credentials: str,
        examples: List[BaseMessage],
        project: str,
        location: str = "us-central1",
        max_output_tokens: int = 128,
        model_name: str = "chat-bison",
        temperature: float = 0.0,
        top_k: int = 40,
        top_p: float = 0.95,
        verbose: bool = False,
    ):
        """Build a ``ChatVertexAI`` chat model.

        Fix: ``ChatVertexAI`` was never imported, so the original raised
        ``NameError``; the class ships in ``langchain.chat_models``.
        """
        # Imported lazily so the component can load without the
        # google-cloud-aiplatform extra installed.
        from langchain.chat_models import ChatVertexAI

        return ChatVertexAI(
            credentials=credentials,
            examples=examples,
            location=location,
            max_output_tokens=max_output_tokens,
            model_name=model_name,
            project=project,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            verbose=verbose,
        )

View file

@ -0,0 +1,40 @@
from langflow import CustomComponent
from langchain.llms import BaseLanguageModel
from typing import Optional
class CohereComponent(CustomComponent):
    """Langflow component for Cohere large language models."""

    display_name = "Cohere"
    description = "Cohere large language models."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "cohere_api_key": {
                "display_name": "Cohere API Key",
                "type": "password",
                "show": True
            },
            "max_tokens": {
                "display_name": "Max Tokens",
                "default": 256,
                "type": "int",
                "show": True
            },
            "temperature": {
                "display_name": "Temperature",
                "default": 0.75,
                "type": "float",
                "show": True
            },
        }

    def build(
        self,
        cohere_api_key: str,
        max_tokens: Optional[int] = 256,
        temperature: Optional[float] = 0.75,
    ) -> BaseLanguageModel:
        """Build a ``Cohere`` LLM.

        Fixes: ``Cohere`` was never imported (``NameError``), and the
        wrapper's keyword is ``cohere_api_key``, not ``api_key``.
        """
        from langchain.llms import Cohere

        return Cohere(cohere_api_key=cohere_api_key, max_tokens=max_tokens, temperature=temperature)

View file

@ -0,0 +1,126 @@
from typing import Optional, List, Dict, Any
from langflow import CustomComponent
from langchain.llms import BaseLanguageModel
class LlamaCppComponent(CustomComponent):
    """Langflow component for local llama.cpp models."""

    display_name = "LlamaCpp"
    description = "llama.cpp model."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "grammar": {"display_name": "Grammar", "advanced": True},
            "cache": {"display_name": "Cache", "advanced": True},
            "client": {"display_name": "Client", "advanced": True},
            "echo": {"display_name": "Echo", "advanced": True},
            "f16_kv": {"display_name": "F16 KV", "advanced": True},
            "grammar_path": {"display_name": "Grammar Path", "advanced": True},
            "last_n_tokens_size": {"display_name": "Last N Tokens Size", "advanced": True},
            "logits_all": {"display_name": "Logits All", "advanced": True},
            "logprobs": {"display_name": "Logprobs", "advanced": True},
            "lora_base": {"display_name": "Lora Base", "advanced": True},
            "lora_path": {"display_name": "Lora Path", "advanced": True},
            "max_tokens": {"display_name": "Max Tokens", "advanced": True},
            "metadata": {"display_name": "Metadata", "advanced": True},
            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
            "model_path": {"display_name": "Model Path"},
            "n_batch": {"display_name": "N Batch", "advanced": True},
            "n_ctx": {"display_name": "N Ctx", "advanced": True},
            "n_gpu_layers": {"display_name": "N GPU Layers", "advanced": True},
            "n_parts": {"display_name": "N Parts", "advanced": True},
            "n_threads": {"display_name": "N Threads", "advanced": True},
            "repeat_penalty": {"display_name": "Repeat Penalty", "advanced": True},
            "rope_freq_base": {"display_name": "Rope Freq Base", "advanced": True},
            "rope_freq_scale": {"display_name": "Rope Freq Scale", "advanced": True},
            "seed": {"display_name": "Seed", "advanced": True},
            "stop": {"display_name": "Stop", "advanced": True},
            "streaming": {"display_name": "Streaming", "advanced": True},
            "suffix": {"display_name": "Suffix", "advanced": True},
            "tags": {"display_name": "Tags", "advanced": True},
            "temperature": {"display_name": "Temperature"},
            "top_k": {"display_name": "Top K", "advanced": True},
            "top_p": {"display_name": "Top P", "advanced": True},
            "use_mlock": {"display_name": "Use Mlock", "advanced": True},
            "use_mmap": {"display_name": "Use Mmap", "advanced": True},
            "verbose": {"display_name": "Verbose", "advanced": True},
            "vocab_only": {"display_name": "Vocab Only", "advanced": True},
        }

    def build(
        self,
        model_path: str,
        grammar: Optional[str] = None,
        cache: Optional[bool] = None,
        client: Optional[Any] = None,
        echo: Optional[bool] = False,
        f16_kv: Optional[bool] = True,
        grammar_path: Optional[str] = None,
        last_n_tokens_size: Optional[int] = 64,
        logits_all: Optional[bool] = False,
        logprobs: Optional[int] = None,
        lora_base: Optional[str] = None,
        lora_path: Optional[str] = None,
        max_tokens: Optional[int] = 256,
        metadata: Optional[Dict] = None,
        model_kwargs: Optional[Dict] = None,
        n_batch: Optional[int] = 8,
        n_ctx: Optional[int] = 512,
        n_gpu_layers: Optional[int] = None,
        n_parts: Optional[int] = -1,
        n_threads: Optional[int] = None,
        repeat_penalty: Optional[float] = 1.1,
        rope_freq_base: Optional[float] = 10000.0,
        rope_freq_scale: Optional[float] = 1.0,
        seed: Optional[int] = -1,
        stop: Optional[List[str]] = None,
        streaming: Optional[bool] = True,
        suffix: Optional[str] = None,
        tags: Optional[List[str]] = None,
        temperature: Optional[float] = 0.8,
        top_k: Optional[int] = 40,
        top_p: Optional[float] = 0.95,
        use_mlock: Optional[bool] = False,
        use_mmap: Optional[bool] = True,
        verbose: Optional[bool] = True,
        vocab_only: Optional[bool] = False,
    ) -> BaseLanguageModel:
        """Build a ``LlamaCpp`` LLM.

        Fix: the original instantiated the abstract ``BaseLanguageModel``
        directly, which cannot work; the concrete wrapper is
        ``langchain.llms.LlamaCpp``.
        """
        # Imported lazily so the component can load without the
        # llama-cpp-python extra installed.
        from langchain.llms import LlamaCpp

        return LlamaCpp(
            model_path=model_path,
            grammar=grammar,
            cache=cache,
            client=client,
            echo=echo,
            f16_kv=f16_kv,
            grammar_path=grammar_path,
            last_n_tokens_size=last_n_tokens_size,
            logits_all=logits_all,
            logprobs=logprobs,
            lora_base=lora_base,
            lora_path=lora_path,
            max_tokens=max_tokens,
            metadata=metadata,
            model_kwargs=model_kwargs or {},
            n_batch=n_batch,
            n_ctx=n_ctx,
            n_gpu_layers=n_gpu_layers,
            n_parts=n_parts,
            n_threads=n_threads,
            repeat_penalty=repeat_penalty,
            rope_freq_base=rope_freq_base,
            rope_freq_scale=rope_freq_scale,
            seed=seed,
            stop=stop,
            streaming=streaming,
            suffix=suffix,
            tags=tags,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            use_mlock=use_mlock,
            use_mmap=use_mmap,
            verbose=verbose,
            vocab_only=vocab_only,
        )

View file

@ -0,0 +1,57 @@
from langflow import CustomComponent
from langchain.llms import BaseLLM
from typing import Optional, Dict
class OpenAIComponent(CustomComponent):
    """Langflow component for OpenAI completion models."""

    display_name = "OpenAI"
    description = "OpenAI large language models."

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "max_tokens": {"display_name": "Max Tokens", "default": 256},
            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
            "model_name": {
                "display_name": "Model Name",
                "default": "text-davinci-003",
                "options": [
                    "text-davinci-003",
                    "text-davinci-002",
                    "text-curie-001",
                    "text-babbage-001",
                    "text-ada-001",
                ],
            },
            "openai_api_base": {
                "display_name": "OpenAI API Base",
                "info": (
                    "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n"
                    "You can change this to use other APIs like JinaChat, LocalAI and Prem."
                ),
            },
            "openai_api_key": {
                "display_name": "OpenAI API Key",
                "default": "",
                "password": True,
            },
            "temperature": {"display_name": "Temperature", "default": 0.7},
        }

    def build(
        self,
        max_tokens: int = 256,
        model_kwargs: Optional[Dict] = None,
        model_name: str = "text-davinci-003",
        openai_api_base: str = "https://api.openai.com/v1",
        openai_api_key: str = "",
        temperature: float = 0.7,
    ) -> BaseLLM:
        """Build an ``OpenAI`` LLM.

        Fix: the original instantiated the abstract ``BaseLLM`` directly,
        which cannot work; the concrete wrapper is ``langchain.llms.OpenAI``.
        """
        from langchain.llms import OpenAI

        return OpenAI(
            max_tokens=max_tokens,
            model_kwargs=model_kwargs or {},
            model_name=model_name,
            openai_api_base=openai_api_base,
            openai_api_key=openai_api_key,
            temperature=temperature,
        )

View file

@ -0,0 +1,145 @@
from langflow import CustomComponent
from langchain.llms import BaseLLM
from typing import Optional, Union, Callable, Dict
class VertexAIComponent(CustomComponent):
    """Langflow component for Google Vertex AI completion models."""

    display_name = "VertexAI"
    description = "Google Vertex AI large language models"

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "credentials": {
                "display_name": "Credentials",
                "type": "file",
                "file_types": ["json"],
                "required": False,
                "default": None,
            },
            "location": {
                "display_name": "Location",
                "type": "str",
                "default": "us-central1",
                "required": False,
            },
            "max_output_tokens": {
                "display_name": "Max Output Tokens",
                "type": "int",
                "default": 128,
                "required": False,
            },
            "max_retries": {
                "display_name": "Max Retries",
                "type": "int",
                "default": 6,
                "required": False,
            },
            "metadata": {
                "display_name": "Metadata",
                "type": "dict",
                "required": False,
                "default": {},
            },
            "model_name": {
                "display_name": "Model Name",
                "type": "str",
                "default": "text-bison",
                "required": False,
            },
            "n": {
                "display_name": "N",
                "type": "int",
                "default": 1,
                "required": False,
            },
            "project": {
                "display_name": "Project",
                "type": "str",
                "required": False,
                "default": None,
            },
            "request_parallelism": {
                "display_name": "Request Parallelism",
                "type": "int",
                "default": 5,
                "required": False,
            },
            "streaming": {
                "display_name": "Streaming",
                "type": "bool",
                "default": False,
                "required": False,
            },
            "temperature": {
                "display_name": "Temperature",
                "type": "float",
                "default": 0.0,
                "required": False,
            },
            "top_k": {
                "display_name": "Top K",
                "type": "int",
                "default": 40,
                "required": False,
            },
            "top_p": {
                "display_name": "Top P",
                "type": "float",
                "default": 0.95,
                "required": False,
            },
            "tuned_model_name": {
                "display_name": "Tuned Model Name",
                "type": "str",
                "required": False,
                "default": None,
            },
            "verbose": {
                "display_name": "Verbose",
                "type": "bool",
                "default": False,
                "required": False,
            },
        }

    def build(
        self,
        credentials: Optional[str] = None,
        location: str = "us-central1",
        max_output_tokens: int = 128,
        max_retries: int = 6,
        metadata: Optional[Dict] = None,
        model_name: str = "text-bison",
        n: int = 1,
        project: Optional[str] = None,
        request_parallelism: int = 5,
        streaming: bool = False,
        temperature: float = 0.0,
        top_k: int = 40,
        top_p: float = 0.95,
        tuned_model_name: Optional[str] = None,
        verbose: bool = False,
    ) -> Union[BaseLLM, Callable]:
        """Build a ``VertexAI`` LLM.

        Fix: the annotation ``metadata: Dict = None`` was invalid (a None
        default on a non-optional type); it is now ``Optional[Dict]`` with
        the same None-to-empty-dict normalization as before.
        """
        if metadata is None:
            metadata = {}
        # Imported lazily so the component can load without the
        # google-cloud-aiplatform extra installed.
        from langchain.llms import VertexAI

        return VertexAI(
            credentials=credentials,
            location=location,
            max_output_tokens=max_output_tokens,
            max_retries=max_retries,
            metadata=metadata,
            model_name=model_name,
            n=n,
            project=project,
            request_parallelism=request_parallelism,
            streaming=streaming,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            tuned_model_name=tuned_model_name,
            verbose=verbose,
        )

View file

@ -0,0 +1,46 @@
from langflow import CustomComponent
from langchain.retrievers import MultiQueryRetriever
from typing import Optional, Union, Callable
from langflow.field_typing import (
PromptTemplate,
BaseLLM,
BaseRetriever,
)
class MultiQueryRetrieverComponent(CustomComponent):
    """Langflow component wrapping langchain's MultiQueryRetriever."""

    display_name = "MultiQueryRetriever"
    description = "Initialize from llm using default template."
    documentation = "https://python.langchain.com/docs/modules/data_connection/retrievers/how_to/MultiQueryRetriever"

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "llm": {"display_name": "LLM"},
            "prompt": {"display_name": "Prompt", "default": {
                "input_variables": ["question"],
                "input_types": {},
                "output_parser": None,
                "partial_variables": {},
                "template": 'You are an AI language model assistant. Your task is \n'
                'to generate 3 different versions of the given user \n'
                'question to retrieve relevant documents from a vector database. \n'
                'By generating multiple perspectives on the user question, \n'
                'your goal is to help the user overcome some of the limitations \n'
                'of distance-based similarity search. Provide these alternative \n'
                'questions separated by newlines. Original question: {question}',
                "template_format": "f-string",
                "validate_template": False,
                "_type": "prompt"
            }},
            "retriever": {"display_name": "Retriever"},
            "parser_key": {"display_name": "Parser Key", "default": "lines"},
        }

    def build(
        self,
        llm: BaseLLM,
        retriever: BaseRetriever,
        prompt: Optional[PromptTemplate] = None,
        parser_key: str = "lines",
    ) -> Union[Callable, MultiQueryRetriever]:
        """Build a ``MultiQueryRetriever``.

        Fix: ``MultiQueryRetriever`` is not constructed from ``llm``/``prompt``
        directly — its constructor expects an ``llm_chain`` — so the class
        factory ``from_llm`` is used instead. When no prompt is supplied the
        factory's default query-generation template is kept rather than being
        overridden with ``None``.
        """
        if prompt is None:
            return MultiQueryRetriever.from_llm(retriever=retriever, llm=llm, parser_key=parser_key)
        return MultiQueryRetriever.from_llm(retriever=retriever, llm=llm, prompt=prompt, parser_key=parser_key)

View file

@ -0,0 +1,32 @@
from langflow import CustomComponent
from langchain.document_transformers import TextSplitter
from langchain.documents import Document
from typing import List
class CharacterTextSplitterComponent(CustomComponent):
    """Langflow component that splits documents by a character separator."""

    display_name = "CharacterTextSplitter"
    description = "Splitting text that looks at characters."

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "documents": {"display_name": "Documents"},
            "chunk_overlap": {"display_name": "Chunk Overlap", "default": 200},
            "chunk_size": {"display_name": "Chunk Size", "default": 1000},
            "separator": {"display_name": "Separator", "default": "\n"},
        }

    def build(
        self,
        documents: List[Document],
        chunk_overlap: int = 200,
        chunk_size: int = 1000,
        separator: str = "\n",
    ) -> List[Document]:
        """Split the given documents and return the resulting chunks.

        Fix: the original instantiated the abstract ``TextSplitter`` with a
        ``documents`` kwarg it does not accept; the concrete
        ``CharacterTextSplitter`` must be built from the sizing options and
        then applied to the documents via ``split_documents``.
        """
        from langchain.text_splitter import CharacterTextSplitter

        splitter = CharacterTextSplitter(
            chunk_overlap=chunk_overlap,
            chunk_size=chunk_size,
            separator=separator,
        )
        return splitter.split_documents(documents)

View file

@ -0,0 +1,17 @@
from langflow import CustomComponent
from langchain.field_typing import JsonSpec, Tool
class JsonToolkitComponent(CustomComponent):
    """Langflow component wrapping langchain's JsonToolkit."""

    display_name = "JsonToolkit"
    description = "Toolkit for interacting with a JSON spec."

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "spec": {"display_name": "Spec", "type": JsonSpec},
        }

    def build(self, spec: JsonSpec) -> Tool:
        """Build a ``JsonToolkit`` around the given JSON spec.

        Fix: ``JsonToolkit`` was never imported, so the original raised
        ``NameError``; the class ships in ``langchain.agents.agent_toolkits``.
        """
        from langchain.agents.agent_toolkits import JsonToolkit

        return JsonToolkit(spec=spec)

View file

@ -0,0 +1,22 @@
from langflow import CustomComponent
from langchain.field_typing import AgentExecutor, TextRequestsWrapper
from typing import Callable
class OpenAPIToolkitComponent(CustomComponent):
    """Langflow component wrapping langchain's OpenAPIToolkit."""

    display_name = "OpenAPIToolkit"
    description = "Toolkit for interacting with an OpenAPI API."

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "json_agent": {"display_name": "JSON Agent"},
            "requests_wrapper": {"display_name": "Text Requests Wrapper"},
        }

    def build(
        self,
        json_agent: AgentExecutor,
        requests_wrapper: TextRequestsWrapper,
    ) -> Callable:
        """Build an ``OpenAPIToolkit`` from a JSON agent and requests wrapper.

        Fix: ``OpenAPIToolkit`` was never imported, so the original raised
        ``NameError``; the class ships in ``langchain.agents.agent_toolkits``.
        """
        from langchain.agents.agent_toolkits import OpenAPIToolkit

        return OpenAPIToolkit(json_agent=json_agent, requests_wrapper=requests_wrapper)

View file

@ -0,0 +1,33 @@
from langflow import CustomComponent
from langchain.vectorstores import VectorStore
from typing import Union, Callable
from langflow.field_typing import Chain
class VectorStoreInfoComponent(CustomComponent):
    """Langflow component wrapping langchain's VectorStoreInfo."""

    display_name = "VectorStoreInfo"
    description = "Information about a VectorStore"

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "vectorstore": {"display_name": "VectorStore"},
            "description": {"display_name": "Description", "multiline": True},
            "name": {"display_name": "Name"},
        }

    def build(
        self,
        vectorstore: VectorStore,
        description: str,
        name: str,
    ) -> Union[Chain, Callable]:
        """Build a ``VectorStoreInfo`` record.

        Fix: the original defined an ad-hoc placeholder class inside the
        method; the real ``VectorStoreInfo`` model ships with langchain's
        vectorstore agent toolkit and is what the other toolkit components
        expect to receive.
        """
        from langchain.agents.agent_toolkits import VectorStoreInfo

        return VectorStoreInfo(vectorstore=vectorstore, description=description, name=name)

View file

@ -0,0 +1,23 @@
from langflow import CustomComponent
from typing import List
from langchain.vectorstores import VectorStore
class VectorStoreRouterToolkitComponent(CustomComponent):
    """Langflow component wrapping langchain's VectorStoreRouterToolkit."""

    display_name = "VectorStoreRouterToolkit"
    description = "Toolkit for routing between Vector Stores."

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "vectorstores": {"display_name": "Vector Stores"},
        }

    def build(
        self,
        vectorstores: List[VectorStore],
    ):
        """Build a ``VectorStoreRouterToolkit`` over the given stores.

        Fix: ``VectorStoreRouterToolkit`` was never imported, so the original
        raised ``NameError``; the class ships in
        ``langchain.agents.agent_toolkits``.
        """
        from langchain.agents.agent_toolkits import VectorStoreRouterToolkit

        # NOTE(review): the toolkit's `vectorstores` field is typically a list
        # of VectorStoreInfo, not raw VectorStore objects — confirm the
        # upstream wiring supplies the right type.
        return VectorStoreRouterToolkit(vectorstores=vectorstores)

View file

@ -0,0 +1,22 @@
from langflow import CustomComponent
from langchain.toolkits import VectorStoreToolkit
from langflow.field_typing import (
VectorStore,
Tool,
)
class VectorStoreToolkitComponent(CustomComponent):
    """Langflow component wrapping langchain's VectorStoreToolkit."""

    display_name = "VectorStoreToolkit"
    description = "Toolkit for interacting with a Vector Store."

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "vectorstore_info": {"display_name": "Vector Store Info"},
        }

    def build(
        self,
        vectorstore_info: VectorStore,
    ) -> Tool:
        """Build a ``VectorStoreToolkit`` from the given vector store info."""
        # NOTE(review): VectorStoreToolkit's `vectorstore_info` field is a
        # VectorStoreInfo rather than a VectorStore, and the toolkit is not a
        # single Tool — both annotations look suspect; confirm against the
        # langflow field-typing definitions.
        return VectorStoreToolkit(vectorstore_info=vectorstore_info)

View file

@ -0,0 +1,34 @@
from langflow import CustomComponent
# Assuming `BingSearchAPIWrapper` is a class that exists in the context
# and has the appropriate methods and attributes.
# We need to make sure this class is importable from the context where this code will be running.
from your_module import BingSearchAPIWrapper
class BingSearchAPIWrapperComponent(CustomComponent):
    """Langflow component wrapping the Bing Search API."""

    display_name = "BingSearchAPIWrapper"
    description = "Wrapper for Bing Search API."

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "bing_search_url": {"display_name": "Bing Search URL"},
            "bing_subscription_key": {
                "display_name": "Bing Subscription Key",
                "password": True,
            },
            # 'k' is not included as it is not shown (show=False)
        }

    def build(
        self,
        bing_search_url: str,
        bing_subscription_key: str,
    ) -> BingSearchAPIWrapper:
        """Build a ``BingSearchAPIWrapper``.

        Fix: the wrapper was imported from a placeholder ``your_module``;
        the real class ships in ``langchain.utilities``.
        """
        from langchain.utilities import BingSearchAPIWrapper

        # 'k' has a default value and is not shown (show=False), so it is
        # hardcoded here.
        return BingSearchAPIWrapper(
            bing_search_url=bing_search_url,
            bing_subscription_key=bing_subscription_key,
            k=10,
        )

View file

@ -0,0 +1,27 @@
from langflow import CustomComponent
from typing import Optional, Union, Callable
# Assuming GoogleSearchAPIWrapper is a valid import based on JSON
# and it exists in some module that should be imported here.
# The import path should be replaced with the correct one once available.
from some_module import GoogleSearchAPIWrapper
class GoogleSearchAPIWrapperComponent(CustomComponent):
    """Langflow component wrapping the Google Search API."""

    display_name = "GoogleSearchAPIWrapper"
    description = "Wrapper for Google Search API."

    def build_config(self):
        """Describe the configurable fields rendered in the UI."""
        return {
            "google_api_key": {"display_name": "Google API Key", "password": True},
            "google_cse_id": {"display_name": "Google CSE ID"},
            # Fields with "show": False are omitted based on the rules
        }

    def build(
        self,
        google_api_key: Optional[str] = None,
        google_cse_id: Optional[str] = None,
    ) -> Union[GoogleSearchAPIWrapper, Callable]:
        """Build a ``GoogleSearchAPIWrapper``.

        Fix: the wrapper was imported from a placeholder ``some_module``;
        the real class ships in ``langchain.utilities``.
        """
        from langchain.utilities import GoogleSearchAPIWrapper

        return GoogleSearchAPIWrapper(google_api_key=google_api_key, google_cse_id=google_cse_id)

View file

@ -0,0 +1,57 @@
from langflow import CustomComponent
from typing import Dict, Optional
# Assuming the existence of GoogleSerperAPIWrapper class in the serper module
# If this class does not exist, you would need to create it or import the appropriate class from another module
from serper import GoogleSerperAPIWrapper
class GoogleSerperAPIWrapperComponent(CustomComponent):
    """Langflow component for the Serper.dev Google Search API wrapper."""

    display_name = "GoogleSerperAPIWrapper"
    description = "Wrapper around the Serper.dev Google Search API."

    def build_config(self) -> Dict[str, Dict]:
        # Field attributes shared by every entry of this component's config.
        common = {
            "show": True,
            "multiline": False,
            "password": False,
            "advanced": False,
            "dynamic": False,
            "info": '',
            "list": False,
        }

        result_key_field = dict(common)
        result_key_field.update(
            {
                "display_name": "Result Key for Type",
                "name": "result_key_for_type",
                "type": "dict",
                # Maps Serper result categories to the response keys to read.
                "value": {
                    "news": "news",
                    "places": "places",
                    "images": "images",
                    "search": "organic",
                },
            }
        )

        api_key_field = dict(common)
        api_key_field.update(
            {
                "display_name": "Serper API Key",
                "name": "serper_api_key",
                "type": "str",
                "value": "",  # left blank; the user must supply a key
            }
        )

        return {
            "result_key_for_type": result_key_field,
            "serper_api_key": api_key_field,
        }

    def build(
        self,
        result_key_for_type: Optional[Dict[str, str]] = None,
        serper_api_key: Optional[str] = None,
    ) -> GoogleSerperAPIWrapper:
        """Instantiate the wrapper with the configured key mapping and API key."""
        return GoogleSerperAPIWrapper(
            result_key_for_type=result_key_for_type,
            serper_api_key=serper_api_key,
        )

View file

@ -0,0 +1,27 @@
from langflow import CustomComponent
from typing import Optional, Dict
class SearxSearchWrapperComponent(CustomComponent):
    """Langflow component stub for the Searx search API wrapper."""

    display_name = "SearxSearchWrapper"
    description = "Wrapper for Searx API."

    def build_config(self):
        return {
            "headers": {
                "display_name": "Headers",
                "multiline": True,
                "default": '{"Authorization": "Bearer <token>"}'
            },
        }

    def build(
        self,
        headers: Optional[Dict[str, str]] = None,
    ):
        """Placeholder build step.

        NOTE(review): no SearxSearchWrapper class is imported in this module,
        so this method currently returns None; wire in the real wrapper once
        it is available.
        """
        if headers is None:
            headers = {"Authorization": "Bearer <token>"}
        # Nothing is constructed from `headers` yet — the wrapper class is
        # missing — so the method falls through and yields None.
        return None

View file

@ -0,0 +1,32 @@
from langflow import CustomComponent
from typing import Callable, Union
# Assuming SerpAPIWrapper is a predefined class within the langflow context.
# If it's not, it must be defined or imported from the appropriate module.
class SerpAPIWrapperComponent(CustomComponent):
    """Langflow component wrapping SerpAPI."""

    display_name = "SerpAPIWrapper"
    description = "Wrapper around SerpAPI"

    def build_config(self):
        return {
            "serpapi_api_key": {"display_name": "SerpAPI API Key", "type": "password"},
        }

    def build(
        self,
        serpapi_api_key: str,
    ) -> Union['SerpAPIWrapper', Callable]:
        """Build the wrapper with fixed Google search parameters."""
        # Search-engine defaults mirrored from the component's JSON template.
        engine_params = dict(
            engine="google",
            google_domain="google.com",
            gl="us",
            hl="en",
        )
        return SerpAPIWrapper(serpapi_api_key=serpapi_api_key, params=engine_params)

View file

@ -0,0 +1,17 @@
from langflow import CustomComponent
from typing import Union, Callable
# Assuming WikipediaAPIWrapper is a class that needs to be imported.
# The import statement is not included as it is not provided in the JSON
# and the actual implementation details are unknown.
class WikipediaAPIWrapperComponent(CustomComponent):
    """Langflow component wrapping the Wikipedia API."""

    display_name = "WikipediaAPIWrapper"
    description = "Wrapper around WikipediaAPI."

    def build_config(self):
        # No user-configurable fields for this component.
        return {}

    # The return annotation uses a forward-reference string: WikipediaAPIWrapper
    # is never imported in this module, and a bare name here would be evaluated
    # at class-definition time and raise NameError before the component could
    # even be registered.
    def build(self) -> Union["WikipediaAPIWrapper", Callable]:
        # NOTE(review): WikipediaAPIWrapper still needs to be imported for this
        # call to succeed at runtime — only the definition-time crash is fixed.
        return WikipediaAPIWrapper()

View file

@ -0,0 +1,22 @@
from langflow import CustomComponent
from typing import Callable, Union
# Since all the fields in the JSON have show=False, we will only create a basic component
# without any configurable fields.
class WolframAlphaAPIWrapperComponent(CustomComponent):
    """Placeholder Langflow component for the Wolfram Alpha API wrapper."""

    display_name = "WolframAlphaAPIWrapper"
    description = "Wrapper for Wolfram Alpha."

    def build_config(self):
        # Every field in the source JSON is hidden (show=False), so the
        # component exposes no configurable options.
        return {}

    def build(self) -> Union[Callable, object]:
        # No concrete wrapper class is available in this module; a bare object
        # stands in until the real WolframAlphaAPIWrapper is wired up.
        placeholder = object()
        return placeholder

View file

@ -0,0 +1,39 @@
from langflow import CustomComponent
from langchain.vectorstores import FAISS
from typing import Optional, List
from langflow.field_typing import (
Document,
Embeddings,
NestedDict,
)
class FAISSComponent(CustomComponent):
    """Construct a FAISS vector store from documents, or load a saved index.

    The original code called ``FAISS(embedding=..., documents=..., ...)``, but
    langchain's ``FAISS.__init__`` takes an embedding function plus an index and
    docstore — not ``documents``, ``folder_path`` or ``search_kwargs`` — so the
    call raised ``TypeError``. Stores are built with ``FAISS.from_documents``
    or restored with ``FAISS.load_local``.
    """

    display_name = "FAISS"
    description = "Construct FAISS wrapper from raw documents."
    documentation = "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss"

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "folder_path": {"display_name": "Local Path"},
            "index_name": {"display_name": "Index Name"},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
        }

    def build(
        self,
        embedding: Embeddings,
        documents: Optional[List[Document]] = None,
        folder_path: str = "",
        index_name: str = "",
        search_kwargs: Optional[NestedDict] = None,
    ) -> FAISS:
        """Return a FAISS store built from `documents`, or loaded from disk.

        `search_kwargs` configures retriever creation, not store construction,
        so it is accepted for interface compatibility but unused here.
        """
        if documents:
            return FAISS.from_documents(documents=documents, embedding=embedding)
        # No documents supplied: restore a previously saved local index.
        return FAISS.load_local(
            folder_path=folder_path,
            embeddings=embedding,
            index_name=index_name or "index",  # langchain's default index name
        )

View file

@ -0,0 +1,45 @@
from langflow import CustomComponent
from langchain.vectorstores import MongoDBAtlasVectorSearch
from typing import Optional, List
from langflow.field_typing import (
Document,
Embeddings,
NestedDict,
)
class MongoDBAtlasComponent(CustomComponent):
    """Construct a MongoDB Atlas Vector Search store from raw documents.

    The original code called ``MongoDBAtlasVectorSearch(documents=..., db_name=...,
    mongodb_atlas_cluster_uri=..., ...)``, but the class's constructor takes a
    MongoDB *collection* handle plus an embedding — none of those keywords exist —
    so the call raised ``TypeError``. The supported entry point for a URI is
    ``MongoDBAtlasVectorSearch.from_connection_string``.
    """

    display_name = "MongoDB Atlas"
    description = "Construct a `MongoDB Atlas Vector Search` vector store from raw documents."

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "collection_name": {"display_name": "Collection Name"},
            "db_name": {"display_name": "Database Name"},
            "index_name": {"display_name": "Index Name"},
            "mongodb_atlas_cluster_uri": {"display_name": "MongoDB Atlas Cluster URI"},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
        }

    def build(
        self,
        documents: List[Document],
        embedding: Embeddings,
        collection_name: str = "",
        db_name: str = "",
        index_name: str = "",
        mongodb_atlas_cluster_uri: str = "",
        search_kwargs: Optional[NestedDict] = None,
    ) -> MongoDBAtlasVectorSearch:
        """Connect to the cluster, then index `documents` into the collection.

        `search_kwargs` configures retriever creation, not store construction,
        so it is accepted for interface compatibility but unused here.
        """
        vector_store = MongoDBAtlasVectorSearch.from_connection_string(
            connection_string=mongodb_atlas_cluster_uri,
            # The namespace is "<database>.<collection>" per the langchain API.
            namespace=f"{db_name}.{collection_name}",
            embedding=embedding,
            index_name=index_name,
        )
        if documents:
            vector_store.add_documents(documents)
        return vector_store

View file

@ -0,0 +1,44 @@
from langflow import CustomComponent
from typing import Optional, List
from langchain.vectorstores import Pinecone
from langchain.field_typing import (
Document,
Embeddings,
NestedDict,
)
class PineconeComponent(CustomComponent):
    """Construct a Pinecone vector store from raw documents.

    Two defects fixed: the Embedding field carried a nonsensical
    ``"default": 1000`` (apparently copied from a chunk-size field), and
    ``Pinecone(...)`` was called with ``documents``/API-credential keywords its
    constructor does not accept — langchain's ``Pinecone.__init__`` takes an
    index handle, and stores are built via ``from_documents`` /
    ``from_existing_index``.
    """

    display_name = "Pinecone"
    description = "Construct Pinecone wrapper from raw documents."

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "index_name": {"display_name": "Index Name"},
            "namespace": {"display_name": "Namespace"},
            "pinecone_api_key": {"display_name": "Pinecone API Key", "default": ""},
            "pinecone_env": {"display_name": "Pinecone Environment", "default": ""},
            "search_kwargs": {"display_name": "Search Kwargs", "default": '{}'},
        }

    def build(
        self,
        embedding: Embeddings,
        documents: Optional[List[Document]] = None,
        index_name: Optional[str] = None,
        namespace: Optional[str] = None,
        pinecone_api_key: Optional[str] = None,
        pinecone_env: Optional[str] = None,
        search_kwargs: Optional[NestedDict] = None,
    ) -> Pinecone:
        """Index `documents` into Pinecone, or attach to an existing index.

        `search_kwargs` configures retriever creation, not store construction,
        so it is accepted for interface compatibility but unused here.
        """
        import os

        # The pinecone client picks credentials up from the environment;
        # export them when the user supplied explicit values.
        if pinecone_api_key:
            os.environ["PINECONE_API_KEY"] = pinecone_api_key
        if pinecone_env:
            os.environ["PINECONE_ENVIRONMENT"] = pinecone_env

        if documents:
            return Pinecone.from_documents(
                documents=documents,
                embedding=embedding,
                index_name=index_name,
                namespace=namespace,
            )
        # No documents supplied: wrap the already-populated index.
        return Pinecone.from_existing_index(
            index_name=index_name,
            embedding=embedding,
            namespace=namespace,
        )

View file

@ -0,0 +1,73 @@
from langflow import CustomComponent
from langchain.vectorstores import Qdrant
from typing import Optional, List
from langflow.field_typing import Document, Embeddings, NestedDict
class QdrantComponent(CustomComponent):
    """Langflow component that builds a Qdrant vector store wrapper."""

    display_name = "Qdrant"
    description = "Construct Qdrant wrapper from a list of texts."

    def build_config(self):
        # Every exposed field only needs a display name, except the API key,
        # which must additionally be masked in the UI.
        labels = {
            "documents": "Documents",
            "embedding": "Embedding",
            "collection_name": "Collection Name",
            "content_payload_key": "Content Payload Key",
            "distance_func": "Distance Function",
            "grpc_port": "gRPC Port",
            "host": "Host",
            "https": "HTTPS",
            "location": "Location",
            "metadata_payload_key": "Metadata Payload Key",
            "path": "Path",
            "port": "Port",
            "prefer_grpc": "Prefer gRPC",
            "prefix": "Prefix",
            "search_kwargs": "Search Kwargs",
            "timeout": "Timeout",
            "url": "URL",
        }
        config = {field: {"display_name": label} for field, label in labels.items()}
        config["api_key"] = {"display_name": "API Key", "password": True}
        return config

    def build(
        self,
        embedding: Embeddings,
        documents: Optional[List[Document]] = None,
        api_key: Optional[str] = None,
        collection_name: Optional[str] = None,
        content_payload_key: str = "page_content",
        distance_func: str = "Cosine",
        grpc_port: int = 6334,
        host: Optional[str] = None,
        https: bool = False,
        location: str = ":memory:",
        metadata_payload_key: str = "metadata",
        path: Optional[str] = None,
        port: int = 6333,
        prefer_grpc: bool = False,
        prefix: Optional[str] = None,
        search_kwargs: Optional[NestedDict] = None,
        timeout: Optional[float] = None,
        url: Optional[str] = None,
    ) -> Qdrant:
        """Forward every configured option straight to the Qdrant wrapper."""
        qdrant_kwargs = dict(
            documents=documents,
            embedding=embedding,
            api_key=api_key,
            collection_name=collection_name,
            content_payload_key=content_payload_key,
            distance_func=distance_func,
            grpc_port=grpc_port,
            host=host,
            https=https,
            location=location,
            metadata_payload_key=metadata_payload_key,
            path=path,
            port=port,
            prefer_grpc=prefer_grpc,
            prefix=prefix,
            search_kwargs=search_kwargs,
            timeout=timeout,
            url=url,
        )
        return Qdrant(**qdrant_kwargs)

View file

@ -0,0 +1,44 @@
from langflow import CustomComponent
from typing import Optional, List
from langchain.vectorstores import SupabaseVectorStore
from langchain.field_typing import (
Document,
Embeddings,
NestedDict,
)
class SupabaseComponent(CustomComponent):
    """Langflow component building a Supabase vector store.

    Fixes a mutable-default-argument bug: ``search_kwargs`` defaulted to a
    shared ``{}`` literal, which every call (and any in-place mutation) would
    share. It now defaults to ``None`` and is normalized inside the method,
    so the constructor still receives a dict exactly as before.
    """

    display_name = "Supabase"
    description = "Return VectorStore initialized from texts and embeddings."

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "query_name": {"display_name": "Query Name"},
            "search_kwargs": {"display_name": "Search Kwargs"},
            "supabase_service_key": {"display_name": "Supabase Service Key"},
            "supabase_url": {"display_name": "Supabase URL"},
            "table_name": {"display_name": "Table Name"},
        }

    def build(
        self,
        embedding: Embeddings,
        documents: Optional[List[Document]] = None,
        query_name: str = '',
        search_kwargs: Optional[NestedDict] = None,  # was a shared mutable {} default
        supabase_service_key: str = '',
        supabase_url: str = '',
        table_name: str = '',
    ) -> SupabaseVectorStore:
        """Build the Supabase vector store from the configured connection values."""
        return SupabaseVectorStore(
            documents=documents,
            embedding=embedding,
            query_name=query_name,
            # Preserve the original observable behavior: pass a dict, never None.
            search_kwargs=search_kwargs if search_kwargs is not None else {},
            supabase_service_key=supabase_service_key,
            supabase_url=supabase_url,
            table_name=table_name,
        )