Update Langchain to 0.1 and migrate most components to CustomComponent (#1382)
This PR updates Langchain to 0.1.* and migrates components that were previously created automatically to the CustomComponent framework. This improves maintainability and brings Langflow closer to the framework.
commit e61e4ac51b
81 changed files with 3194 additions and 2804 deletions
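The migration follows one pattern throughout: each formerly auto-generated node becomes a class that subclasses CustomComponent, declares its UI fields in build_config(), and constructs the underlying LangChain object in build(). A minimal sketch of that shape, condensed from the CSVAgent component added in this PR (field options vary per component):

from langflow import CustomComponent
from langflow.field_typing import AgentExecutor, BaseLanguageModel
from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_agent


class CSVAgentComponent(CustomComponent):
    # Metadata shown in the Langflow node catalog
    display_name = "CSVAgent"
    description = "Construct a CSV agent from a CSV and tools."

    def build_config(self):
        # Declares the input fields rendered in the UI for this node
        return {
            "llm": {"display_name": "LLM"},
            "path": {"display_name": "Path", "field_type": "file", "file_types": [".csv"]},
        }

    def build(self, llm: BaseLanguageModel, path: str) -> AgentExecutor:
        # Builds the wrapped LangChain object when the flow runs
        return create_csv_agent(llm=llm, path=path)

The return-type annotation on build() is what types the node's output edge, which is presumably why several components below widen it (e.g. Union[Chain, Callable, LLMChain]).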
poetry.lock: 2740 changes (generated)
File diff suppressed because it is too large
pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langflow"
-version = "0.6.5a12"
+version = "0.6.5a13"
 description = "A Python package with a built-in web application"
 authors = ["Logspace <contact@logspace.ai>"]
 maintainers = [
@@ -25,8 +25,6 @@ documentation = "https://docs.langflow.org"
 langflow = "langflow.__main__:main"
 
 [tool.poetry.dependencies]
-
-
 python = ">=3.9,<3.11"
 fastapi = "^0.108.0"
 uvicorn = "^0.25.0"
@@ -35,8 +33,8 @@ google-search-results = "^2.4.1"
 google-api-python-client = "^2.79.0"
 typer = "^0.9.0"
 gunicorn = "^21.2.0"
-langchain = "~0.0.345"
-openai = "^1.6.1"
+langchain = "~0.1.0"
+openai = "^1.10.0"
 pandas = "2.0.3"
 chromadb = "^0.4.0"
 huggingface-hub = { version = "^0.19.0", extras = ["inference"] }
@@ -55,7 +53,7 @@ tiktoken = "~0.5.0"
 wikipedia = "^1.4.0"
 qdrant-client = "^1.7.0"
 websockets = "^10.3"
-weaviate-client = { version = "^4.4b6", allow-prereleases = true }
+weaviate-client = "*"
 jina = "*"
 sentence-transformers = { version = "^2.2.2", optional = true }
 ctransformers = { version = "^0.2.10", optional = true }
@@ -63,7 +61,7 @@ cohere = "^4.39.0"
 python-multipart = "^0.0.6"
 sqlmodel = "^0.0.14"
 faiss-cpu = "^1.7.4"
-anthropic = "^0.8.0"
+anthropic = "^0.13.0"
 orjson = "3.9.3"
 multiprocess = "^0.70.14"
 cachetools = "^5.3.1"
@@ -98,7 +96,7 @@ markupsafe = "^2.1.3"
 extract-msg = "^0.45.0"
 # jq is not available for windows
 jq = { version = "^1.6.0", markers = "sys_platform != 'win32'" }
-boto3 = "^1.28.63"
+boto3 = "^1.34.0"
 numexpr = "^2.8.6"
 qianfan = "0.2.0"
 pgvector = "^0.2.3"
@@ -107,12 +105,13 @@ langchain-google-genai = "^0.0.2"
 elasticsearch = "^8.11.1"
 pytube = "^15.0.0"
 llama-index = "^0.9.24"
+langchain-openai = "^0.0.2"
 
 [tool.poetry.group.dev.dependencies]
 pytest-asyncio = "^0.23.1"
 types-redis = "^4.6.0.5"
 ipykernel = "^6.27.0"
-mypy = "^1.7.1"
+mypy = "^1.8.0"
 ruff = "^0.1.5"
 httpx = "*"
 pytest = "^7.4.2"
@@ -154,7 +153,8 @@ exclude = ["src/backend/langflow/alembic/*"]
 line-length = 120
 
 [tool.mypy]
-plugins = "pydantic.mypy"
+plugins = ["pydantic.mypy"]
 follow_imports = "silent"
 
 [build-system]
 requires = ["poetry-core"]
(Alembic migration; path not shown)

@@ -28,7 +28,7 @@ def upgrade() -> None:
             batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
             batch_op.add_column(sa.Column('user_id', sqlmodel.sql.sqltypes.GUID(), nullable=True))
             batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
-            batch_op.create_foreign_key(None, 'user', ['user_id'], ['id'])
+            batch_op.create_foreign_key('fk_flow_user_id_user', 'user', ['user_id'], ['id'])
     except Exception:
         pass
     # ### end Alembic commands ###
@@ -38,7 +38,7 @@ def downgrade() -> None:
     # ### commands auto generated by Alembic - please adjust! ###
     try:
         with op.batch_alter_table('flow', schema=None) as batch_op:
-            batch_op.drop_constraint(None, type_='foreignkey')
+            batch_op.drop_constraint('fk_flow_user_id_user', type_='foreignkey')
             batch_op.drop_index(batch_op.f('ix_flow_user_id'))
             batch_op.drop_column('user_id')
             batch_op.drop_column('folder')
src/backend/langflow/components/agents/CSVAgent.py (new file, +23)

@@ -0,0 +1,23 @@
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, AgentExecutor
from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_agent


class CSVAgentComponent(CustomComponent):
    display_name = "CSVAgent"
    description = "Construct a CSV agent from a CSV and tools."
    documentation = "https://python.langchain.com/docs/modules/agents/toolkits/csv"

    def build_config(self):
        return {
            "llm": {"display_name": "LLM", "type": BaseLanguageModel},
            "path": {"display_name": "Path", "field_type": "file", "suffixes": [".csv"], "file_types": [".csv"]},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        path: str,
    ) -> AgentExecutor:
        # Instantiate and return the CSV agent class with the provided llm and path
        return create_csv_agent(llm=llm, path=path)
src/backend/langflow/components/agents/JsonAgent.py (new file, +24)

@@ -0,0 +1,24 @@
from langflow import CustomComponent
from langchain.agents import AgentExecutor, create_json_agent
from langflow.field_typing import (
    BaseLanguageModel,
)
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit


class JsonAgentComponent(CustomComponent):
    display_name = "JsonAgent"
    description = "Construct a json agent from an LLM and tools."

    def build_config(self):
        return {
            "llm": {"display_name": "LLM"},
            "toolkit": {"display_name": "Toolkit"},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        toolkit: JsonToolkit,
    ) -> AgentExecutor:
        return create_json_agent(llm=llm, toolkit=toolkit)
(modified file; path not shown)

@@ -3,7 +3,7 @@ from typing import List, Optional
 from langchain.agents.agent import AgentExecutor
 from langchain.agents.agent_toolkits.conversational_retrieval.openai_functions import _get_default_system_message
 from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
-from langchain.chat_models import ChatOpenAI
+from langchain_community.chat_models import ChatOpenAI
 from langchain.memory.token_buffer import ConversationTokenBufferMemory
 from langchain.prompts import SystemMessagePromptTemplate
 from langchain.prompts.chat import MessagesPlaceholder
src/backend/langflow/components/agents/SQLAgent.py (new file, +29)

@@ -0,0 +1,29 @@
from langflow import CustomComponent
from typing import Union, Callable
from langchain.agents import AgentExecutor
from langflow.field_typing import BaseLanguageModel
from langchain_community.agent_toolkits.sql.base import create_sql_agent
from langchain.sql_database import SQLDatabase
from langchain_community.agent_toolkits import SQLDatabaseToolkit


class SQLAgentComponent(CustomComponent):
    display_name = "SQLAgent"
    description = "Construct an SQL agent from an LLM and tools."

    def build_config(self):
        return {
            "llm": {"display_name": "LLM"},
            "database_uri": {"display_name": "Database URI"},
            "verbose": {"display_name": "Verbose", "value": False, "advanced": True},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        database_uri: str,
        verbose: bool = False,
    ) -> Union[AgentExecutor, Callable]:
        db = SQLDatabase.from_uri(database_uri)
        toolkit = SQLDatabaseToolkit(db=db, llm=llm)
        return create_sql_agent(llm=llm, toolkit=toolkit)
src/backend/langflow/components/agents/VectorStoreAgent.py (new file, +23)

@@ -0,0 +1,23 @@
from langflow import CustomComponent
from langchain.agents import AgentExecutor, create_vectorstore_agent
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreToolkit
from typing import Union, Callable
from langflow.field_typing import BaseLanguageModel


class VectorStoreAgentComponent(CustomComponent):
    display_name = "VectorStoreAgent"
    description = "Construct an agent from a Vector Store."

    def build_config(self):
        return {
            "llm": {"display_name": "LLM"},
            "vector_store_toolkit": {"display_name": "Vector Store Info"},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        vector_store_toolkit: VectorStoreToolkit,
    ) -> Union[AgentExecutor, Callable]:
        return create_vectorstore_agent(llm=llm, toolkit=vector_store_toolkit)
(new file, +19; path not shown)

@@ -0,0 +1,19 @@
from langflow import CustomComponent
from langchain_core.language_models.base import BaseLanguageModel
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreRouterToolkit
from langchain.agents import create_vectorstore_router_agent
from typing import Callable


class VectorStoreRouterAgentComponent(CustomComponent):
    display_name = "VectorStoreRouterAgent"
    description = "Construct an agent from a Vector Store Router."

    def build_config(self):
        return {
            "llm": {"display_name": "LLM"},
            "vectorstoreroutertoolkit": {"display_name": "Vector Store Router Toolkit"},
        }

    def build(self, llm: BaseLanguageModel, vectorstoreroutertoolkit: VectorStoreRouterToolkit) -> Callable:
        return create_vectorstore_router_agent(llm=llm, toolkit=vectorstoreroutertoolkit)
(modified file; path not shown)

@@ -28,5 +28,5 @@ class LLMChainComponent(CustomComponent):
         prompt: BasePromptTemplate,
         llm: BaseLanguageModel,
         memory: Optional[BaseMemory] = None,
-    ) -> Union[Chain, Callable]:
+    ) -> Union[Chain, Callable, LLMChain]:
         return LLMChain(prompt=prompt, llm=llm, memory=memory)
src/backend/langflow/components/chains/LLMCheckerChain.py (new file, +24)

@@ -0,0 +1,24 @@
from langflow import CustomComponent
from langchain.chains import LLMCheckerChain
from typing import Union, Callable
from langflow.field_typing import (
    BaseLanguageModel,
    Chain,
)


class LLMCheckerChainComponent(CustomComponent):
    display_name = "LLMCheckerChain"
    description = ""
    documentation = "https://python.langchain.com/docs/modules/chains/additional/llm_checker"

    def build_config(self):
        return {
            "llm": {"display_name": "LLM"},
        }

    def build(
        self,
        llm: BaseLanguageModel,
    ) -> Union[Chain, Callable]:
        return LLMCheckerChain(llm=llm)
src/backend/langflow/components/chains/LLMMathChain.py (new file, +31)

@@ -0,0 +1,31 @@
from typing import Callable, Optional, Union

from langchain.chains import LLMChain, LLMMathChain

from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, BaseMemory, Chain


class LLMMathChainComponent(CustomComponent):
    display_name = "LLMMathChain"
    description = "Chain that interprets a prompt and executes python code to do math."
    documentation = "https://python.langchain.com/docs/modules/chains/additional/llm_math"

    def build_config(self):
        return {
            "llm": {"display_name": "LLM"},
            "llm_chain": {"display_name": "LLM Chain"},
            "memory": {"display_name": "Memory"},
            "input_key": {"display_name": "Input Key"},
            "output_key": {"display_name": "Output Key"},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        llm_chain: LLMChain,
        input_key: str = "question",
        output_key: str = "answer",
        memory: Optional[BaseMemory] = None,
    ) -> Union[LLMMathChain, Callable, Chain]:
        return LLMMathChain(llm=llm, llm_chain=llm_chain, input_key=input_key, output_key=output_key, memory=memory)
src/backend/langflow/components/chains/RetrievalQA.py (new file, +39)

@@ -0,0 +1,39 @@
from typing import Callable, Optional, Union

from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA
from langflow import CustomComponent
from langflow.field_typing import BaseMemory, BaseRetriever


class RetrievalQAComponent(CustomComponent):
    display_name = "RetrievalQA"
    description = "Chain for question-answering against an index."

    def build_config(self):
        return {
            "combine_documents_chain": {"display_name": "Combine Documents Chain"},
            "retriever": {"display_name": "Retriever"},
            "memory": {"display_name": "Memory", "required": False},
            "input_key": {"display_name": "Input Key", "advanced": True},
            "output_key": {"display_name": "Output Key", "advanced": True},
            "return_source_documents": {"display_name": "Return Source Documents"},
        }

    def build(
        self,
        combine_documents_chain: BaseCombineDocumentsChain,
        retriever: BaseRetriever,
        memory: Optional[BaseMemory] = None,
        input_key: str = "query",
        output_key: str = "result",
        return_source_documents: bool = True,
    ) -> Union[BaseRetrievalQA, Callable]:
        return RetrievalQA(
            combine_documents_chain=combine_documents_chain,
            retriever=retriever,
            memory=memory,
            input_key=input_key,
            output_key=output_key,
            return_source_documents=return_source_documents,
        )
(new file, +42; path not shown)

@@ -0,0 +1,42 @@
from typing import Optional

from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain

from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, BaseMemory, BaseRetriever


class RetrievalQAWithSourcesChainComponent(CustomComponent):
    display_name = "RetrievalQAWithSourcesChain"
    description = "Question-answering with sources over an index."

    def build_config(self):
        return {
            "llm": {"display_name": "LLM"},
            "chain_type": {
                "display_name": "Chain Type",
                "options": ["stuff", "map_reduce", "map_rerank", "refine"],
            },
            "memory": {"display_name": "Memory"},
            "return_source_documents": {"display_name": "Return Source Documents"},
        }

    def build(
        self,
        retriever: BaseRetriever,
        llm: BaseLanguageModel,
        combine_documents_chain: BaseCombineDocumentsChain,
        chain_type: str,
        memory: Optional[BaseMemory] = None,
        return_source_documents: Optional[bool] = True,
    ) -> BaseQAWithSourcesChain:
        return RetrievalQAWithSourcesChain.from_chain_type(
            llm=llm,
            chain_type=chain_type,
            combine_documents_chain=combine_documents_chain,
            memory=memory,
            return_source_documents=return_source_documents,
            retriever=retriever,
        )
src/backend/langflow/components/chains/SQLDatabaseChain.py (new file, +25)

@@ -0,0 +1,25 @@
from langflow import CustomComponent
from typing import Callable, Union
from langflow.field_typing import BasePromptTemplate, BaseLanguageModel, Chain
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_experimental.sql.base import SQLDatabaseChain


class SQLDatabaseChainComponent(CustomComponent):
    display_name = "SQLDatabaseChain"
    description = ""

    def build_config(self):
        return {
            "db": {"display_name": "Database"},
            "llm": {"display_name": "LLM"},
            "prompt": {"display_name": "Prompt"},
        }

    def build(
        self,
        db: SQLDatabase,
        llm: BaseLanguageModel,
        prompt: BasePromptTemplate,
    ) -> Union[Chain, Callable, SQLDatabaseChain]:
        return SQLDatabaseChain.from_llm(llm=llm, db=db, prompt=prompt)
(new file, +42; path not shown)

@@ -0,0 +1,42 @@
from langflow import CustomComponent
from langchain.docstore.document import Document
from typing import Optional, Dict, Any


class DirectoryLoaderComponent(CustomComponent):
    display_name = "DirectoryLoader"
    description = "Load from a directory."

    def build_config(self) -> Dict[str, Any]:
        return {
            "glob": {"display_name": "Glob Pattern", "value": "**/*.txt"},
            "load_hidden": {"display_name": "Load Hidden Files", "value": False, "advanced": True},
            "max_concurrency": {"display_name": "Max Concurrency", "value": 10, "advanced": True},
            "metadata": {"display_name": "Metadata", "value": {}},
            "path": {"display_name": "Local Directory"},
            "recursive": {"display_name": "Recursive", "value": True, "advanced": True},
            "silent_errors": {"display_name": "Silent Errors", "value": False, "advanced": True},
            "use_multithreading": {"display_name": "Use Multithreading", "value": True, "advanced": True},
        }

    def build(
        self,
        glob: str,
        path: str,
        load_hidden: Optional[bool] = False,
        max_concurrency: Optional[int] = 10,
        metadata: Optional[dict] = {},
        recursive: Optional[bool] = True,
        silent_errors: Optional[bool] = False,
        use_multithreading: Optional[bool] = True,
    ) -> Document:
        return Document(
            glob=glob,
            path=path,
            load_hidden=load_hidden,
            max_concurrency=max_concurrency,
            metadata=metadata,
            recursive=recursive,
            silent_errors=silent_errors,
            use_multithreading=use_multithreading,
        )
(modified file; path not shown)

@@ -1,7 +1,8 @@
-from langflow import CustomComponent
-from langchain.embeddings.base import Embeddings
+from langchain_community.embeddings import AzureOpenAIEmbeddings
+
+from langflow import CustomComponent
 
 
 class AzureOpenAIEmbeddingsComponent(CustomComponent):
     display_name: str = "AzureOpenAIEmbeddings"
@@ -53,9 +54,9 @@ class AzureOpenAIEmbeddingsComponent(CustomComponent):
         try:
             embeddings = AzureOpenAIEmbeddings(
                 azure_endpoint=azure_endpoint,
-                deployment=azure_deployment,
-                openai_api_version=api_version,
-                openai_api_key=api_key,
+                azure_deployment=azure_deployment,
+                api_version=api_version,
+                api_key=api_key,
             )
 
         except Exception as e:
(new file, +36; path not shown)

@@ -0,0 +1,36 @@
from typing import Optional

from langchain_community.embeddings.cohere import CohereEmbeddings
from langflow import CustomComponent


class CohereEmbeddingsComponent(CustomComponent):
    display_name = "CohereEmbeddings"
    description = "Cohere embedding models."

    def build_config(self):
        return {
            "cohere_api_key": {"display_name": "Cohere API Key", "password": True},
            "model": {"display_name": "Model", "default": "embed-english-v2.0", "advanced": True},
            "truncate": {"display_name": "Truncate", "advanced": True},
            "max_retries": {"display_name": "Max Retries", "advanced": True},
            "user_agent": {"display_name": "User Agent", "advanced": True},
        }

    def build(
        self,
        request_timeout: Optional[float] = None,
        cohere_api_key: str = "",
        max_retries: Optional[int] = None,
        model: str = "embed-english-v2.0",
        truncate: Optional[str] = None,
        user_agent: str = "langchain",
    ) -> CohereEmbeddings:
        return CohereEmbeddings(  # type: ignore
            max_retries=max_retries,
            user_agent=user_agent,
            request_timeout=request_timeout,
            cohere_api_key=cohere_api_key,
            model=model,
            truncate=truncate,
        )
(new file, +36; path not shown)

@@ -0,0 +1,36 @@
from langflow import CustomComponent
from typing import Optional, Dict
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings


class HuggingFaceEmbeddingsComponent(CustomComponent):
    display_name = "HuggingFaceEmbeddings"
    description = "HuggingFace sentence_transformers embedding models."
    documentation = (
        "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/sentence_transformers"
    )

    def build_config(self):
        return {
            "cache_folder": {"display_name": "Cache Folder", "advanced": True},
            "encode_kwargs": {"display_name": "Encode Kwargs", "advanced": True, "field_type": "dict"},
            "model_kwargs": {"display_name": "Model Kwargs", "field_type": "dict", "advanced": True},
            "model_name": {"display_name": "Model Name"},
            "multi_process": {"display_name": "Multi Process", "advanced": True},
        }

    def build(
        self,
        cache_folder: Optional[str] = None,
        encode_kwargs: Optional[Dict] = {},
        model_kwargs: Optional[Dict] = {},
        model_name: str = "sentence-transformers/all-mpnet-base-v2",
        multi_process: bool = False,
    ) -> HuggingFaceEmbeddings:
        return HuggingFaceEmbeddings(
            cache_folder=cache_folder,
            encode_kwargs=encode_kwargs,
            model_kwargs=model_kwargs,
            model_name=model_name,
            multi_process=multi_process,
        )
src/backend/langflow/components/embeddings/OpenAIEmbeddings.py (new file, +117)

@@ -0,0 +1,117 @@
from typing import Any, Callable, Dict, List, Optional, Union

from langchain_openai.embeddings.base import OpenAIEmbeddings

from langflow import CustomComponent
from langflow.field_typing import NestedDict


class OpenAIEmbeddingsComponent(CustomComponent):
    display_name = "OpenAIEmbeddings"
    description = "OpenAI embedding models"

    def build_config(self):
        return {
            "allowed_special": {
                "display_name": "Allowed Special",
                "advanced": True,
                "field_type": "str",
                "is_list": True,
            },
            "default_headers": {
                "display_name": "Default Headers",
                "advanced": True,
                "field_type": "dict",
            },
            "default_query": {
                "display_name": "Default Query",
                "advanced": True,
                "field_type": "NestedDict",
            },
            "disallowed_special": {
                "display_name": "Disallowed Special",
                "advanced": True,
                "field_type": "str",
                "is_list": True,
            },
            "chunk_size": {"display_name": "Chunk Size", "advanced": True},
            "client": {"display_name": "Client", "advanced": True},
            "deployment": {"display_name": "Deployment", "advanced": True},
            "embedding_ctx_length": {
                "display_name": "Embedding Context Length",
                "advanced": True,
            },
            "max_retries": {"display_name": "Max Retries", "advanced": True},
            "model": {"display_name": "Model", "advanced": True},
            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
            "openai_api_base": {"display_name": "OpenAI API Base", "password": True, "advanced": True},
            "openai_api_key": {"display_name": "OpenAI API Key", "password": True},
            "openai_api_type": {"display_name": "OpenAI API Type", "advanced": True, "password": True},
            "openai_api_version": {
                "display_name": "OpenAI API Version",
                "advanced": True,
            },
            "openai_organization": {
                "display_name": "OpenAI Organization",
                "advanced": True,
            },
            "openai_proxy": {"display_name": "OpenAI Proxy", "advanced": True},
            "request_timeout": {"display_name": "Request Timeout", "advanced": True},
            "show_progress_bar": {
                "display_name": "Show Progress Bar",
                "advanced": True,
            },
            "skip_empty": {"display_name": "Skip Empty", "advanced": True},
            "tiktoken_model_name": {"display_name": "TikToken Model Name"},
            "tikToken_enable": {"display_name": "TikToken Enable"},
        }

    def build(
        self,
        default_headers: Optional[Dict[str, str]] = None,
        default_query: Optional[NestedDict] = {},
        allowed_special: List[str] = [],
        disallowed_special: List[str] = ["all"],
        chunk_size: int = 1000,
        client: Optional[Any] = None,
        deployment: str = "text-embedding-ada-002",
        embedding_ctx_length: int = 8191,
        max_retries: int = 6,
        model: str = "text-embedding-ada-002",
        model_kwargs: NestedDict = {},
        openai_api_base: Optional[str] = None,
        openai_api_key: Optional[str] = "",
        openai_api_type: Optional[str] = None,
        openai_api_version: Optional[str] = None,
        openai_organization: Optional[str] = None,
        openai_proxy: Optional[str] = None,
        request_timeout: Optional[float] = None,
        show_progress_bar: bool = False,
        skip_empty: bool = False,
        tikToken_enable: bool = True,
        tiktoken_model_name: Optional[str] = None,
    ) -> Union[OpenAIEmbeddings, Callable]:
        return OpenAIEmbeddings(
            tiktoken_enabled=tikToken_enable,
            default_headers=default_headers,
            default_query=default_query,
            allowed_special=set(allowed_special),
            disallowed_special=set(disallowed_special),
            chunk_size=chunk_size,
            client=client,
            deployment=deployment,
            embedding_ctx_length=embedding_ctx_length,
            max_retries=max_retries,
            model=model,
            model_kwargs=model_kwargs,
            base_url=openai_api_base,
            api_key=openai_api_key,
            openai_api_type=openai_api_type,
            api_version=openai_api_version,
            organization=openai_organization,
            openai_proxy=openai_proxy,
            timeout=request_timeout,
            show_progress_bar=show_progress_bar,
            skip_empty=skip_empty,
            tiktoken_model_name=tiktoken_model_name,
        )
(new file, +60; path not shown)

@@ -0,0 +1,60 @@
from langflow import CustomComponent
from langchain.embeddings import VertexAIEmbeddings
from typing import Optional, List


class VertexAIEmbeddingsComponent(CustomComponent):
    display_name = "VertexAIEmbeddings"
    description = "Google Cloud VertexAI embedding models."

    def build_config(self):
        return {
            "credentials": {"display_name": "Credentials", "value": "", "file_types": [".json"], "field_type": "file"},
            "instance": {"display_name": "instance", "advanced": True, "field_type": "dict"},
            "location": {"display_name": "Location", "value": "us-central1", "advanced": True},
            "max_output_tokens": {"display_name": "Max Output Tokens", "value": 128},
            "max_retries": {"display_name": "Max Retries", "value": 6, "advanced": True},
            "model_name": {"display_name": "Model Name", "value": "textembedding-gecko"},
            "n": {"display_name": "N", "value": 1, "advanced": True},
            "project": {"display_name": "Project", "advanced": True},
            "request_parallelism": {"display_name": "Request Parallelism", "value": 5, "advanced": True},
            "stop": {"display_name": "Stop", "advanced": True},
            "streaming": {"display_name": "Streaming", "value": False, "advanced": True},
            "temperature": {"display_name": "Temperature", "value": 0.0},
            "top_k": {"display_name": "Top K", "value": 40, "advanced": True},
            "top_p": {"display_name": "Top P", "value": 0.95, "advanced": True},
        }

    def build(
        self,
        instance: Optional[str] = None,
        credentials: Optional[str] = None,
        location: str = "us-central1",
        max_output_tokens: int = 128,
        max_retries: int = 6,
        model_name: str = "textembedding-gecko",
        n: int = 1,
        project: Optional[str] = None,
        request_parallelism: int = 5,
        stop: Optional[List[str]] = None,
        streaming: bool = False,
        temperature: float = 0.0,
        top_k: int = 40,
        top_p: float = 0.95,
    ) -> VertexAIEmbeddings:
        return VertexAIEmbeddings(
            instance=instance,
            credentials=credentials,
            location=location,
            max_output_tokens=max_output_tokens,
            max_retries=max_retries,
            model_name=model_name,
            n=n,
            project=project,
            request_parallelism=request_parallelism,
            stop=stop,
            streaming=streaming,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
        )
src/backend/langflow/components/llms/Anthropic.py (new file, +48)

@@ -0,0 +1,48 @@
from typing import Optional

from langchain_community.llms.anthropic import Anthropic
from pydantic.v1 import SecretStr

from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, NestedDict


class AnthropicComponent(CustomComponent):
    display_name = "Anthropic"
    description = "Anthropic large language models."

    def build_config(self):
        return {
            "anthropic_api_key": {
                "display_name": "Anthropic API Key",
                "type": str,
                "password": True,
            },
            "anthropic_api_url": {
                "display_name": "Anthropic API URL",
                "type": str,
            },
            "model_kwargs": {
                "display_name": "Model Kwargs",
                "field_type": "NestedDict",
                "advanced": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
            },
        }

    def build(
        self,
        anthropic_api_key: str,
        anthropic_api_url: str,
        model_kwargs: NestedDict = {},
        temperature: Optional[float] = None,
    ) -> BaseLanguageModel:
        return Anthropic(
            anthropic_api_key=SecretStr(anthropic_api_key),
            anthropic_api_url=anthropic_api_url,
            model_kwargs=model_kwargs,
            temperature=temperature,
        )
(modified file; path not shown)

@@ -1,6 +1,6 @@
 from typing import Optional
 
-from langchain.chat_models.anthropic import ChatAnthropic
+from langchain_community.chat_models.anthropic import ChatAnthropic
 from langchain.llms.base import BaseLanguageModel
 from pydantic.v1 import SecretStr
 
(modified file; path not shown)

@@ -1,7 +1,7 @@
 from typing import Optional
 from langflow import CustomComponent
 from langchain.llms.base import BaseLanguageModel
-from langchain.chat_models.azure_openai import AzureChatOpenAI
+from langchain_community.chat_models.azure_openai import AzureChatOpenAI
 
 
 class AzureChatOpenAIComponent(CustomComponent):
@@ -26,7 +26,7 @@ class AzureChatOpenAIComponent(CustomComponent):
         "2023-07-01-preview",
         "2023-08-01-preview",
         "2023-09-01-preview",
-        "2023-12-01-preview"
+        "2023-12-01-preview",
     ]
 
     def build_config(self):
(modified file; path not shown)

@@ -1,6 +1,6 @@
 from typing import Optional
 
-from langchain.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
+from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
 from langchain.llms.base import BaseLLM
 from pydantic.v1 import SecretStr
 
src/backend/langflow/components/llms/CTransformers.py (new file, +33)

@@ -0,0 +1,33 @@
from typing import Dict, Optional

from langchain_community.llms.ctransformers import CTransformers

from langflow import CustomComponent


class CTransformersComponent(CustomComponent):
    display_name = "CTransformers"
    description = "C Transformers LLM models"
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"

    def build_config(self):
        return {
            "model": {"display_name": "Model", "required": True},
            "model_file": {
                "display_name": "Model File",
                "required": False,
                "field_type": "file",
                "file_types": [".bin"],
            },
            "model_type": {"display_name": "Model Type", "required": True},
            "config": {
                "display_name": "Config",
                "advanced": True,
                "required": False,
                "field_type": "dict",
                "value": '{"top_k":40,"top_p":0.95,"temperature":0.8,"repetition_penalty":1.1,"last_n_tokens":64,"seed":-1,"max_new_tokens":256,"stop":"","stream":"False","reset":"True","batch_size":8,"threads":-1,"context_length":-1,"gpu_layers":0}',
            },
        }

    def build(self, model: str, model_file: str, model_type: str, config: Optional[Dict] = None) -> CTransformers:
        return CTransformers(model=model, model_file=model_file, model_type=model_type, config=config)  # type: ignore
src/backend/langflow/components/llms/ChatAnthropic.py (new file, +47)

@@ -0,0 +1,47 @@
from pydantic import SecretStr
from langflow import CustomComponent
from typing import Optional, Union, Callable
from langflow.field_typing import BaseLanguageModel
from langchain_community.chat_models.anthropic import ChatAnthropic


class ChatAnthropicComponent(CustomComponent):
    display_name = "ChatAnthropic"
    description = "`Anthropic` chat large language models."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/chat/integrations/anthropic"

    def build_config(self):
        return {
            "anthropic_api_key": {
                "display_name": "Anthropic API Key",
                "field_type": "str",
                "password": True,
            },
            "anthropic_api_url": {
                "display_name": "Anthropic API URL",
                "field_type": "str",
            },
            "model_kwargs": {
                "display_name": "Model Kwargs",
                "field_type": "dict",
                "advanced": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
            },
        }

    def build(
        self,
        anthropic_api_key: str,
        anthropic_api_url: Optional[str] = None,
        model_kwargs: dict = {},
        temperature: Optional[float] = None,
    ) -> Union[BaseLanguageModel, Callable]:
        return ChatAnthropic(
            anthropic_api_key=SecretStr(anthropic_api_key),
            anthropic_api_url=anthropic_api_url,
            model_kwargs=model_kwargs,
            temperature=temperature,
        )
(modified file; path not shown)

@@ -1,8 +1,8 @@
 from typing import Any, Dict, List, Optional
 
-# from langchain_community.chat_models import ChatOllama
-from langchain.chat_models import ChatOllama
-from langchain.chat_models.base import BaseChatModel
+from langchain_community.chat_models import ChatOllama
+from langchain_core.language_models.chat_models import BaseChatModel
+
+# from langchain.chat_models import ChatOllama
 from langflow import CustomComponent
 
src/backend/langflow/components/llms/ChatOpenAI.py (new file, +84)

@@ -0,0 +1,84 @@
from typing import Optional, Union

from langchain.llms import BaseLLM
from langchain_community.chat_models.openai import ChatOpenAI
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, NestedDict


class ChatOpenAIComponent(CustomComponent):
    display_name = "ChatOpenAI"
    description = "`OpenAI` Chat large language models API."

    def build_config(self):
        return {
            "max_tokens": {
                "display_name": "Max Tokens",
                "field_type": "int",
                "advanced": False,
                "required": False,
            },
            "model_kwargs": {
                "display_name": "Model Kwargs",
                "field_type": "NestedDict",
                "advanced": True,
                "required": False,
            },
            "model_name": {
                "display_name": "Model Name",
                "field_type": "str",
                "advanced": False,
                "required": False,
                "options": [
                    "gpt-4-1106-preview",
                    "gpt-4",
                    "gpt-4-32k",
                    "gpt-3.5-turbo",
                    "gpt-3.5-turbo-16k",
                ],
            },
            "openai_api_base": {
                "display_name": "OpenAI API Base",
                "field_type": "str",
                "advanced": False,
                "required": False,
                "info": (
                    "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\n"
                    "You can change this to use other APIs like JinaChat, LocalAI and Prem."
                ),
            },
            "openai_api_key": {
                "display_name": "OpenAI API Key",
                "field_type": "str",
                "advanced": False,
                "required": False,
                "password": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
                "advanced": False,
                "required": False,
                "value": 0.7,
            },
        }

    def build(
        self,
        max_tokens: Optional[int] = 256,
        model_kwargs: NestedDict = {},
        model_name: str = "gpt-4-1106-preview",
        openai_api_base: Optional[str] = None,
        openai_api_key: Optional[str] = None,
        temperature: float = 0.7,
    ) -> Union[BaseLanguageModel, BaseLLM]:
        if not openai_api_base:
            openai_api_base = "https://api.openai.com/v1"
        return ChatOpenAI(
            max_tokens=max_tokens,
            model_kwargs=model_kwargs,
            model=model_name,
            base_url=openai_api_base,
            api_key=openai_api_key,
            temperature=temperature,
        )
src/backend/langflow/components/llms/ChatVertexAI.py (new file, +87)

@@ -0,0 +1,87 @@
from typing import List, Optional, Union

from langchain.llms import BaseLLM
from langchain_community.chat_models.vertexai import ChatVertexAI
from langchain_core.messages.base import BaseMessage
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel


class ChatVertexAIComponent(CustomComponent):
    display_name = "ChatVertexAI"
    description = "`Vertex AI` Chat large language models API."

    def build_config(self):
        return {
            "credentials": {
                "display_name": "Credentials",
                "field_type": "file",
                "file_types": [".json"],
                "file_path": None,
            },
            "examples": {
                "display_name": "Examples",
                "multiline": True,
            },
            "location": {
                "display_name": "Location",
                "value": "us-central1",
            },
            "max_output_tokens": {
                "display_name": "Max Output Tokens",
                "value": 128,
                "advanced": True,
            },
            "model_name": {
                "display_name": "Model Name",
                "value": "chat-bison",
            },
            "project": {
                "display_name": "Project",
            },
            "temperature": {
                "display_name": "Temperature",
                "value": 0.0,
            },
            "top_k": {
                "display_name": "Top K",
                "value": 40,
                "advanced": True,
            },
            "top_p": {
                "display_name": "Top P",
                "value": 0.95,
                "advanced": True,
            },
            "verbose": {
                "display_name": "Verbose",
                "value": False,
                "advanced": True,
            },
        }

    def build(
        self,
        credentials: Optional[str],
        project: str,
        examples: Optional[List[BaseMessage]] = [],
        location: str = "us-central1",
        max_output_tokens: int = 128,
        model_name: str = "chat-bison",
        temperature: float = 0.0,
        top_k: int = 40,
        top_p: float = 0.95,
        verbose: bool = False,
    ) -> Union[BaseLanguageModel, BaseLLM]:
        return ChatVertexAI(
            credentials=credentials,
            examples=examples,
            location=location,
            max_output_tokens=max_output_tokens,
            model_name=model_name,
            project=project,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            verbose=verbose,
        )
src/backend/langflow/components/llms/Cohere.py (new file, +24)

@@ -0,0 +1,24 @@
from langchain_community.llms.cohere import Cohere
from langchain_core.language_models.base import BaseLanguageModel
from langflow import CustomComponent


class CohereComponent(CustomComponent):
    display_name = "Cohere"
    description = "Cohere large language models."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"

    def build_config(self):
        return {
            "cohere_api_key": {"display_name": "Cohere API Key", "type": "password", "password": True},
            "max_tokens": {"display_name": "Max Tokens", "default": 256, "type": "int", "show": True},
            "temperature": {"display_name": "Temperature", "default": 0.75, "type": "float", "show": True},
        }

    def build(
        self,
        cohere_api_key: str,
        max_tokens: int = 256,
        temperature: float = 0.75,
    ) -> BaseLanguageModel:
        return Cohere(cohere_api_key=cohere_api_key, max_tokens=max_tokens, temperature=temperature)  # type: ignore
src/backend/langflow/components/llms/LlamaCpp.py (new file, +129)

@@ -0,0 +1,129 @@
from typing import Optional, List, Dict, Any
from langflow import CustomComponent
from langchain_community.llms.llamacpp import LlamaCpp


class LlamaCppComponent(CustomComponent):
    display_name = "LlamaCpp"
    description = "llama.cpp model."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"

    def build_config(self):
        return {
            "grammar": {"display_name": "Grammar", "advanced": True},
            "cache": {"display_name": "Cache", "advanced": True},
            "client": {"display_name": "Client", "advanced": True},
            "echo": {"display_name": "Echo", "advanced": True},
            "f16_kv": {"display_name": "F16 KV", "advanced": True},
            "grammar_path": {"display_name": "Grammar Path", "advanced": True},
            "last_n_tokens_size": {"display_name": "Last N Tokens Size", "advanced": True},
            "logits_all": {"display_name": "Logits All", "advanced": True},
            "logprobs": {"display_name": "Logprobs", "advanced": True},
            "lora_base": {"display_name": "Lora Base", "advanced": True},
            "lora_path": {"display_name": "Lora Path", "advanced": True},
            "max_tokens": {"display_name": "Max Tokens", "advanced": True},
            "metadata": {"display_name": "Metadata", "advanced": True},
            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
            "model_path": {
                "display_name": "Model Path",
                "field_type": "file",
                "file_types": [".bin"],
                "required": True,
            },
            "n_batch": {"display_name": "N Batch", "advanced": True},
            "n_ctx": {"display_name": "N Ctx", "advanced": True},
            "n_gpu_layers": {"display_name": "N GPU Layers", "advanced": True},
            "n_parts": {"display_name": "N Parts", "advanced": True},
            "n_threads": {"display_name": "N Threads", "advanced": True},
            "repeat_penalty": {"display_name": "Repeat Penalty", "advanced": True},
            "rope_freq_base": {"display_name": "Rope Freq Base", "advanced": True},
            "rope_freq_scale": {"display_name": "Rope Freq Scale", "advanced": True},
            "seed": {"display_name": "Seed", "advanced": True},
            "stop": {"display_name": "Stop", "advanced": True},
            "streaming": {"display_name": "Streaming", "advanced": True},
            "suffix": {"display_name": "Suffix", "advanced": True},
            "tags": {"display_name": "Tags", "advanced": True},
            "temperature": {"display_name": "Temperature"},
            "top_k": {"display_name": "Top K", "advanced": True},
            "top_p": {"display_name": "Top P", "advanced": True},
            "use_mlock": {"display_name": "Use Mlock", "advanced": True},
            "use_mmap": {"display_name": "Use Mmap", "advanced": True},
            "verbose": {"display_name": "Verbose", "advanced": True},
            "vocab_only": {"display_name": "Vocab Only", "advanced": True},
        }

    def build(
        self,
        model_path: str,
        grammar: Optional[str] = None,
        cache: Optional[bool] = None,
        client: Optional[Any] = None,
        echo: Optional[bool] = False,
        f16_kv: bool = True,
        grammar_path: Optional[str] = None,
        last_n_tokens_size: Optional[int] = 64,
        logits_all: bool = False,
        logprobs: Optional[int] = None,
        lora_base: Optional[str] = None,
        lora_path: Optional[str] = None,
        max_tokens: Optional[int] = 256,
        metadata: Optional[Dict] = None,
        model_kwargs: Dict = {},
        n_batch: Optional[int] = 8,
        n_ctx: int = 512,
        n_gpu_layers: Optional[int] = 1,
        n_parts: int = -1,
        n_threads: Optional[int] = 1,
        repeat_penalty: Optional[float] = 1.1,
        rope_freq_base: float = 10000.0,
        rope_freq_scale: float = 1.0,
        seed: int = -1,
        stop: Optional[List[str]] = [],
        streaming: bool = True,
        suffix: Optional[str] = "",
        tags: Optional[List[str]] = [],
        temperature: Optional[float] = 0.8,
        top_k: Optional[int] = 40,
        top_p: Optional[float] = 0.95,
        use_mlock: bool = False,
        use_mmap: Optional[bool] = True,
        verbose: bool = True,
        vocab_only: bool = False,
    ) -> LlamaCpp:
        return LlamaCpp(
            model_path=model_path,
            grammar=grammar,
            cache=cache,
            client=client,
            echo=echo,
            f16_kv=f16_kv,
            grammar_path=grammar_path,
            last_n_tokens_size=last_n_tokens_size,
            logits_all=logits_all,
            logprobs=logprobs,
            lora_base=lora_base,
            lora_path=lora_path,
            max_tokens=max_tokens,
            metadata=metadata,
            model_kwargs=model_kwargs,
            n_batch=n_batch,
            n_ctx=n_ctx,
            n_gpu_layers=n_gpu_layers,
            n_parts=n_parts,
            n_threads=n_threads,
            repeat_penalty=repeat_penalty,
            rope_freq_base=rope_freq_base,
            rope_freq_scale=rope_freq_scale,
            seed=seed,
            stop=stop,
            streaming=streaming,
            suffix=suffix,
            tags=tags,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            use_mlock=use_mlock,
            use_mmap=use_mmap,
            verbose=verbose,
            vocab_only=vocab_only,
        )
(modified file; path not shown)

@@ -1,7 +1,7 @@
-from typing import Optional, List
+from typing import List, Optional
 
-from langchain.llms import Ollama
 from langchain.llms.base import BaseLLM
+from langchain_community.llms.ollama import Ollama
 
 from langflow import CustomComponent
 
@@ -133,29 +133,25 @@ class OllamaLLM(CustomComponent):
             mirostat_eta = None
             mirostat_tau = None
 
-        llm_params = {
-            "base_url": base_url,
-            "model": model,
-            "mirostat": mirostat_value,
-            "mirostat_eta": mirostat_eta,
-            "mirostat_tau": mirostat_tau,
-            "num_ctx": num_ctx,
-            "num_gpu": num_gpu,
-            "num_thread": num_thread,
-            "repeat_last_n": repeat_last_n,
-            "repeat_penalty": repeat_penalty,
-            "temperature": temperature,
-            "stop": stop,
-            "tfs_z": tfs_z,
-            "top_k": top_k,
-            "top_p": top_p,
-        }
-
-        # None Value remove
-        llm_params = {k: v for k, v in llm_params.items() if v is not None}
-
         try:
-            llm = Ollama(**llm_params)
+            llm = Ollama(
+                base_url=base_url,
+                model=model,
+                mirostat=mirostat_value,
+                mirostat_eta=mirostat_eta,
+                mirostat_tau=mirostat_tau,
+                num_ctx=num_ctx,
+                num_gpu=num_gpu,
+                num_thread=num_thread,
+                repeat_last_n=repeat_last_n,
+                repeat_penalty=repeat_penalty,
+                temperature=temperature,
+                stop=stop,
+                tfs_z=tfs_z,
+                top_k=top_k,
+                top_p=top_p,
+            )
 
         except Exception as e:
             raise ValueError("Could not connect to Ollama.") from e
 
src/backend/langflow/components/llms/VertexAI.py (new file, +147)

@@ -0,0 +1,147 @@
from langflow import CustomComponent
from langchain.llms import BaseLLM
from typing import Optional, Union, Callable, Dict
from langchain_community.llms.vertexai import VertexAI


class VertexAIComponent(CustomComponent):
    display_name = "VertexAI"
    description = "Google Vertex AI large language models"

    def build_config(self):
        return {
            "credentials": {
                "display_name": "Credentials",
                "field_type": "file",
                "file_types": [".json"],
                "required": False,
                "value": None,
            },
            "location": {
                "display_name": "Location",
                "type": "str",
                "advanced": True,
                "value": "us-central1",
                "required": False,
            },
            "max_output_tokens": {
                "display_name": "Max Output Tokens",
                "field_type": "int",
                "value": 128,
                "required": False,
                "advanced": True,
            },
            "max_retries": {
                "display_name": "Max Retries",
                "type": "int",
                "value": 6,
                "required": False,
                "advanced": True,
            },
            "metadata": {
                "display_name": "Metadata",
                "field_type": "dict",
                "required": False,
                "default": {},
            },
            "model_name": {
                "display_name": "Model Name",
                "type": "str",
                "value": "text-bison",
                "required": False,
            },
            "n": {
                "advanced": True,
                "display_name": "N",
                "field_type": "int",
                "value": 1,
                "required": False,
            },
            "project": {
                "display_name": "Project",
                "type": "str",
                "required": False,
                "default": None,
            },
            "request_parallelism": {
                "display_name": "Request Parallelism",
                "field_type": "int",
                "value": 5,
                "required": False,
                "advanced": True,
            },
            "streaming": {
                "display_name": "Streaming",
                "field_type": "bool",
                "value": False,
                "required": False,
                "advanced": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
                "value": 0.0,
                "required": False,
                "advanced": True,
            },
            "top_k": {"display_name": "Top K", "type": "int", "default": 40, "required": False, "advanced": True},
            "top_p": {
                "display_name": "Top P",
                "field_type": "float",
                "value": 0.95,
                "required": False,
                "advanced": True,
            },
            "tuned_model_name": {
                "display_name": "Tuned Model Name",
                "type": "str",
                "required": False,
                "value": None,
                "advanced": True,
            },
            "verbose": {
                "display_name": "Verbose",
                "field_type": "bool",
                "value": False,
                "required": False,
            },
            "name": {"display_name": "Name", "field_type": "str"},
        }

    def build(
        self,
        credentials: Optional[str] = None,
        location: str = "us-central1",
        max_output_tokens: int = 128,
        max_retries: int = 6,
        metadata: Dict = {},
        model_name: str = "text-bison",
        n: int = 1,
        name: Optional[str] = None,
        project: Optional[str] = None,
        request_parallelism: int = 5,
        streaming: bool = False,
        temperature: float = 0.0,
        top_k: int = 40,
        top_p: float = 0.95,
        tuned_model_name: Optional[str] = None,
        verbose: bool = False,
    ) -> Union[BaseLLM, Callable]:
        return VertexAI(
            credentials=credentials,
            location=location,
            max_output_tokens=max_output_tokens,
            max_retries=max_retries,
            metadata=metadata,
            model_name=model_name,
            n=n,
            name=name,
            project=project,
            request_parallelism=request_parallelism,
            streaming=streaming,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            tuned_model_name=tuned_model_name,
            verbose=verbose,
        )
(new file, +49; path not shown)

@@ -0,0 +1,49 @@
from typing import Callable, Optional, Union

from langchain.retrievers import MultiQueryRetriever
from langflow import CustomComponent
from langflow.field_typing import BaseLLM, BaseRetriever, PromptTemplate


class MultiQueryRetrieverComponent(CustomComponent):
    display_name = "MultiQueryRetriever"
    description = "Initialize from llm using default template."
    documentation = "https://python.langchain.com/docs/modules/data_connection/retrievers/how_to/MultiQueryRetriever"

    def build_config(self):
        return {
            "llm": {"display_name": "LLM"},
            "prompt": {
                "display_name": "Prompt",
                "default": {
                    "input_variables": ["question"],
                    "input_types": {},
                    "output_parser": None,
                    "partial_variables": {},
                    "template": "You are an AI language model assistant. Your task is \n"
                    "to generate 3 different versions of the given user \n"
                    "question to retrieve relevant documents from a vector database. \n"
                    "By generating multiple perspectives on the user question, \n"
                    "your goal is to help the user overcome some of the limitations \n"
                    "of distance-based similarity search. Provide these alternative \n"
                    "questions separated by newlines. Original question: {question}",
                    "template_format": "f-string",
                    "validate_template": False,
                    "_type": "prompt",
                },
            },
            "retriever": {"display_name": "Retriever"},
            "parser_key": {"display_name": "Parser Key", "default": "lines"},
        }

    def build(
        self,
        llm: BaseLLM,
        retriever: BaseRetriever,
        prompt: Optional[PromptTemplate] = None,
        parser_key: str = "lines",
    ) -> Union[Callable, MultiQueryRetriever]:
        if not prompt:
            return MultiQueryRetriever.from_llm(llm=llm, retriever=retriever, parser_key=parser_key)
        else:
            return MultiQueryRetriever.from_llm(llm=llm, retriever=retriever, prompt=prompt, parser_key=parser_key)
@@ -0,0 +1,30 @@
from langflow import CustomComponent
from langchain.text_splitter import CharacterTextSplitter
from langchain_core.documents.base import Document
from typing import List


class CharacterTextSplitterComponent(CustomComponent):
    display_name = "CharacterTextSplitter"
    description = "Splitting text that looks at characters."

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "chunk_overlap": {"display_name": "Chunk Overlap", "default": 200},
            "chunk_size": {"display_name": "Chunk Size", "default": 1000},
            "separator": {"display_name": "Separator", "default": "\n"},
        }

    def build(
        self,
        documents: List[Document],
        chunk_overlap: int = 200,
        chunk_size: int = 1000,
        separator: str = "\n",
    ) -> List[Document]:
        return CharacterTextSplitter(
            chunk_overlap=chunk_overlap,
            chunk_size=chunk_size,
            separator=separator,
        ).split_documents(documents)
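A quick sanity check for the splitter (a sketch, not part of the commit; the sample Document content is made up):

    from langchain_core.documents.base import Document

    chunks = CharacterTextSplitterComponent().build(
        documents=[Document(page_content="one paragraph\n" * 200)],
        chunk_size=500,
        chunk_overlap=50,
        separator="\n",
    )
    print(len(chunks))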
@@ -1,7 +1,9 @@
from typing import Optional
from langflow import CustomComponent
from langchain.text_splitter import Language

from langchain.schema import Document
from langchain.text_splitter import Language

from langflow import CustomComponent


class LanguageRecursiveTextSplitterComponent(CustomComponent):

@@ -48,7 +50,7 @@ class LanguageRecursiveTextSplitterComponent(CustomComponent):
        documents: list[Document],
        chunk_size: Optional[int] = 1000,
        chunk_overlap: Optional[int] = 200,
        separator_type: Optional[str] = "Python",
        separator_type: str = "Python",
    ) -> list[Document]:
        """
        Split text into chunks of a specified length.
16 src/backend/langflow/components/toolkits/JsonToolkit.py Normal file

@@ -0,0 +1,16 @@
from langflow import CustomComponent
from langchain_community.tools.json.tool import JsonSpec
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit


class JsonToolkitComponent(CustomComponent):
    display_name = "JsonToolkit"
    description = "Toolkit for interacting with a JSON spec."

    def build_config(self):
        return {
            "spec": {"display_name": "Spec", "type": JsonSpec},
        }

    def build(self, spec: JsonSpec) -> JsonToolkit:
        return JsonToolkit(spec=spec)
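Sketch of the toolkit feeding a JSON agent (create_json_agent lives in langchain_community; `llm` is a placeholder the flow would supply, and the sample dict is made up):

    from langchain_community.agent_toolkits.json.base import create_json_agent
    from langchain_community.tools.json.tool import JsonSpec

    toolkit = JsonToolkitComponent().build(spec=JsonSpec(dict_={"version": "0.6.5a13"}))
    # agent = create_json_agent(llm=llm, toolkit=toolkit)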
23 src/backend/langflow/components/toolkits/OpenAPIToolkit.py Normal file

@@ -0,0 +1,23 @@
from langflow import CustomComponent
from langflow.field_typing import AgentExecutor
from typing import Callable
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit


class OpenAPIToolkitComponent(CustomComponent):
    display_name = "OpenAPIToolkit"
    description = "Toolkit for interacting with an OpenAPI API."

    def build_config(self):
        return {
            "json_agent": {"display_name": "JSON Agent"},
            "requests_wrapper": {"display_name": "Text Requests Wrapper"},
        }

    def build(
        self,
        json_agent: AgentExecutor,
        requests_wrapper: TextRequestsWrapper,
    ) -> Callable:
        return OpenAPIToolkit(json_agent=json_agent, requests_wrapper=requests_wrapper)
26 src/backend/langflow/components/toolkits/VectorStoreInfo.py Normal file

@@ -0,0 +1,26 @@
from typing import Callable, Union

from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo
from langchain_community.vectorstores import VectorStore

from langflow import CustomComponent


class VectorStoreInfoComponent(CustomComponent):
    display_name = "VectorStoreInfo"
    description = "Information about a VectorStore"

    def build_config(self):
        return {
            "vectorstore": {"display_name": "VectorStore"},
            "description": {"display_name": "Description", "multiline": True},
            "name": {"display_name": "Name"},
        }

    def build(
        self,
        vectorstore: VectorStore,
        description: str,
        name: str,
    ) -> Union[VectorStoreInfo, Callable]:
        return VectorStoreInfo(vectorstore=vectorstore, description=description, name=name)
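How this composes with the vector-store toolkits defined below (sketch only; `store` and `llm` are placeholders a flow would provide):

    info = VectorStoreInfoComponent().build(
        vectorstore=store,
        description="Project docs indexed for QA",
        name="docs",
    )
    # toolkit = VectorStoreToolkitComponent().build(vectorstore_info=info, llm=llm)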
@@ -0,0 +1,23 @@
from langflow import CustomComponent
from typing import List, Union
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreRouterToolkit
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo
from langflow.field_typing import BaseLanguageModel, Tool


class VectorStoreRouterToolkitComponent(CustomComponent):
    display_name = "VectorStoreRouterToolkit"
    description = "Toolkit for routing between Vector Stores."

    def build_config(self):
        return {
            "vectorstores": {"display_name": "Vector Stores"},
            "llm": {"display_name": "LLM"},
        }

    def build(
        self, vectorstores: List[VectorStoreInfo], llm: BaseLanguageModel
    ) -> Union[Tool, VectorStoreRouterToolkit]:
        print("vectorstores", vectorstores)
        print("llm", llm)
        return VectorStoreRouterToolkit(vectorstores=vectorstores, llm=llm)
@@ -0,0 +1,28 @@
from langflow import CustomComponent
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreToolkit
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo
from langflow.field_typing import (
    BaseLanguageModel,
)
from langflow.field_typing import (
    Tool,
)
from typing import Union


class VectorStoreToolkitComponent(CustomComponent):
    display_name = "VectorStoreToolkit"
    description = "Toolkit for interacting with a Vector Store."

    def build_config(self):
        return {
            "vectorstore_info": {"display_name": "Vector Store Info"},
            "llm": {"display_name": "LLM"},
        }

    def build(
        self,
        vectorstore_info: VectorStoreInfo,
        llm: BaseLanguageModel,
    ) -> Union[Tool, VectorStoreToolkit]:
        return VectorStoreToolkit(vectorstore_info=vectorstore_info, llm=llm)
@@ -0,0 +1,31 @@
from langflow import CustomComponent

# Assuming `BingSearchAPIWrapper` is a class that exists in the context
# and has the appropriate methods and attributes.
# We need to make sure this class is importable from the context where this code will be running.
from langchain_community.utilities.bing_search import BingSearchAPIWrapper


class BingSearchAPIWrapperComponent(CustomComponent):
    display_name = "BingSearchAPIWrapper"
    description = "Wrapper for Bing Search API."

    def build_config(self):
        return {
            "bing_search_url": {"display_name": "Bing Search URL"},
            "bing_subscription_key": {
                "display_name": "Bing Subscription Key",
                "password": True,
            },
            "k": {"display_name": "Number of results", "advanced": True},
            # 'k' is not included as it is not shown (show=False)
        }

    def build(
        self,
        bing_search_url: str,
        bing_subscription_key: str,
        k: int = 10,
    ) -> BingSearchAPIWrapper:
        # 'k' has a default value and is not shown (show=False), so it is hardcoded here
        return BingSearchAPIWrapper(bing_search_url=bing_search_url, bing_subscription_key=bing_subscription_key, k=k)
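Usage sketch (endpoint and key are placeholders):

    wrapper = BingSearchAPIWrapperComponent().build(
        bing_search_url="https://api.bing.microsoft.com/v7.0/search",
        bing_subscription_key="<subscription-key>",
        k=5,
    )
    results = wrapper.run("langflow custom components")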
@@ -0,0 +1,21 @@
from langflow import CustomComponent
from typing import Union, Callable
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper


class GoogleSearchAPIWrapperComponent(CustomComponent):
    display_name = "GoogleSearchAPIWrapper"
    description = "Wrapper for Google Search API."

    def build_config(self):
        return {
            "google_api_key": {"display_name": "Google API Key", "password": True},
            "google_cse_id": {"display_name": "Google CSE ID", "password": True},
        }

    def build(
        self,
        google_api_key: str,
        google_cse_id: str,
    ) -> Union[GoogleSearchAPIWrapper, Callable]:
        return GoogleSearchAPIWrapper(google_api_key=google_api_key, google_cse_id=google_cse_id)
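Usage sketch (both credentials are placeholders):

    wrapper = GoogleSearchAPIWrapperComponent().build(
        google_api_key="<api-key>",
        google_cse_id="<cse-id>",
    )
    print(wrapper.results("langflow", num_results=3))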
@@ -0,0 +1,47 @@
from langflow import CustomComponent
from typing import Dict, Optional

# Assuming the existence of GoogleSerperAPIWrapper class in the serper module
# If this class does not exist, you would need to create it or import the appropriate class from another module
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper


class GoogleSerperAPIWrapperComponent(CustomComponent):
    display_name = "GoogleSerperAPIWrapper"
    description = "Wrapper around the Serper.dev Google Search API."

    def build_config(self) -> Dict[str, Dict]:
        return {
            "result_key_for_type": {
                "display_name": "Result Key for Type",
                "show": True,
                "multiline": False,
                "password": False,
                "name": "result_key_for_type",
                "advanced": False,
                "dynamic": False,
                "info": "",
                "field_type": "dict",
                "list": False,
                "value": {"news": "news", "places": "places", "images": "images", "search": "organic"},
            },
            "serper_api_key": {
                "display_name": "Serper API Key",
                "show": True,
                "multiline": False,
                "password": True,
                "name": "serper_api_key",
                "advanced": False,
                "dynamic": False,
                "info": "",
                "type": "str",
                "list": False,
            },
        }

    def build(
        self,
        serper_api_key: str,
        result_key_for_type: Optional[Dict[str, str]] = None,
    ) -> GoogleSerperAPIWrapper:
        return GoogleSerperAPIWrapper(result_key_for_type=result_key_for_type, serper_api_key=serper_api_key)
@@ -0,0 +1,33 @@
from langflow import CustomComponent
from typing import Optional, Dict
from langchain_community.utilities.searx_search import SearxSearchWrapper


class SearxSearchWrapperComponent(CustomComponent):
    display_name = "SearxSearchWrapper"
    description = "Wrapper for Searx API."

    def build_config(self):
        return {
            "headers": {
                "field_type": "dict",
                "display_name": "Headers",
                "multiline": True,
                "value": '{"Authorization": "Bearer <token>"}',
            },
            "k": {"display_name": "k", "advanced": True, "field_type": "int", "value": 10},
            "searx_host": {
                "display_name": "Searx Host",
                "field_type": "str",
                "value": "https://searx.example.com",
                "advanced": True,
            },
        }

    def build(
        self,
        k: int = 10,
        headers: Optional[Dict[str, str]] = None,
        searx_host: str = "https://searx.example.com",
    ) -> SearxSearchWrapper:
        return SearxSearchWrapper(headers=headers, k=k, searx_host=searx_host)

31 src/backend/langflow/components/utilities/SerpAPIWrapper.py Normal file
@@ -0,0 +1,31 @@
from typing import Callable, Union

from langchain_community.utilities.serpapi import SerpAPIWrapper
from langflow import CustomComponent


class SerpAPIWrapperComponent(CustomComponent):
    display_name = "SerpAPIWrapper"
    description = "Wrapper around SerpAPI"

    def build_config(self):
        return {
            "serpapi_api_key": {"display_name": "SerpAPI API Key", "type": "str", "password": True},
            "params": {
                "display_name": "Parameters",
                "type": "dict",
                "advanced": True,
                "multiline": True,
                "value": '{"engine": "google","google_domain": "google.com","gl": "us","hl": "en"}',
            },
        }

    def build(
        self,
        serpapi_api_key: str,
        params: dict,
    ) -> Union[SerpAPIWrapper, Callable]:  # Removed quotes around SerpAPIWrapper
        return SerpAPIWrapper(  # type: ignore
            serpapi_api_key=serpapi_api_key,
            params=params,
        )
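Usage sketch (the key is a placeholder; params mirror the default value above):

    wrapper = SerpAPIWrapperComponent().build(
        serpapi_api_key="<serpapi-key>",
        params={"engine": "google", "google_domain": "google.com", "gl": "us", "hl": "en"},
    )
    answer = wrapper.run("weather in San Francisco")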
@@ -0,0 +1,30 @@
from typing import Callable, Union

from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
from langflow import CustomComponent

# Assuming WikipediaAPIWrapper is a class that needs to be imported.
# The import statement is not included as it is not provided in the JSON
# and the actual implementation details are unknown.


class WikipediaAPIWrapperComponent(CustomComponent):
    display_name = "WikipediaAPIWrapper"
    description = "Wrapper around WikipediaAPI."

    def build_config(self):
        return {}

    def build(
        self,
        top_k_results: int = 3,
        lang: str = "en",
        load_all_available_meta: bool = False,
        doc_content_chars_max: int = 4000,
    ) -> Union[WikipediaAPIWrapper, Callable]:
        return WikipediaAPIWrapper(  # type: ignore
            top_k_results=top_k_results,
            lang=lang,
            load_all_available_meta=load_all_available_meta,
            doc_content_chars_max=doc_content_chars_max,
        )
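This wrapper needs no credentials, so it doubles as a smoke test (sketch; relies on the `wikipedia` package already in the project's dependencies):

    wrapper = WikipediaAPIWrapperComponent().build(top_k_results=2, lang="en")
    summary = wrapper.run("LangChain")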
@@ -0,0 +1,18 @@
from typing import Callable, Union

from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langflow import CustomComponent

# Since all the fields in the JSON have show=False, we will only create a basic component
# without any configurable fields.


class WolframAlphaAPIWrapperComponent(CustomComponent):
    display_name = "WolframAlphaAPIWrapper"
    description = "Wrapper for Wolfram Alpha."

    def build_config(self):
        return {"appid": {"display_name": "App ID", "type": "str", "password": True}}

    def build(self, appid: str) -> Union[Callable, WolframAlphaAPIWrapper]:
        return WolframAlphaAPIWrapper(wolfram_alpha_appid=appid)  # type: ignore
@@ -3,9 +3,8 @@ from typing import List, Optional, Union
import chromadb  # type: ignore
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores import Chroma
from langchain.vectorstores.base import VectorStore

from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.chroma import Chroma
from langflow import CustomComponent
26 src/backend/langflow/components/vectorstores/FAISS.py Normal file

@@ -0,0 +1,26 @@
from typing import List, Union

from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.faiss import FAISS
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings


class FAISSComponent(CustomComponent):
    display_name = "FAISS"
    description = "Construct FAISS wrapper from raw documents."
    documentation = "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss"

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
        }

    def build(
        self,
        embedding: Embeddings,
        documents: List[Document],
    ) -> Union[VectorStore, FAISS, BaseRetriever]:
        return FAISS.from_documents(documents=documents, embedding=embedding)
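End-to-end sketch pairing a splitter's output with FAISS. FakeEmbeddings is a real langchain_community class, used so the sketch runs without an API key; `chunks` is a placeholder list of Documents (for example, the CharacterTextSplitterComponent output above):

    from langchain_community.embeddings import FakeEmbeddings

    store = FAISSComponent().build(
        embedding=FakeEmbeddings(size=32),
        documents=chunks,
    )
    hits = store.similarity_search("query", k=2)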
@@ -0,0 +1,47 @@
from typing import List, Optional

from langchain_community.vectorstores import MongoDBAtlasVectorSearch

from langflow import CustomComponent
from langflow.field_typing import (
    Document,
    Embeddings,
    NestedDict,
)


class MongoDBAtlasComponent(CustomComponent):
    display_name = "MongoDB Atlas"
    description = "Construct a `MongoDB Atlas Vector Search` vector store from raw documents."

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "collection_name": {"display_name": "Collection Name"},
            "db_name": {"display_name": "Database Name"},
            "index_name": {"display_name": "Index Name"},
            "mongodb_atlas_cluster_uri": {"display_name": "MongoDB Atlas Cluster URI"},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
        }

    def build(
        self,
        documents: List[Document],
        embedding: Embeddings,
        collection_name: str = "",
        db_name: str = "",
        index_name: str = "",
        mongodb_atlas_cluster_uri: str = "",
        search_kwargs: Optional[NestedDict] = None,
    ) -> MongoDBAtlasVectorSearch:
        search_kwargs = search_kwargs or {}
        return MongoDBAtlasVectorSearch(
            documents=documents,
            embedding=embedding,
            collection_name=collection_name,
            db_name=db_name,
            index_name=index_name,
            mongodb_atlas_cluster_uri=mongodb_atlas_cluster_uri,
            search_kwargs=search_kwargs,
        )
62 src/backend/langflow/components/vectorstores/Pinecone.py Normal file

@@ -0,0 +1,62 @@
import os
from typing import List, Optional, Union

import pinecone  # type: ignore
from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.pinecone import Pinecone

from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings


class PineconeComponent(CustomComponent):
    display_name = "Pinecone"
    description = "Construct Pinecone wrapper from raw documents."

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "index_name": {"display_name": "Index Name"},
            "namespace": {"display_name": "Namespace"},
            "pinecone_api_key": {"display_name": "Pinecone API Key", "default": "", "password": True, "required": True},
            "pinecone_env": {"display_name": "Pinecone Environment", "default": "", "required": True},
            "search_kwargs": {"display_name": "Search Kwargs", "default": "{}"},
            "pool_threads": {"display_name": "Pool Threads", "default": 1, "advanced": True},
        }

    def build(
        self,
        embedding: Embeddings,
        pinecone_env: str,
        documents: List[Document],
        index_name: Optional[str] = None,
        pinecone_api_key: Optional[str] = None,
        text_key: Optional[str] = "text",
        namespace: Optional[str] = "default",
        pool_threads: Optional[int] = None,
    ) -> Union[VectorStore, Pinecone, BaseRetriever]:
        if pinecone_api_key is None or pinecone_env is None:
            raise ValueError("Pinecone API Key and Environment are required.")
        if os.getenv("PINECONE_API_KEY") is None and pinecone_api_key is None:
            raise ValueError("Pinecone API Key is required.")

        pinecone.init(api_key=pinecone_api_key, environment=pinecone_env)  # type: ignore
        if documents:
            return Pinecone.from_documents(
                documents=documents,
                embedding=embedding,
                index_name=index_name,
                pool_threads=pool_threads,
                namespace=namespace,
                text_key=text_key,
            )

        return Pinecone.from_existing_index(
            index_name=index_name,
            embedding=embedding,
            text_key=text_key,
            namespace=namespace,
            pool_threads=pool_threads,
        )
76 src/backend/langflow/components/vectorstores/Qdrant.py Normal file

@@ -0,0 +1,76 @@
from typing import List, Optional, Union

from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.qdrant import Qdrant
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings, NestedDict


class QdrantComponent(CustomComponent):
    display_name = "Qdrant"
    description = "Construct Qdrant wrapper from a list of texts."

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "api_key": {"display_name": "API Key", "password": True},
            "collection_name": {"display_name": "Collection Name"},
            "content_payload_key": {"display_name": "Content Payload Key", "advanced": True},
            "distance_func": {"display_name": "Distance Function", "advanced": True},
            "grpc_port": {"display_name": "gRPC Port", "advanced": True},
            "host": {"display_name": "Host", "advanced": True},
            "https": {"display_name": "HTTPS", "advanced": True},
            "location": {"display_name": "Location", "advanced": True},
            "metadata_payload_key": {"display_name": "Metadata Payload Key", "advanced": True},
            "path": {"display_name": "Path", "advanced": True},
            "port": {"display_name": "Port", "advanced": True},
            "prefer_grpc": {"display_name": "Prefer gRPC", "advanced": True},
            "prefix": {"display_name": "Prefix", "advanced": True},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
            "timeout": {"display_name": "Timeout", "advanced": True},
            "url": {"display_name": "URL", "advanced": True},
        }

    def build(
        self,
        embedding: Embeddings,
        documents: List[Document],
        api_key: Optional[str] = None,
        collection_name: Optional[str] = None,
        content_payload_key: str = "page_content",
        distance_func: str = "Cosine",
        grpc_port: Optional[int] = 6334,
        host: Optional[str] = None,
        https: bool = False,
        location: str = ":memory:",
        metadata_payload_key: str = "metadata",
        path: Optional[str] = None,
        port: Optional[int] = 6333,
        prefer_grpc: bool = False,
        prefix: Optional[str] = None,
        search_kwargs: Optional[NestedDict] = None,
        timeout: Optional[float] = None,
        url: Optional[str] = None,
    ) -> Union[VectorStore, Qdrant, BaseRetriever]:
        return Qdrant.from_documents(
            documents=documents,
            embedding=embedding,
            api_key=api_key,
            collection_name=collection_name,
            content_payload_key=content_payload_key,
            distance_func=distance_func,
            grpc_port=grpc_port,
            host=host,
            https=https,
            location=location,
            metadata_payload_key=metadata_payload_key,
            path=path,
            port=port,
            prefer_grpc=prefer_grpc,
            prefix=prefix,
            search_kwargs=search_kwargs,
            timeout=timeout,
            url=url,
        )
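In-memory smoke test for the Qdrant component (sketch; FakeEmbeddings again stands in for a real embedding model, and it assumes the pass-through kwargs above are accepted by the client):

    from langchain_community.embeddings import FakeEmbeddings
    from langchain_core.documents.base import Document

    store = QdrantComponent().build(
        embedding=FakeEmbeddings(size=32),
        documents=[Document(page_content="hello qdrant")],
        collection_name="smoke",
        location=":memory:",
    )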
@@ -1,10 +1,10 @@
from typing import Optional
from langflow import CustomComponent

from langchain.vectorstores.redis import Redis
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.redis import Redis
from langflow import CustomComponent


class RedisComponent(CustomComponent):
@@ -0,0 +1,44 @@
from typing import List, Union

from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.supabase import SupabaseVectorStore
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings, NestedDict
from supabase.client import Client, create_client


class SupabaseComponent(CustomComponent):
    display_name = "Supabase"
    description = "Return VectorStore initialized from texts and embeddings."

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "query_name": {"display_name": "Query Name"},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
            "supabase_service_key": {"display_name": "Supabase Service Key"},
            "supabase_url": {"display_name": "Supabase URL"},
            "table_name": {"display_name": "Table Name", "advanced": True},
        }

    def build(
        self,
        embedding: Embeddings,
        documents: List[Document],
        query_name: str = "",
        search_kwargs: NestedDict = {},
        supabase_service_key: str = "",
        supabase_url: str = "",
        table_name: str = "",
    ) -> Union[VectorStore, SupabaseVectorStore, BaseRetriever]:
        supabase: Client = create_client(supabase_url, supabase_key=supabase_service_key)
        return SupabaseVectorStore.from_documents(
            documents=documents,
            embedding=embedding,
            query_name=query_name,
            search_kwargs=search_kwargs,
            client=supabase,
            table_name=table_name,
        )
@@ -1,14 +1,13 @@
from typing import Optional, Union, List
from langflow import CustomComponent
import tempfile
import urllib.request
import urllib
import urllib.request
from typing import List, Optional, Union

from langchain.vectorstores import Vectara
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.schema import BaseRetriever
from langchain.embeddings import FakeEmbeddings
from langchain.schema import BaseRetriever, Document
from langchain_community.vectorstores import Vectara, VectorStore

from langflow import CustomComponent


class VectaraComponent(CustomComponent):
@@ -1,12 +1,11 @@
import weaviate  # type: ignore
from typing import Optional, Union
from langflow import CustomComponent

from langchain.vectorstores import Weaviate
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.schema import BaseRetriever
import weaviate  # type: ignore
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain_community.vectorstores import VectorStore, Weaviate

from langflow import CustomComponent


class WeaviateVectorStore(CustomComponent):

@@ -45,7 +44,7 @@ class WeaviateVectorStore(CustomComponent):
        search_by_text: bool = False,
        api_key: Optional[str] = None,
        index_name: Optional[str] = None,
        text_key: Optional[str] = "text",
        text_key: str = "text",
        embedding: Optional[Embeddings] = None,
        documents: Optional[Document] = None,
        attributes: Optional[list] = None,
@@ -1,10 +1,10 @@
from typing import Optional, List
from langflow import CustomComponent
from typing import List, Optional

from langchain.vectorstores.pgvector import PGVector
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.pgvector import PGVector
from langflow import CustomComponent


class PostgresqlVectorComponent(CustomComponent):
@@ -12,7 +12,7 @@ from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.memory import BaseMemory
from langchain.text_splitter import TextSplitter
from langchain.tools import Tool
from langchain.vectorstores.base import VectorStore
from langchain_community.vectorstores import VectorStore

# Type alias for more complex dicts
NestedDict = Dict[str, Union[str, Dict]]
@@ -12,7 +12,6 @@ from langflow.interface.retrievers.base import retriever_creator
from langflow.interface.text_splitters.base import textsplitter_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
from langflow.utils.lazy_load import LazyLoadDictBase

@@ -46,7 +45,7 @@ class VertexTypesDict(LazyLoadDictBase):
            **{t: types.LLMVertex for t in llm_creator.to_list()},
            **{t: types.MemoryVertex for t in memory_creator.to_list()},
            **{t: types.EmbeddingVertex for t in embedding_creator.to_list()},
            **{t: types.VectorStoreVertex for t in vectorstore_creator.to_list()},
            # **{t: types.VectorStoreVertex for t in vectorstore_creator.to_list()},
            **{t: types.DocumentLoaderVertex for t in documentloader_creator.to_list()},
            **{t: types.TextSplitterVertex for t in textsplitter_creator.to_list()},
            **{t: types.OutputParserVertex for t in output_parser_creator.to_list()},
@@ -2,14 +2,10 @@ from typing import Any, Optional

from langchain.agents import AgentExecutor, ZeroShotAgent
from langchain.agents.agent_toolkits import (
    SQLDatabaseToolkit,
    VectorStoreInfo,
    VectorStoreRouterToolkit,
    VectorStoreToolkit,
)
from langchain.agents.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
from langchain.agents.agent_toolkits.sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain.agents.agent_toolkits.vectorstore.prompt import PREFIX as VECTORSTORE_PREFIX
from langchain.agents.agent_toolkits.vectorstore.prompt import ROUTER_PREFIX as VECTORSTORE_ROUTER_PREFIX
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS

@@ -17,9 +13,14 @@ from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.sql_database import SQLDatabase
from langchain.tools.sql_database.prompt import QUERY_CHECKER
from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_community.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
from langchain_community.agent_toolkits.sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain_experimental.agents.agent_toolkits.pandas.prompt import PREFIX as PANDAS_PREFIX
from langchain_experimental.agents.agent_toolkits.pandas.prompt import SUFFIX_WITH_DF as PANDAS_SUFFIX
from langchain_experimental.tools.python.tool import PythonAstREPLTool

from langflow.interface.base import CustomAgentExecutor
@@ -66,18 +66,18 @@ class DirectoryReader:
    def filter_loaded_components(self, data: dict, with_errors: bool) -> dict:
        from langflow.interface.custom.utils import build_component

        items = [
            {
                "name": menu["name"],
                "path": menu["path"],
                "components": [
                    (*build_component(component), component)
                    for component in menu["components"]
                    if (component["error"] if with_errors else not component["error"])
                ],
            }
            for menu in data["menu"]
        ]
        items = []
        for menu in data["menu"]:
            components = []
            for component in menu["components"]:
                try:
                    if component["error"] if with_errors else not component["error"]:
                        component_tuple = (*build_component(component), component)
                        components.append(component_tuple)
                except Exception as e:
                    logger.error(f"Error while loading component: {e}")
                    continue
            items.append({"name": menu["name"], "path": menu["path"], "components": components})
        filtered = [menu for menu in items if menu["components"]]
        logger.debug(f'Filtered components {"with errors" if with_errors else ""}: {len(filtered)}')
        return {"menu": filtered}
@@ -1,9 +1,10 @@
import inspect
from typing import Any

from langchain import document_loaders, embeddings, llms, memory, requests, text_splitter
from langchain.agents import agent_toolkits
from langchain.chat_models import AzureChatOpenAI, ChatAnthropic, ChatOpenAI, ChatVertexAI
from langchain import llms, memory, requests, text_splitter
from langchain_community.chat_models import AzureChatOpenAI, ChatAnthropic, ChatOpenAI, ChatVertexAI
from langchain_community import agent_toolkits, document_loaders, embeddings

from langflow.interface.agents.custom import CUSTOM_AGENTS
from langflow.interface.chains.custom import CUSTOM_CHAINS
from langflow.interface.importing.utils import import_class

@@ -24,14 +25,14 @@ llm_type_to_cls_dict["vertexai-chat"] = ChatVertexAI  # type: ignore

# Toolkits
toolkit_type_to_loader_dict: dict[str, Any] = {
    toolkit_name: import_class(f"langchain.agents.agent_toolkits.{toolkit_name}")
    toolkit_name: import_class(f"langchain_community.agent_toolkits.{toolkit_name}")
    # if toolkit_name is lower case it is a loader
    for toolkit_name in agent_toolkits.__all__
    if toolkit_name.islower()
}

toolkit_type_to_cls_dict: dict[str, Any] = {
    toolkit_name: import_class(f"langchain.agents.agent_toolkits.{toolkit_name}")
    toolkit_name: import_class(f"langchain_community.agent_toolkits.{toolkit_name}")
    # if toolkit_name is not lower case it is a class
    for toolkit_name in agent_toolkits.__all__
    if not toolkit_name.islower()

@@ -47,13 +48,14 @@ wrapper_type_to_cls_dict: dict[str, Any] = {wrapper.__name__: wrapper for wrappe

# Embeddings
embedding_type_to_cls_dict: dict[str, Any] = {
    embedding_name: import_class(f"langchain.embeddings.{embedding_name}") for embedding_name in embeddings.__all__
    embedding_name: import_class(f"langchain_community.embeddings.{embedding_name}")
    for embedding_name in embeddings.__all__
}


# Document Loaders
documentloaders_type_to_cls_dict: dict[str, Any] = {
    documentloader_name: import_class(f"langchain.document_loaders.{documentloader_name}")
    documentloader_name: import_class(f"langchain_community.document_loaders.{documentloader_name}")
    for documentloader_name in document_loaders.__all__
}
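All of these tables hinge on import_class resolving a dotted path at runtime. A minimal equivalent of that helper (a sketch; that langflow's real helper adds caching and error handling is an assumption, not confirmed by this diff):

    import importlib

    def import_class(dotted_path: str):
        # Split "pkg.module.ClassName" into module path and class name.
        module_path, _, class_name = dotted_path.rpartition(".")
        return getattr(importlib.import_module(module_path), class_name)

    Chroma = import_class("langchain_community.vectorstores.Chroma")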
@@ -6,9 +6,9 @@ from typing import Any, Type
from langchain.agents import Agent
from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chat_models.base import BaseChatModel
from langchain.prompts import PromptTemplate
from langchain.tools import BaseTool
from langchain_core.language_models.chat_models import BaseChatModel
from langflow.interface.custom.custom_component import CustomComponent
from langflow.interface.wrappers.base import wrapper_creator
from langflow.utils import validate

@@ -71,7 +71,7 @@ def import_output_parser(output_parser: str) -> Any:

def import_chat_llm(llm: str) -> BaseChatModel:
    """Import chat llm from llm name"""
    return import_class(f"langchain.chat_models.{llm}")
    return import_class(f"langchain_community.chat_models.{llm}")


def import_retriever(retriever: str) -> Any:
@@ -148,17 +148,17 @@ def import_chain(chain: str) -> Type[Chain]:

def import_embedding(embedding: str) -> Any:
    """Import embedding from embedding name"""
    return import_class(f"langchain.embeddings.{embedding}")
    return import_class(f"langchain_community.embeddings.{embedding}")


def import_vectorstore(vectorstore: str) -> Any:
    """Import vectorstore from vectorstore name"""
    return import_class(f"langchain.vectorstores.{vectorstore}")
    return import_class(f"langchain_community.vectorstores.{vectorstore}")


def import_documentloader(documentloader: str) -> Any:
    """Import documentloader from documentloader name"""
    return import_class(f"langchain.document_loaders.{documentloader}")
    return import_class(f"langchain_community.document_loaders.{documentloader}")


def import_textsplitter(textsplitter: str) -> Any:
@@ -169,8 +169,8 @@ def import_textsplitter(textsplitter: str) -> Any:
def import_utility(utility: str) -> Any:
    """Import utility from utility name"""
    if utility == "SQLDatabase":
        return import_class(f"langchain.sql_database.{utility}")
    return import_class(f"langchain.utilities.{utility}")
        return import_class(f"langchain_community.sql_database.{utility}")
    return import_class(f"langchain_community.utilities.{utility}")


def get_function(code):
@@ -10,7 +10,7 @@ from langchain.agents.tools import BaseTool
from langchain.chains.base import Chain
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain_community.vectorstores import VectorStore
from loguru import logger
from pydantic import ValidationError
@@ -1,18 +1,18 @@
from typing import Any, Callable, Dict, Type
from langchain.vectorstores import (
    Pinecone,
    ElasticsearchStore,
    Qdrant,
    Chroma,
    FAISS,
    Weaviate,
    SupabaseVectorStore,
    MongoDBAtlasVectorSearch,
)
from langchain.schema import Document
import os
from typing import Any, Callable, Dict, Type

import orjson
from langchain.schema import Document
from langchain_community.vectorstores import (
    FAISS,
    Chroma,
    ElasticsearchStore,
    MongoDBAtlasVectorSearch,
    Pinecone,
    Qdrant,
    SupabaseVectorStore,
    Weaviate,
)


def docs_in_params(params: dict) -> bool:

@@ -27,8 +27,8 @@ def initialize_mongodb(class_object: Type[MongoDBAtlasVectorSearch], params: dic
    MONGODB_ATLAS_CLUSTER_URI = params.pop("mongodb_atlas_cluster_uri")
    if not MONGODB_ATLAS_CLUSTER_URI:
        raise ValueError("Mongodb atlas cluster uri must be provided in the params")
    from pymongo import MongoClient
    import certifi
    from pymongo import MongoClient

    client: MongoClient = MongoClient(MONGODB_ATLAS_CLUSTER_URI, tlsCAFile=certifi.where())
    db_name = params.pop("db_name", None)
@@ -1,14 +1,12 @@
from typing import Any, ClassVar, Dict, List, Optional, Type

from langchain import retrievers

from langchain_community import retrievers
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.importing.utils import import_class
from langflow.services.deps import get_settings_service

from langflow.template.frontend_node.retrievers import RetrieverFrontendNode
from langflow.utils.util import build_template_from_class, build_template_from_method
from loguru import logger
from langflow.utils.util import build_template_from_method, build_template_from_class


class RetrieverCreator(LangChainTypeCreator):

@@ -27,7 +25,7 @@ class RetrieverCreator(LangChainTypeCreator):
    def type_to_loader_dict(self) -> Dict:
        if self.type_dict is None:
            self.type_dict: dict[str, Any] = {
                retriever_name: import_class(f"langchain.retrievers.{retriever_name}")
                retriever_name: import_class(f"langchain_community.retrievers.{retriever_name}")
                for retriever_name in retrievers.__all__
            }
        return self.type_dict
@@ -1,18 +1,9 @@
from langchain import tools
from langchain.agents import Tool
from langchain.agents.load_tools import (
    _BASE_TOOLS,
    _EXTRA_LLM_TOOLS,
    _EXTRA_OPTIONAL_TOOLS,
    _LLM_TOOLS,
)
from langchain.agents.load_tools import _BASE_TOOLS, _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS
from langchain.tools.json.tool import JsonSpec

from langflow.interface.importing.utils import import_class
from langflow.interface.tools.custom import (
    PythonFunctionTool,
    PythonFunction,
)
from langflow.interface.tools.custom import PythonFunction, PythonFunctionTool

FILE_TOOLS = {"JsonSpec": JsonSpec}
CUSTOM_TOOLS = {

@@ -21,7 +12,7 @@ CUSTOM_TOOLS = {
    "PythonFunction": PythonFunction,
}

OTHER_TOOLS = {tool: import_class(f"langchain.tools.{tool}") for tool in tools.__all__}
OTHER_TOOLS = {tool: import_class(f"langchain_community.tools.{tool}") for tool in tools.__all__}

ALL_TOOLS_NAMES = {
    **_BASE_TOOLS,
@@ -1,4 +1,5 @@
from cachetools import LRUCache, cached

from langflow.interface.agents.base import agent_creator
from langflow.interface.chains.base import chain_creator
from langflow.interface.custom.directory_reader.utils import merge_nested_dicts_with_renaming

@@ -14,7 +15,6 @@ from langflow.interface.text_splitters.base import textsplitter_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.utilities.base import utility_creator
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator

@@ -46,7 +46,7 @@ def build_langchain_types_dict():  # sourcery skip: dict-assign-update-to-union
        toolkits_creator,
        wrapper_creator,
        embedding_creator,
        vectorstore_creator,
        # vectorstore_creator,
        documentloader_creator,
        textsplitter_creator,
        utility_creator,
@@ -1,6 +1,6 @@
from typing import Dict, List, Optional, Type

from langchain import utilities
from langchain_community import utilities
from loguru import logger

from langflow.custom.customs import get_custom_nodes

@@ -30,7 +30,7 @@ class UtilityCreator(LangChainTypeCreator):
            self.type_dict = {}
            for utility_name in utilities.__all__:
                try:
                    imported = import_class(f"langchain.utilities.{utility_name}")
                    imported = import_class(f"langchain_community.utilities.{utility_name}")
                    self.type_dict[utility_name] = imported
                except Exception:
                    pass
@@ -1,13 +1,12 @@
from typing import Any, Dict, List, Optional, Type

from langchain import vectorstores
from loguru import logger

from langflow.interface.base import LangChainTypeCreator
from langflow.interface.importing.utils import import_class
from langflow.services.deps import get_settings_service

from langflow.template.frontend_node.vectorstores import VectorStoreFrontendNode
from loguru import logger
from langflow.utils.util import build_template_from_method

@@ -22,7 +21,7 @@ class VectorstoreCreator(LangChainTypeCreator):
    def type_to_loader_dict(self) -> Dict:
        if self.type_dict is None:
            self.type_dict: dict[str, Any] = {
                vectorstore_name: import_class(f"langchain.vectorstores.{vectorstore_name}")
                vectorstore_name: import_class(f"langchain_community.vectorstores.{vectorstore_name}")
                for vectorstore_name in vectorstores.__all__
            }
        return self.type_dict
@@ -1,9 +1,9 @@
from typing import ClassVar, Dict, List, Optional

from langchain.utilities import requests, sql_database
from langchain_community.utilities import requests, sql_database
from loguru import logger

from langflow.interface.base import LangChainTypeCreator
from loguru import logger
from langflow.utils.util import build_template_from_class, build_template_from_method
@@ -4,7 +4,7 @@ from typing import Any, Coroutine, Dict, List, Optional, Tuple, Union
from langchain.agents import AgentExecutor
from langchain.chains.base import Chain
from langchain.schema import AgentAction, Document
from langchain.vectorstores.base import VectorStore
from langchain_community.vectorstores import VectorStore
from langchain_core.messages import AIMessage
from langchain_core.runnables.base import Runnable
from langflow.graph.graph.base import Graph
@@ -68,110 +68,110 @@ LOADERS_INFO: List[Dict[str, Any]] = [
    {
        "loader": "AirbyteJSONLoader",
        "name": "Airbyte JSON (.jsonl)",
        "import": "langchain.document_loaders.AirbyteJSONLoader",
        "import": "langchain_community.document_loaders.AirbyteJSONLoader",
        "defaultFor": ["jsonl"],
        "allowdTypes": ["jsonl"],
    },
    {
        "loader": "JSONLoader",
        "name": "JSON (.json)",
        "import": "langchain.document_loaders.JSONLoader",
        "import": "langchain_community.document_loaders.JSONLoader",
        "defaultFor": ["json"],
        "allowdTypes": ["json"],
    },
    {
        "loader": "BSHTMLLoader",
        "name": "BeautifulSoup4 HTML (.html, .htm)",
        "import": "langchain.document_loaders.BSHTMLLoader",
        "import": "langchain_community.document_loaders.BSHTMLLoader",
        "allowdTypes": ["html", "htm"],
    },
    {
        "loader": "CSVLoader",
        "name": "CSV (.csv)",
        "import": "langchain.document_loaders.CSVLoader",
        "import": "langchain_community.document_loaders.CSVLoader",
        "defaultFor": ["csv"],
        "allowdTypes": ["csv"],
    },
    {
        "loader": "CoNLLULoader",
        "name": "CoNLL-U (.conllu)",
        "import": "langchain.document_loaders.CoNLLULoader",
        "import": "langchain_community.document_loaders.CoNLLULoader",
        "defaultFor": ["conllu"],
        "allowdTypes": ["conllu"],
    },
    {
        "loader": "EverNoteLoader",
        "name": "EverNote (.enex)",
        "import": "langchain.document_loaders.EverNoteLoader",
        "import": "langchain_community.document_loaders.EverNoteLoader",
        "defaultFor": ["enex"],
        "allowdTypes": ["enex"],
    },
    {
        "loader": "FacebookChatLoader",
        "name": "Facebook Chat (.json)",
        "import": "langchain.document_loaders.FacebookChatLoader",
        "import": "langchain_community.document_loaders.FacebookChatLoader",
        "allowdTypes": ["json"],
    },
    {
        "loader": "OutlookMessageLoader",
        "name": "Outlook Message (.msg)",
        "import": "langchain.document_loaders.OutlookMessageLoader",
        "import": "langchain_community.document_loaders.OutlookMessageLoader",
        "defaultFor": ["msg"],
        "allowdTypes": ["msg"],
    },
    {
        "loader": "PyPDFLoader",
        "name": "PyPDF (.pdf)",
        "import": "langchain.document_loaders.PyPDFLoader",
        "import": "langchain_community.document_loaders.PyPDFLoader",
        "defaultFor": ["pdf"],
        "allowdTypes": ["pdf"],
    },
    {
        "loader": "STRLoader",
        "name": "Subtitle (.str)",
        "import": "langchain.document_loaders.STRLoader",
        "import": "langchain_community.document_loaders.STRLoader",
        "defaultFor": ["str"],
        "allowdTypes": ["str"],
    },
    {
        "loader": "TextLoader",
        "name": "Text (.txt)",
        "import": "langchain.document_loaders.TextLoader",
        "import": "langchain_community.document_loaders.TextLoader",
        "defaultFor": ["txt"],
        "allowdTypes": ["txt"],
    },
    {
        "loader": "UnstructuredEmailLoader",
        "name": "Unstructured Email (.eml)",
        "import": "langchain.document_loaders.UnstructuredEmailLoader",
        "import": "langchain_community.document_loaders.UnstructuredEmailLoader",
        "defaultFor": ["eml"],
        "allowdTypes": ["eml"],
    },
    {
        "loader": "UnstructuredHTMLLoader",
        "name": "Unstructured HTML (.html, .htm)",
        "import": "langchain.document_loaders.UnstructuredHTMLLoader",
        "import": "langchain_community.document_loaders.UnstructuredHTMLLoader",
        "defaultFor": ["html", "htm"],
        "allowdTypes": ["html", "htm"],
    },
    {
        "loader": "UnstructuredMarkdownLoader",
        "name": "Unstructured Markdown (.md)",
        "import": "langchain.document_loaders.UnstructuredMarkdownLoader",
        "import": "langchain_community.document_loaders.UnstructuredMarkdownLoader",
        "defaultFor": ["md"],
        "allowdTypes": ["md"],
    },
    {
        "loader": "UnstructuredPowerPointLoader",
        "name": "Unstructured PowerPoint (.pptx)",
        "import": "langchain.document_loaders.UnstructuredPowerPointLoader",
        "import": "langchain_community.document_loaders.UnstructuredPowerPointLoader",
        "defaultFor": ["pptx"],
        "allowdTypes": ["pptx"],
    },
    {
        "loader": "UnstructuredWordLoader",
        "name": "Unstructured Word (.docx)",
        "import": "langchain.document_loaders.UnstructuredWordLoader",
        "import": "langchain_community.document_loaders.UnstructuredWordLoader",
        "defaultFor": ["docx"],
        "allowdTypes": ["docx"],
    },
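Each record's "import" string is resolved dynamically when a file type is matched, along these lines (a sketch; the file name is made up):

    import importlib

    loader_info = LOADERS_INFO[0]  # the Airbyte JSON entry above
    module_path, _, cls_name = loader_info["import"].rpartition(".")
    loader_cls = getattr(importlib.import_module(module_path), cls_name)
    docs = loader_cls("records.jsonl").load()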
@@ -4,7 +4,7 @@ from langchain.llms.base import BaseLLM
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema import Document
from langchain.field_typing import NestedDict
from langflow.field_typing import NestedDict

import requests
@@ -1,11 +1,12 @@
from locust import FastHttpUser, task, between
import random
import time
import orjson
from rich import print
import httpx
from pathlib import Path

import httpx
import orjson
from locust import FastHttpUser, between, task
from rich import print


class NameTest(FastHttpUser):
    wait_time = between(1, 5)

@@ -13,7 +14,7 @@ class NameTest(FastHttpUser):
    with open("names.txt", "r") as file:
        names = [line.strip() for line in file.readlines()]

    headers = {}
    headers: dict = {}

    def poll_task(self, task_id, sleep_time=1):
        while True:
@ -1,210 +0,0 @@
|
|||
from fastapi.testclient import TestClient
|
||||
|
||||
|
||||
def test_zero_shot_agent(client: TestClient, logged_in_headers):
|
||||
response = client.get("api/v1/all", headers=logged_in_headers)
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
agents = json_response["agents"]
|
||||
|
||||
zero_shot_agent = agents["ZeroShotAgent"]
|
||||
assert set(zero_shot_agent["base_classes"]) == {
|
||||
"ZeroShotAgent",
|
||||
"BaseSingleActionAgent",
|
||||
"Agent",
|
||||
"Callable",
|
||||
}
|
||||
template = zero_shot_agent["template"]
|
||||
|
||||
assert template["tools"] == {
|
||||
"required": True,
|
||||
"dynamic": False,
|
||||
"placeholder": "",
|
||||
"show": True,
|
||||
"multiline": False,
|
||||
"password": False,
|
||||
"name": "tools",
|
||||
"type": "BaseTool",
|
||||
"list": True,
|
||||
"advanced": False,
|
||||
"info": "",
|
||||
"fileTypes": [],
|
||||
}
|
||||
|
||||
# Additional assertions for other template variables
|
||||
assert template["callback_manager"] == {
|
||||
"required": False,
|
||||
"dynamic": False,
|
||||
"placeholder": "",
|
||||
"show": False,
|
||||
"multiline": False,
|
||||
"password": False,
|
||||
"name": "callback_manager",
|
||||
"type": "BaseCallbackManager",
|
||||
"list": False,
|
||||
"advanced": False,
|
||||
"info": "",
|
||||
"fileTypes": [],
|
||||
}
|
||||
assert template["llm"] == {
|
||||
"required": True,
|
||||
"dynamic": False,
|
||||
"placeholder": "",
|
||||
"show": True,
|
||||
"multiline": False,
|
||||
"password": False,
|
||||
"name": "llm",
|
||||
"type": "BaseLanguageModel",
|
||||
"list": False,
|
||||
"advanced": False,
|
||||
"info": "",
|
||||
"fileTypes": [],
|
||||
}
|
||||
assert template["output_parser"] == {
|
||||
"required": False,
|
||||
"dynamic": False,
|
||||
"placeholder": "",
|
||||
"show": False,
|
||||
"multiline": False,
|
||||
"password": False,
|
||||
"name": "output_parser",
|
||||
"type": "AgentOutputParser",
|
||||
"list": False,
|
||||
"advanced": False,
|
||||
"info": "",
|
||||
"fileTypes": [],
|
||||
}
|
||||
assert template["input_variables"] == {
|
||||
"required": False,
|
||||
"dynamic": False,
|
||||
"placeholder": "",
|
||||
"show": False,
|
||||
"multiline": False,
|
||||
"password": False,
|
||||
"name": "input_variables",
|
||||
"type": "str",
|
||||
"list": True,
|
||||
"advanced": False,
|
||||
"info": "",
|
||||
"fileTypes": [],
|
||||
}
|
||||
assert template["prefix"] == {
|
||||
"required": False,
|
||||
"dynamic": False,
|
||||
"placeholder": "",
|
||||
"show": True,
|
||||
"multiline": True,
|
||||
"value": "Answer the following questions as best you can. You have access to the following tools:",
|
||||
"password": False,
|
||||
"name": "prefix",
|
||||
"type": "str",
|
||||
"list": False,
|
||||
"advanced": False,
|
||||
"info": "",
|
||||
"fileTypes": [],
|
||||
}
|
||||
assert template["suffix"] == {
|
||||
"required": False,
|
||||
"dynamic": False,
|
||||
"placeholder": "",
|
||||
"show": True,
|
||||
"multiline": True,
|
||||
"value": "Begin!\n\nQuestion: {input}\nThought:{agent_scratchpad}",
|
||||
"password": False,
|
||||
"name": "suffix",
|
||||
"type": "str",
|
||||
"list": False,
|
||||
"advanced": False,
|
||||
"info": "",
|
||||
"fileTypes": [],
|
||||
}
|
||||
|
||||
|
||||
def test_json_agent(client: TestClient, logged_in_headers):
|
||||
response = client.get("api/v1/all", headers=logged_in_headers)
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
agents = json_response["agents"]
|
||||
|
||||
json_agent = agents["JsonAgent"]
|
||||
assert json_agent["base_classes"] == ["AgentExecutor"]
|
||||
template = json_agent["template"]
|
||||
|
||||
assert template["toolkit"] == {
|
||||
"required": True,
|
||||
"dynamic": False,
|
||||
"placeholder": "",
|
||||
"show": True,
|
||||
"multiline": False,
|
||||
"password": False,
|
||||
"name": "toolkit",
|
||||
"type": "BaseToolkit",
|
||||
"list": False,
|
||||
"advanced": False,
|
||||
"info": "",
|
||||
"file_path": "",
|
||||
"fileTypes": [],
|
||||
"value": "",
|
||||
}
|
||||
assert template["llm"] == {
|
||||
"required": True,
|
||||
"dynamic": False,
|
||||
"placeholder": "",
|
||||
"show": True,
|
||||
"multiline": False,
|
||||
"password": False,
|
||||
"name": "llm",
|
||||
"type": "BaseLanguageModel",
|
||||
"list": False,
|
||||
"advanced": False,
|
||||
"display_name": "LLM",
|
||||
"info": "",
|
||||
"file_path": "",
|
||||
"fileTypes": [],
|
||||
"value": "",
|
||||
}
|
||||
|
||||
|
||||
def test_csv_agent(client: TestClient, logged_in_headers):
|
||||
response = client.get("api/v1/all", headers=logged_in_headers)
|
||||
assert response.status_code == 200
|
||||
json_response = response.json()
|
||||
agents = json_response["agents"]
|
||||
|
||||
csv_agent = agents["CSVAgent"]
|
||||
assert csv_agent["base_classes"] == ["AgentExecutor"]
|
||||
template = csv_agent["template"]
|
||||
|
||||
assert template["path"] == {
|
||||
"required": True,
|
||||
"dynamic": False,
|
||||
"placeholder": "",
|
||||
"show": True,
|
||||
"multiline": False,
|
||||
"value": "",
|
||||
"fileTypes": [".csv"],
|
||||
"password": False,
|
||||
"name": "path",
|
||||
"type": "file",
|
||||
"list": False,
|
||||
"file_path": "",
|
||||
"advanced": False,
|
||||
"info": "",
|
||||
}
|
||||
assert template["llm"] == {
|
||||
"required": True,
|
||||
"dynamic": False,
|
||||
"placeholder": "",
|
||||
"show": True,
|
||||
"multiline": False,
|
||||
"password": False,
|
||||
"name": "llm",
|
||||
"type": "BaseLanguageModel",
|
||||
"list": False,
|
||||
"advanced": False,
|
||||
"display_name": "LLM",
|
||||
"info": "",
|
||||
"file_path": "",
|
||||
"fileTypes": [],
|
||||
"value": "",
|
||||
}
|
||||
|
|
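
# A minimal sketch, assuming the project's conftest looks roughly like this,
# of the `client` and `logged_in_headers` fixtures the tests above rely on.
# The login route, payload, and app factory are illustrative assumptions,
# not the actual conftest contents.
import pytest
from fastapi.testclient import TestClient

from langflow.main import create_app  # assumed app factory


@pytest.fixture
def client():
    # Wrap the FastAPI app in a synchronous test client.
    return TestClient(create_app())


@pytest.fixture
def logged_in_headers(client):
    # Assumed login flow: exchange credentials for a bearer token.
    response = client.post("api/v1/login", data={"username": "user", "password": "pass"})
    return {"Authorization": f"Bearer {response.json()['access_token']}"}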
@@ -1,312 +0,0 @@
from fastapi.testclient import TestClient

# def test_chains_settings(client: TestClient, logged_in_headers):
#     response = client.get("api/v1/all", headers=logged_in_headers)
#     assert response.status_code == 200
#     json_response = response.json()
#     chains = json_response["chains"]
#     assert set(chains.keys()) == set(settings.chains)


def test_llm_checker_chain(client: TestClient, logged_in_headers):
    response = client.get("api/v1/all", headers=logged_in_headers)
    assert response.status_code == 200
    json_response = response.json()
    chains = json_response["chains"]
    chain = chains["LLMCheckerChain"]

    # Test the base classes, llm, and _type objects
    assert set(chain["base_classes"]) == {
        "Callable",
        "LLMCheckerChain",
        "Chain",
    }

    template = chain["template"]
    assert template["llm"] == {
        "required": True,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "password": False,
        "name": "llm",
        "type": "BaseLanguageModel",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["_type"] == "LLMCheckerChain"

    # Test the description object
    assert chain["description"] == ""


def test_llm_math_chain(client: TestClient, logged_in_headers):
    response = client.get("api/v1/all", headers=logged_in_headers)
    assert response.status_code == 200
    json_response = response.json()
    chains = json_response["chains"]

    chain = chains["LLMMathChain"]
    # Test the base classes, memory, verbose, llm, input_key, output_key, and _type objects
    assert set(chain["base_classes"]) == {
        "Callable",
        "LLMMathChain",
        "Chain",
    }

    template = chain["template"]
    assert template["memory"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "password": False,
        "name": "memory",
        "type": "BaseMemory",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["verbose"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": False,
        "password": False,
        "name": "verbose",
        "type": "bool",
        "list": False,
        "advanced": True,
        "info": "",
        "fileTypes": [],
    }
    assert template["llm"] == {
        "required": True,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "password": False,
        "name": "llm",
        "type": "BaseLanguageModel",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["input_key"] == {
        "required": True,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "value": "question",
        "password": False,
        "name": "input_key",
        "type": "str",
        "list": False,
        "advanced": True,
        "info": "",
        "fileTypes": [],
    }
    assert template["output_key"] == {
        "required": True,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "value": "answer",
        "password": False,
        "name": "output_key",
        "type": "str",
        "list": False,
        "advanced": True,
        "info": "",
        "fileTypes": [],
    }
    assert template["_type"] == "LLMMathChain"

    # Test the description object
    assert chain["description"] == "Chain that interprets a prompt and executes python code to do math."


def test_series_character_chain(client: TestClient, logged_in_headers):
    response = client.get("api/v1/all", headers=logged_in_headers)
    assert response.status_code == 200
    json_response = response.json()
    chains = json_response["chains"]

    chain = chains["SeriesCharacterChain"]

    # Test the base classes, llm, character, series, and _type objects
    assert set(chain["base_classes"]) == {
        "Callable",
        "LLMChain",
        "BaseCustomChain",
        "Chain",
        "ConversationChain",
        "SeriesCharacterChain",
    }
    template = chain["template"]

    assert template["llm"] == {
        "required": True,
        "dynamic": False,
        "display_name": "LLM",
        "placeholder": "",
        "show": True,
        "multiline": False,
        "password": False,
        "name": "llm",
        "type": "BaseLanguageModel",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
        "file_path": "",
        "value": "",
    }
    assert template["character"] == {
        "required": True,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "password": False,
        "name": "character",
        "type": "str",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
        "file_path": "",
        "value": "",
    }
    assert template["series"] == {
        "required": True,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "password": False,
        "name": "series",
        "type": "str",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
        "file_path": "",
        "value": "",
    }
    assert template["_type"] == "SeriesCharacterChain"

    # Test the description object
    assert (
        chain["description"]
        == "SeriesCharacterChain is a chain you can use to have a conversation with a character from a series."
    )


def test_mid_journey_prompt_chain(client: TestClient, logged_in_headers):
    response = client.get("api/v1/all", headers=logged_in_headers)
    assert response.status_code == 200
    json_response = response.json()
    chains = json_response["chains"]
    chain = chains["MidJourneyPromptChain"]
    assert isinstance(chain, dict)

    # Test the base_classes object
    assert set(chain["base_classes"]) == {
        "LLMChain",
        "BaseCustomChain",
        "Chain",
        "ConversationChain",
        "MidJourneyPromptChain",
    }

    # Test the template object
    template = chain["template"]

    assert template["llm"] == {
        "required": True,
        "dynamic": False,
        "display_name": "LLM",
        "placeholder": "",
        "show": True,
        "multiline": False,
        "password": False,
        "name": "llm",
        "type": "BaseLanguageModel",
        "list": False,
        "advanced": False,
        "info": "",
        "file_path": "",
        "fileTypes": [],
        "value": "",
    }
    # Test the description object
    assert chain["description"] == "MidJourneyPromptChain is a chain you can use to generate new MidJourney prompts."


def test_time_travel_guide_chain(client: TestClient, logged_in_headers):
    response = client.get("api/v1/all", headers=logged_in_headers)
    assert response.status_code == 200
    json_response = response.json()
    chains = json_response["chains"]
    chain = chains["TimeTravelGuideChain"]
    assert isinstance(chain, dict)

    # Test the base_classes object
    assert set(chain["base_classes"]) == {
        "LLMChain",
        "BaseCustomChain",
        "TimeTravelGuideChain",
        "Chain",
        "ConversationChain",
    }

    # Test the template object
    template = chain["template"]

    assert template["llm"] == {
        "required": True,
        "dynamic": False,
        "placeholder": "",
        "display_name": "LLM",
        "show": True,
        "multiline": False,
        "password": False,
        "name": "llm",
        "type": "BaseLanguageModel",
        "list": False,
        "advanced": False,
        "info": "",
        "file_path": "",
        "fileTypes": [],
        "value": "",
    }
    assert template["memory"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "password": False,
        "name": "memory",
        "type": "BaseChatMemory",
        "list": False,
        "advanced": False,
        "info": "",
        "file_path": "",
        "fileTypes": [],
        "value": "",
    }

    assert chain["description"] == "Time travel guide chain."
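
# The expected-field dicts above share one shape with small per-field overrides.
# A hypothetical helper (not part of the original suite) shows how the shared
# defaults could be factored out:
def field_template(**overrides):
    """Build an expected template-field dict from shared defaults plus overrides."""
    base = {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "password": False,
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    base.update(overrides)
    return base


# The recurring `llm` assertion would then collapse to:
# assert template["llm"] == field_template(required=True, name="llm", type="BaseLanguageModel")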
@@ -1,559 +0,0 @@
from fastapi.testclient import TestClient


def test_openai(client: TestClient, logged_in_headers):
    response = client.get("api/v1/all", headers=logged_in_headers)
    assert response.status_code == 200
    json_response = response.json()
    language_models = json_response["llms"]

    model = language_models["OpenAI"]
    template = model["template"]

    assert template["cache"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "password": False,
        "name": "cache",
        "type": "bool",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["verbose"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "password": False,
        "name": "verbose",
        "type": "bool",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["client"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "password": False,
        "name": "client",
        "type": "Any",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["model_name"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "value": "text-davinci-003",
        "password": False,
        "options": [
            "text-davinci-003",
            "text-davinci-002",
            "text-curie-001",
            "text-babbage-001",
            "text-ada-001",
        ],
        "name": "model_name",
        "type": "str",
        "list": True,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    # Add more assertions for other properties here
    assert template["temperature"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "value": 0.7,
        "password": False,
        "name": "temperature",
        "type": "float",
        "list": False,
        "advanced": False,
        "info": "",
        "rangeSpec": {"max": 1.0, "min": -1.0, "step": 0.1},
        "fileTypes": [],
    }
    assert template["max_tokens"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "value": 256,
        "password": True,
        "name": "max_tokens",
        "type": "int",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["top_p"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": 1,
        "password": False,
        "name": "top_p",
        "type": "float",
        "list": False,
        "advanced": False,
        "info": "",
        "rangeSpec": {"max": 1.0, "min": -1.0, "step": 0.1},
        "fileTypes": [],
    }
    assert template["frequency_penalty"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": 0,
        "password": False,
        "name": "frequency_penalty",
        "type": "float",
        "list": False,
        "advanced": False,
        "info": "",
        "rangeSpec": {"max": 1.0, "min": -1.0, "step": 0.1},
        "fileTypes": [],
    }
    assert template["presence_penalty"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": 0,
        "password": False,
        "name": "presence_penalty",
        "type": "float",
        "list": False,
        "advanced": False,
        "info": "",
        "rangeSpec": {"max": 1.0, "min": -1.0, "step": 0.1},
        "fileTypes": [],
    }
    assert template["n"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": 1,
        "password": False,
        "name": "n",
        "type": "int",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["best_of"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": 1,
        "password": False,
        "name": "best_of",
        "type": "int",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["model_kwargs"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "password": False,
        "name": "model_kwargs",
        "type": "dict",
        "list": False,
        "advanced": True,
        "info": "",
        "fileTypes": [],
    }
    assert template["openai_api_key"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "value": "",
        "password": True,
        "name": "openai_api_key",
        "display_name": "OpenAI API Key",
        "type": "str",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["batch_size"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": 20,
        "password": False,
        "name": "batch_size",
        "type": "int",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["request_timeout"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "password": False,
        "name": "request_timeout",
        "type": "float",
        "list": False,
        "advanced": False,
        "info": "",
        "rangeSpec": {"max": 1.0, "min": -1.0, "step": 0.1},
        "fileTypes": [],
    }
    assert template["logit_bias"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "password": False,
        "name": "logit_bias",
        "type": "dict",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["max_retries"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": 2,
        "password": False,
        "name": "max_retries",
        "type": "int",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["streaming"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": False,
        "password": False,
        "name": "streaming",
        "type": "bool",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }


def test_chat_open_ai(client: TestClient, logged_in_headers):
    response = client.get("api/v1/all", headers=logged_in_headers)
    assert response.status_code == 200
    json_response = response.json()
    language_models = json_response["llms"]

    model = language_models["ChatOpenAI"]
    template = model["template"]

    assert template["verbose"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": False,
        "password": False,
        "name": "verbose",
        "type": "bool",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["client"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "password": False,
        "name": "client",
        "type": "Any",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["model_name"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "value": "gpt-4-1106-preview",
        "password": False,
        "options": [
            "gpt-4-1106-preview",
            "gpt-4-vision-preview",
            "gpt-4",
            "gpt-4-32k",
            "gpt-3.5-turbo",
            "gpt-3.5-turbo-16k",
        ],
        "name": "model_name",
        "type": "str",
        "list": True,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["temperature"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "value": 0.7,
        "password": False,
        "name": "temperature",
        "type": "float",
        "list": False,
        "advanced": False,
        "info": "",
        "rangeSpec": {"max": 1.0, "min": -1.0, "step": 0.1},
        "fileTypes": [],
    }
    assert template["model_kwargs"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "password": False,
        "name": "model_kwargs",
        "type": "dict",
        "list": False,
        "advanced": True,
        "info": "",
        "fileTypes": [],
    }
    assert template["openai_api_key"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "value": "",
        "password": True,
        "name": "openai_api_key",
        "display_name": "OpenAI API Key",
        "type": "str",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["request_timeout"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "password": False,
        "name": "request_timeout",
        "type": "float",
        "list": False,
        "advanced": False,
        "info": "",
        "rangeSpec": {"max": 1.0, "min": -1.0, "step": 0.1},
        "fileTypes": [],
    }
    assert template["max_retries"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": 2,
        "password": False,
        "name": "max_retries",
        "type": "int",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["streaming"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": False,
        "password": False,
        "name": "streaming",
        "type": "bool",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["n"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": 1,
        "password": False,
        "name": "n",
        "type": "int",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }

    assert template["max_tokens"] == {
        "required": False,
        "dynamic": False,
        "placeholder": "",
        "show": True,
        "multiline": False,
        "password": True,
        "name": "max_tokens",
        "type": "int",
        "list": False,
        "advanced": False,
        "info": "",
        "fileTypes": [],
    }
    assert template["_type"] == "ChatOpenAI"
    assert (
        model["description"] == "`OpenAI` Chat large language models API."  # noqa E501
    )
    assert set(model["base_classes"]) == {
        "BaseLLM",
        "BaseChatModel",
        "ChatOpenAI",
        "BaseLanguageModel",
    }


# Commented out for now, as it requires activating the nodes
# def test_azure_open_ai(client: TestClient):
#     response = client.get("/all")
#     assert response.status_code == 200
#     json_response = response.json()
#     language_models = json_response["llms"]

#     model = language_models["AzureOpenAI"]
#     template = model["template"]

#     assert template["model_name"]["show"] is False
#     assert template["deployment_name"] == {
#         "required": False,
#         "placeholder": "",
#         "show": True,
#         "multiline": False,
#         "value": "",
#         "password": False,
#         "name": "deployment_name",
#         "advanced": False,
#         "type": "str",
#         "list": False,
#     }


# def test_azure_chat_open_ai(client: TestClient):
#     response = client.get("/all")
#     assert response.status_code == 200
#     json_response = response.json()
#     language_models = json_response["llms"]

#     model = language_models["AzureChatOpenAI"]
#     template = model["template"]

#     assert template["model_name"]["show"] is False
#     assert template["deployment_name"] == {
#         "required": False,
#         "placeholder": "",
#         "show": True,
#         "multiline": False,
#         "value": "",
#         "password": False,
#         "name": "deployment_name",
#         "advanced": False,
#         "type": "str",
#         "list": False,
#     }
#     assert template["openai_api_type"] == {
#         "required": False,
#         "placeholder": "",
#         "show": False,
#         "multiline": False,
#         "value": "azure",
#         "password": False,
#         "name": "openai_api_type",
#         "display_name": "OpenAI API Type",
#         "advanced": False,
#         "type": "str",
#         "list": False,
#     }
#     assert template["openai_api_version"] == {
#         "required": False,
#         "placeholder": "",
#         "show": True,
#         "multiline": False,
#         "value": "2023-03-15-preview",
#         "password": False,
#         "name": "openai_api_version",
#         "display_name": "OpenAI API Version",
#         "advanced": False,
#         "type": "str",
#         "list": False,
#     }
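
# A hedged sketch (not in the original suite) of a parametrized smoke test over
# the component categories the tests above query one by one; the category names
# come straight from the `api/v1/all` responses asserted throughout this file.
import pytest


@pytest.mark.parametrize("category", ["agents", "chains", "llms", "vectorstores"])
def test_all_endpoint_exposes_category(client, logged_in_headers, category):
    response = client.get("api/v1/all", headers=logged_in_headers)
    assert response.status_code == 200
    # Each category should be a mapping from component name to its spec.
    assert isinstance(response.json()[category], dict)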
@@ -1,14 +0,0 @@
from fastapi.testclient import TestClient
from langflow.services.deps import get_settings_service


# Check that all vector stores in settings.VECTORSTORES
# are present in json_response["vectorstores"].
def test_vectorstores_settings(client: TestClient, logged_in_headers):
    settings_service = get_settings_service()
    response = client.get("api/v1/all", headers=logged_in_headers)
    assert response.status_code == 200
    json_response = response.json()
    vectorstores = json_response["vectorstores"]
    settings_vecs = set(settings_service.settings.VECTORSTORES)
    assert all(vs in vectorstores for vs in settings_vecs)
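    # Equivalent, slightly more direct subset check (hypothetical alternative):
    # assert settings_vecs <= set(vectorstores)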