Merge branch 'main' into dev

This commit is contained in:
Gabriel Luiz Freitas Almeida 2024-02-05 18:34:19 -03:00 committed by GitHub
commit 7b5225f857
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
239 changed files with 28526 additions and 15389 deletions

5
.gitignore vendored
View file

@ -17,9 +17,6 @@ qdrant_storage
.chroma
.ruff_cache
# PyCharm
.idea/
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
@ -259,4 +256,4 @@ langflow.db
/tmp/*
src/backend/langflow/frontend/
.docker
scratchpad*
scratchpad*

View file

@ -45,13 +45,6 @@ run_frontend:
@-kill -9 `lsof -t -i:3000`
cd src/frontend && npm start
tests_frontend:
ifeq ($(UI), true)
cd src/frontend && ./run-tests.sh --ui
else
cd src/frontend && ./run-tests.sh
endif
run_cli:
poetry run langflow run --path src/frontend/build

View file

@ -133,7 +133,7 @@ Follow our step-by-step guide to deploy Langflow on Google Cloud Platform (GCP)
Alternatively, click the **"Open in Cloud Shell"** button below to launch Google Cloud Shell, clone the Langflow repository, and start an **interactive tutorial** that will guide you through the process of setting up the necessary resources and deploying Langflow on your GCP project.
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/logspace-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
[![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/logspace-ai/langflow&working_dir=scripts&shellonly=true&tutorial=walkthroughtutorial_spot.md)
## Deploy on Railway

View file

@ -1,13 +1,11 @@
import Admonition from "@theme/Admonition";
import Admonition from '@theme/Admonition';
# Embeddings
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation it may
contain some rough edges. Share your feedback or report issues to help us
improve! 🛠️📝
</p>
<p>
We appreciate your understanding as we polish our documentation it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
</Admonition>
Embeddings are vector representations of text that capture the semantic meaning of the text. They are created using text embedding models and allow us to think about the text in a vector space, enabling us to perform tasks like semantic search, where we look for pieces of text that are most similar in the vector space.
@ -112,12 +110,4 @@ Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP).
- **top_k:** How the model selects tokens for output, the next token is selected from defaults to `40`.
- **top_p:** Tokens are selected from most probable to least until the sum of their defaults to `0.95`.
- **tuned_model_name:** The name of a tuned model. If provided, model_name is ignored.
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output defaults to `False`.
### OllamaEmbeddings
Used to load [Ollamas](https://ollama.ai/) embedding models. Wrapper around LangChain's [Ollama API](https://python.langchain.com/docs/integrations/text_embedding/ollama).
- **model** The name of the Ollama model to use defaults to `llama2`.
- **base_url** The base URL for the Ollama API defaults to `http://localhost:11434`.
- **temperature** Tunes the degree of randomness in text generations. Should be a non-negative value defaults to `0`.
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output defaults to `False`.

View file

@ -21,7 +21,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`ConversationBufferMemory`](https://python.langchain.com/docs/modules/memory/types/buffer)
- [`ConversationBufferMemory`](https://python.langchain.com/docs/modules/memory/how_to/buffer)
- [`ConversationChain`](https://python.langchain.com/docs/modules/chains/)
- [`ChatOpenAI`](https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai)

20117
docs/package-lock.json generated

File diff suppressed because it is too large Load diff

View file

@ -1,5 +1,5 @@
{
"name": "langflow-docs",
"name": "docusaurus",
"version": "0.0.0",
"private": true,
"scripts": {
@ -36,8 +36,8 @@
"path-browserify": "^1.0.1",
"postcss": "^8.4.31",
"prism-react-renderer": "^1.3.5",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react": "^17.0.2",
"react-dom": "^17.0.2",
"react-images": "^0.6.7",
"react-medium-image-zoom": "^5.1.6",
"react-player": "^2.12.0",

3312
poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@ -25,6 +25,8 @@ documentation = "https://docs.langflow.org"
langflow = "langflow.__main__:main"
[tool.poetry.dependencies]
python = ">=3.9,<3.11"
fastapi = "^0.108.0"
uvicorn = "^0.25.0"
@ -33,8 +35,8 @@ google-search-results = "^2.4.1"
google-api-python-client = "^2.79.0"
typer = "^0.9.0"
gunicorn = "^21.2.0"
langchain = "~0.1.0"
openai = "^1.10.0"
langchain = "~0.0.345"
openai = "^1.6.1"
pandas = "2.0.3"
chromadb = "^0.4.0"
huggingface-hub = { version = "^0.19.0", extras = ["inference"] }
@ -53,7 +55,7 @@ tiktoken = "~0.5.0"
wikipedia = "^1.4.0"
qdrant-client = "^1.7.0"
websockets = "^10.3"
weaviate-client = "*"
weaviate-client = "^3.26.0"
jina = "*"
sentence-transformers = { version = "^2.2.2", optional = true }
ctransformers = { version = "^0.2.10", optional = true }
@ -61,7 +63,7 @@ cohere = "^4.39.0"
python-multipart = "^0.0.6"
sqlmodel = "^0.0.14"
faiss-cpu = "^1.7.4"
anthropic = "^0.13.0"
anthropic = "^0.8.0"
orjson = "3.9.3"
multiprocess = "^0.70.14"
cachetools = "^5.3.1"
@ -96,22 +98,19 @@ markupsafe = "^2.1.3"
extract-msg = "^0.45.0"
# jq is not available for windows
jq = { version = "^1.6.0", markers = "sys_platform != 'win32'" }
boto3 = "^1.34.0"
boto3 = "^1.28.63"
numexpr = "^2.8.6"
qianfan = "0.2.0"
pgvector = "^0.2.3"
pyautogen = "^0.2.0"
langchain-google-genai = "^0.0.2"
elasticsearch = "^8.11.1"
pytube = "^15.0.0"
llama-index = "^0.9.24"
langchain-openai = "^0.0.2"
[tool.poetry.group.dev.dependencies]
pytest-asyncio = "^0.23.1"
types-redis = "^4.6.0.5"
ipykernel = "^6.27.0"
mypy = "^1.8.0"
mypy = "^1.7.1"
ruff = "^0.1.5"
httpx = "*"
pytest = "^7.4.2"
@ -153,8 +152,7 @@ exclude = ["src/backend/langflow/alembic/*"]
line-length = 120
[tool.mypy]
plugins = ["pydantic.mypy"]
follow_imports = "silent"
plugins = "pydantic.mypy"
[build-system]
requires = ["poetry-core"]

View file

@ -1,12 +1,15 @@
import platform
import socket
import sys
import time
import webbrowser
from pathlib import Path
from typing import Optional
import httpx
import typer
from dotenv import load_dotenv
from multiprocess import cpu_count # type: ignore
from multiprocess import Process, cpu_count # type: ignore
from rich import box
from rich import print as rprint
from rich.console import Console
@ -209,12 +212,23 @@ def run(
run_on_windows(host, port, log_level, options, app)
else:
# Run using gunicorn on Linux
run_on_mac_or_linux(host, port, log_level, options, app)
run_on_mac_or_linux(host, port, log_level, options, app, open_browser)
def run_on_mac_or_linux(host, port, log_level, options, app):
def run_on_mac_or_linux(host, port, log_level, options, app, open_browser=True):
webapp_process = Process(target=run_langflow, args=(host, port, log_level, options, app))
webapp_process.start()
status_code = 0
while status_code != 200:
try:
status_code = httpx.get(f"http://{host}:{port}/health").status_code
except Exception:
time.sleep(1)
print_banner(host, port)
run_langflow(host, port, log_level, options, app)
if open_browser:
webbrowser.open(f"http://{host}:{port}")
def run_on_windows(host, port, log_level, options, app):
@ -289,26 +303,19 @@ def run_langflow(host, port, log_level, options, app):
Run Langflow server on localhost
"""
try:
if platform.system() in ["Windows", "Darwin"]:
if platform.system() in ["Windows"]:
# Run using uvicorn on MacOS and Windows
# Windows doesn't support gunicorn
# MacOS requires an env variable to be set to use gunicorn
import uvicorn
uvicorn.run(
app,
host=host,
port=port,
log_level=log_level,
)
uvicorn.run(app, host=host, port=port, log_level=log_level)
else:
from langflow.server import LangflowApplication
LangflowApplication(app, options).run()
except KeyboardInterrupt:
logger.info("Shutting down server")
sys.exit(0)
pass
except Exception as e:
logger.exception(e)
sys.exit(1)

View file

@ -27,8 +27,7 @@ def upgrade() -> None:
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.create_unique_constraint('uq_user_id', ['id'])
except Exception as e:
print(e)
except Exception:
pass
# ### end Alembic commands ###
@ -45,7 +44,6 @@ def downgrade() -> None:
with op.batch_alter_table('apikey', schema=None) as batch_op:
batch_op.drop_constraint('uq_apikey_id', type_='unique')
except Exception as e:
print(e)
except Exception:
pass
# ### end Alembic commands ###

View file

@ -1,71 +0,0 @@
"""empty message
Revision ID: 0b8757876a7c
Revises: 006b3990db50
Create Date: 2024-01-17 10:32:56.686287
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '0b8757876a7c'
down_revision: Union[str, None] = '006b3990db50'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table('apikey', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_apikey_api_key'), ['api_key'], unique=True)
batch_op.create_index(batch_op.f('ix_apikey_name'), ['name'], unique=False)
batch_op.create_index(batch_op.f('ix_apikey_user_id'), ['user_id'], unique=False)
except Exception as e:
print(e)
pass
try:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_flow_description'), ['description'], unique=False)
batch_op.create_index(batch_op.f('ix_flow_name'), ['name'], unique=False)
batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
except Exception as e:
print(e)
pass
try:
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_user_username'), ['username'], unique=True)
except Exception as e:
print(e)
pass
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_user_username'))
except Exception as e:
print(e)
pass
try:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_flow_user_id'))
batch_op.drop_index(batch_op.f('ix_flow_name'))
batch_op.drop_index(batch_op.f('ix_flow_description'))
except Exception as e:
print(e)
pass
try:
with op.batch_alter_table('apikey', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_apikey_user_id'))
batch_op.drop_index(batch_op.f('ix_apikey_name'))
batch_op.drop_index(batch_op.f('ix_apikey_api_key'))
except Exception as e:
print(e)
pass
# ### end Alembic commands ###

View file

@ -60,8 +60,8 @@ def upgrade() -> None:
sa.Column("create_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.Column("last_login_at", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("id", name="pk_user"),
sa.UniqueConstraint("id", name="uq_user_id"),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("id"),
)
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.create_index(
@ -83,8 +83,8 @@ def upgrade() -> None:
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id", name="pk_apikey"),
sa.UniqueConstraint("id", name="uq_apikey_id"),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("id"),
)
with op.batch_alter_table("apikey", schema=None) as batch_op:
batch_op.create_index(
@ -106,8 +106,8 @@ def upgrade() -> None:
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id", name="pk_flow"),
sa.UniqueConstraint("id", name="uq_flow_id"),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("id"),
)
# Conditionally create indices for 'flow' table
# if _alembic_tmp_flow exists, then we need to drop it first
@ -145,7 +145,7 @@ def upgrade() -> None:
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
# List existing tables
existing_tables = inspector.get_table_names()

View file

@ -29,10 +29,9 @@ def upgrade() -> None:
sa.Column('id', sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.PrimaryKeyConstraint('id')
)
except Exception as e:
print(e)
except Exception:
pass
# ### end Alembic commands ###
@ -41,7 +40,6 @@ def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
op.drop_table('credential')
except Exception as e:
print(e)
except Exception:
pass
# ### end Alembic commands ###

View file

@ -45,7 +45,6 @@ def downgrade() -> None:
with op.batch_alter_table("flow", schema=None) as batch_op:
batch_op.drop_column("is_component")
except Exception as e:
print(e)
except Exception:
pass
# ### end Alembic commands ###

View file

@ -37,6 +37,7 @@ def upgrade() -> None:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
# ### end Alembic commands ###

View file

@ -1,59 +0,0 @@
"""Add unique constraints
Revision ID: b2fa308044b5
Revises: 0b8757876a7c
Create Date: 2024-01-26 13:31:14.797548
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision: str = 'b2fa308044b5'
down_revision: Union[str, None] = '0b8757876a7c'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
op.drop_table('flowstyle')
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.add_column(sa.Column('is_component', sa.Boolean(), nullable=True))
batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
batch_op.add_column(sa.Column('user_id', sqlmodel.sql.sqltypes.GUID(), nullable=True))
batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
batch_op.create_foreign_key('fk_flow_user_id_user', 'user', ['user_id'], ['id'])
except Exception:
pass
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.drop_constraint('fk_flow_user_id_user', type_='foreignkey')
batch_op.drop_index(batch_op.f('ix_flow_user_id'))
batch_op.drop_column('user_id')
batch_op.drop_column('folder')
batch_op.drop_column('updated_at')
batch_op.drop_column('is_component')
op.create_table('flowstyle',
sa.Column('color', sa.VARCHAR(), nullable=False),
sa.Column('emoji', sa.VARCHAR(), nullable=False),
sa.Column('flow_id', sa.CHAR(length=32), nullable=True),
sa.Column('id', sa.CHAR(length=32), nullable=False),
sa.ForeignKeyConstraint(['flow_id'], ['flow.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id')
)
except Exception:
pass
# ### end Alembic commands ###

View file

@ -1,50 +0,0 @@
"""New fixes
Revision ID: bc2f01c40e4a
Revises: b2fa308044b5
Create Date: 2024-01-26 13:34:14.496769
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision: str = 'bc2f01c40e4a'
down_revision: Union[str, None] = 'b2fa308044b5'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.add_column(sa.Column('is_component', sa.Boolean(), nullable=True))
batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
batch_op.add_column(sa.Column('user_id', sqlmodel.sql.sqltypes.GUID(), nullable=True))
batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
batch_op.create_foreign_key('flow_user_id_fkey'
, 'user', ['user_id'], ['id'])
except Exception:
pass
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.drop_constraint('flow_user_id_fkey', type_='foreignkey')
batch_op.drop_index(batch_op.f('ix_flow_user_id'))
batch_op.drop_column('user_id')
batch_op.drop_column('folder')
batch_op.drop_column('updated_at')
batch_op.drop_column('is_component')
except Exception:
pass
# ### end Alembic commands ###

View file

@ -29,8 +29,7 @@ def upgrade() -> None:
except exc.SQLAlchemyError:
# connection.execute(text("ROLLBACK"))
pass
except Exception as e:
print(e)
except Exception:
pass
try:
@ -38,8 +37,7 @@ def upgrade() -> None:
except exc.SQLAlchemyError:
# connection.execute(text("ROLLBACK"))
pass
except Exception as e:
print(e)
except Exception:
pass
# ### end Alembic commands ###
@ -59,15 +57,14 @@ def downgrade() -> None:
sa.Column("is_read_only", sa.BOOLEAN(), nullable=False),
sa.Column("create_at", sa.DATETIME(), nullable=False),
sa.Column("update_at", sa.DATETIME(), nullable=False),
sa.PrimaryKeyConstraint("id", name="pk_component"),
sa.PrimaryKeyConstraint("id"),
)
with op.batch_alter_table("component", schema=None) as batch_op:
batch_op.create_index("ix_component_name", ["name"], unique=False)
batch_op.create_index(
"ix_component_frontend_node_id", ["frontend_node_id"], unique=False
)
except Exception as e:
print(e)
except Exception:
pass
try:
@ -81,10 +78,9 @@ def downgrade() -> None:
["flow_id"],
["flow.id"],
),
sa.PrimaryKeyConstraint("id", name="pk_flowstyle"),
sa.UniqueConstraint("id", name="uq_flowstyle_id"),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("id"),
)
except Exception as e:
print(e)
except Exception:
pass
# ### end Alembic commands ###

View file

@ -7,8 +7,10 @@ Create Date: 2023-10-18 23:12:27.297016
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision: str = "f5ee9749d1a6"
@ -24,8 +26,7 @@ def upgrade() -> None:
batch_op.alter_column(
"user_id", existing_type=sa.CHAR(length=32), nullable=True
)
except Exception as e:
print(e)
except Exception:
pass
# ### end Alembic commands ###
@ -38,8 +39,7 @@ def downgrade() -> None:
batch_op.alter_column(
"user_id", existing_type=sa.CHAR(length=32), nullable=False
)
except Exception as e:
print(e)
except Exception:
pass
# ### end Alembic commands ###

View file

@ -21,8 +21,7 @@ def upgrade() -> None:
try:
with op.batch_alter_table('credential', schema=None) as batch_op:
batch_op.create_foreign_key("fk_credential_user_id", 'user', ['user_id'], ['id'])
except Exception as e:
print(e)
except Exception:
pass
# ### end Alembic commands ###
@ -33,8 +32,7 @@ def downgrade() -> None:
try:
with op.batch_alter_table('credential', schema=None) as batch_op:
batch_op.drop_constraint("fk_credential_user_id", type_='foreignkey')
except Exception as e:
print(e)
except Exception:
pass
# ### end Alembic commands ###

View file

@ -1,11 +1,11 @@
import time
from fastapi import APIRouter, Depends, HTTPException, WebSocket, WebSocketException, status
from fastapi import APIRouter, Depends, HTTPException, Query, WebSocket, WebSocketException, status
from fastapi.responses import StreamingResponse
from langflow.api.utils import build_input_keys_response, format_elapsed_time
from langflow.api.v1.schemas import BuildStatus, BuiltResponse, InitResponse, StreamData
from langflow.graph.graph.base import Graph
from langflow.services.auth.utils import get_current_active_user, get_current_user_for_websocket
from langflow.services.auth.utils import get_current_active_user, get_current_user_by_jwt
from langflow.services.cache.service import BaseCacheService
from langflow.services.cache.utils import update_build_status
from langflow.services.chat.service import ChatService
@ -20,16 +20,17 @@ router = APIRouter(tags=["Chat"])
async def chat(
client_id: str,
websocket: WebSocket,
token: str = Query(...),
db: Session = Depends(get_session),
chat_service: "ChatService" = Depends(get_chat_service),
):
"""Websocket endpoint for chat."""
try:
user = await get_current_user_for_websocket(websocket, db)
user = await get_current_user_by_jwt(token, db)
await websocket.accept()
if not user:
await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
elif not user.is_active:
if not user.is_active:
await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
if client_id in chat_service.cache_service:

View file

@ -3,10 +3,12 @@ from typing import Annotated, Any, List, Optional, Union
import sqlalchemy as sa
from fastapi import APIRouter, Body, Depends, HTTPException, UploadFile, status
from loguru import logger
from sqlmodel import select
from langflow.api.utils import update_frontend_node_with_template_values
from langflow.api.v1.schemas import (
CustomComponentCode,
PreloadResponse,
ProcessResponse,
TaskResponse,
TaskStatusResponse,
@ -15,15 +17,12 @@ from langflow.api.v1.schemas import (
from langflow.interface.custom.custom_component import CustomComponent
from langflow.interface.custom.directory_reader import DirectoryReader
from langflow.interface.custom.utils import build_custom_component_template
from langflow.processing.process import build_graph_and_generate_result, process_graph_cached, process_tweaks
from langflow.processing.process import process_graph_cached, process_tweaks
from langflow.services.auth.utils import api_key_security, get_current_active_user
from langflow.services.cache.utils import save_uploaded_file
from langflow.services.database.models.flow import Flow
from langflow.services.database.models.user.model import User
from langflow.services.deps import get_session, get_session_service, get_settings_service, get_task_service
from langflow.services.session.service import SessionService
from loguru import logger
from sqlmodel import select
try:
from langflow.worker import process_graph_cached_task
@ -33,9 +32,10 @@ except ImportError:
raise NotImplementedError("Celery is not installed")
from langflow.services.task.service import TaskService
from sqlmodel import Session
from langflow.services.task.service import TaskService
# build router
router = APIRouter(tags=["Base"])
@ -148,55 +148,6 @@ async def process_json(
raise HTTPException(status_code=500, detail=str(exc)) from exc
# Endpoint to preload a graph
@router.post("/process/preload/{flow_id}", response_model=PreloadResponse)
async def preload_flow(
session: Annotated[Session, Depends(get_session)],
flow_id: str,
session_id: Optional[str] = None,
session_service: SessionService = Depends(get_session_service),
api_key_user: User = Depends(api_key_security),
clear_session: Annotated[bool, Body(embed=True)] = False, # noqa: F821
):
try:
# Get the flow that matches the flow_id and belongs to the user
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
if clear_session:
session_service.clear_session(session_id)
# Check if the session exists
session_data = await session_service.load_session(session_id)
# Session data is a tuple of (graph, artifacts)
# or (None, None) if the session is empty
if isinstance(session_data, tuple):
graph, artifacts = session_data
is_clear = graph is None and artifacts is None
else:
is_clear = session_data is None
return PreloadResponse(session_id=session_id, is_clear=is_clear)
else:
if session_id is None:
session_id = flow_id
flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
if flow is None:
raise ValueError(f"Flow {flow_id} not found")
if flow.data is None:
raise ValueError(f"Flow {flow_id} has no data")
graph_data = flow.data
session_service.clear_session(session_id)
# Load the graph using SessionService
session_data = await session_service.load_session(session_id, graph_data)
graph, artifacts = session_data if session_data else (None, None)
if not graph:
raise ValueError("Graph not found in the session")
_ = await graph.build()
session_service.update_session(session_id, (graph, artifacts))
return PreloadResponse(session_id=session_id)
except Exception as exc:
logger.exception(exc)
raise HTTPException(status_code=500, detail=str(exc)) from exc
@router.post(
"/predict/{flow_id}",
response_model=ProcessResponse,
@ -216,75 +167,36 @@ async def process(
task_service: "TaskService" = Depends(get_task_service),
api_key_user: User = Depends(api_key_security),
sync: Annotated[bool, Body(embed=True)] = True, # noqa: F821
session_service: SessionService = Depends(get_session_service),
):
"""
Endpoint to process an input with a given flow_id.
"""
try:
if session_id:
session_data = await session_service.load_session(session_id)
graph, artifacts = session_data if session_data else (None, None)
task_result: Any = None
task_status = None
task_id = None
if not graph:
raise ValueError("Graph not found in the session")
result = await build_graph_and_generate_result(
graph=graph,
inputs=inputs,
artifacts=artifacts,
session_id=session_id,
session_service=session_service,
)
task_id = str(id(result))
if isinstance(result, dict) and "result" in result:
task_result = result["result"]
session_id = result["session_id"]
elif hasattr(result, "result") and hasattr(result, "session_id"):
task_result = result.result
session_id = result.session_id
else:
task_result = result
if task_id:
task_response = TaskResponse(id=task_id, href=f"api/v1/task/{task_id}")
else:
task_response = None
return ProcessResponse(
result=task_result,
status=task_status,
task=task_response,
session_id=session_id,
backend=task_service.backend_name,
if api_key_user is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid API Key",
)
else:
if api_key_user is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid API Key",
)
# Get the flow that matches the flow_id and belongs to the user
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
if flow is None:
raise ValueError(f"Flow {flow_id} not found")
# Get the flow that matches the flow_id and belongs to the user
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
if flow is None:
raise ValueError(f"Flow {flow_id} not found")
if flow.data is None:
raise ValueError(f"Flow {flow_id} has no data")
graph_data = flow.data
return await process_graph_data(
graph_data=graph_data,
inputs=inputs,
tweaks=tweaks,
clear_cache=clear_cache,
session_id=session_id,
task_service=task_service,
sync=sync,
)
if flow.data is None:
raise ValueError(f"Flow {flow_id} has no data")
graph_data = flow.data
return await process_graph_data(
graph_data=graph_data,
inputs=inputs,
tweaks=tweaks,
clear_cache=clear_cache,
session_id=session_id,
task_service=task_service,
sync=sync,
)
except sa.exc.StatementError as exc:
# StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
if "badly formed hexadecimal UUID string" in str(exc):

View file

@ -1,4 +1,4 @@
from fastapi import APIRouter, Depends, HTTPException, Request, Response, status
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm
from sqlmodel import Session
@ -16,7 +16,6 @@ router = APIRouter(tags=["Login"])
@router.post("/login", response_model=Token)
async def login_to_get_access_token(
response: Response,
form_data: OAuth2PasswordRequestForm = Depends(),
db: Session = Depends(get_session),
# _: Session = Depends(get_current_active_user)
@ -32,10 +31,7 @@ async def login_to_get_access_token(
) from exc
if user:
tokens = create_user_tokens(user_id=user.id, db=db, update_last_login=True)
response.set_cookie("refresh_token_lf", tokens["refresh_token"], httponly=True)
response.set_cookie("access_token_lf", tokens["access_token"], httponly=False)
return tokens
return create_user_tokens(user_id=user.id, db=db, update_last_login=True)
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
@ -45,13 +41,9 @@ async def login_to_get_access_token(
@router.get("/auto_login")
async def auto_login(
response: Response, db: Session = Depends(get_session), settings_service=Depends(get_settings_service)
):
async def auto_login(db: Session = Depends(get_session), settings_service=Depends(get_settings_service)):
if settings_service.auth_settings.AUTO_LOGIN:
tokens = create_user_longterm_token(db)
response.set_cookie("access_token_lf", tokens["access_token"], httponly=False)
return tokens
return create_user_longterm_token(db)
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
@ -63,23 +55,12 @@ async def auto_login(
@router.post("/refresh")
async def refresh_token(request: Request, response: Response):
token = request.cookies.get("refresh_token_lf")
async def refresh_token(token: str):
if token:
tokens = create_refresh_token(token)
response.set_cookie("refresh_token_lf", tokens["refresh_token"], httponly=True)
response.set_cookie("access_token_lf", tokens["access_token"], httponly=False)
return tokens
return create_refresh_token(token)
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid refresh token",
headers={"WWW-Authenticate": "Bearer"},
)
@router.post("/logout")
async def logout(response: Response):
response.delete_cookie("refresh_token_lf")
response.delete_cookie("access_token_lf")
return {"message": "Logout successful"}

View file

@ -64,13 +64,6 @@ class ProcessResponse(BaseModel):
backend: Optional[str] = None
class PreloadResponse(BaseModel):
"""Preload response schema."""
session_id: Optional[str] = None
is_clear: Optional[bool] = None
# TaskStatusResponse(
# status=task.status, result=task.result if task.ready() else None
# )

View file

@ -41,11 +41,12 @@ class AgentInitializerComponent(CustomComponent):
handle_parsing_errors=True,
max_iterations=max_iterations,
)
return initialize_agent(
tools=tools,
llm=llm,
agent=agent,
return_intermediate_steps=True,
handle_parsing_errors=True,
max_iterations=max_iterations,
)
else:
return initialize_agent(
tools=tools,
llm=llm,
agent=agent,
return_intermediate_steps=True,
handle_parsing_errors=True,
max_iterations=max_iterations,
)

View file

@ -1,23 +0,0 @@
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, AgentExecutor
from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_agent
class CSVAgentComponent(CustomComponent):
    """Expose LangChain's CSV agent as a Langflow component."""

    display_name = "CSVAgent"
    description = "Construct a CSV agent from a CSV and tools."
    documentation = "https://python.langchain.com/docs/modules/agents/toolkits/csv"

    def build_config(self):
        """Describe the fields the UI renders for this component."""
        llm_field = {"display_name": "LLM", "type": BaseLanguageModel}
        path_field = {
            "display_name": "Path",
            "field_type": "file",
            "suffixes": [".csv"],
            "file_types": [".csv"],
        }
        return {"llm": llm_field, "path": path_field}

    def build(
        self,
        llm: BaseLanguageModel,
        path: str,
    ) -> AgentExecutor:
        """Create the CSV agent for the given model and CSV file path."""
        return create_csv_agent(llm=llm, path=path)

View file

@ -1,24 +0,0 @@
from langflow import CustomComponent
from langchain.agents import AgentExecutor, create_json_agent
from langflow.field_typing import (
BaseLanguageModel,
)
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
class JsonAgentComponent(CustomComponent):
    """Langflow wrapper around LangChain's JSON-toolkit agent."""

    display_name = "JsonAgent"
    description = "Construct a json agent from an LLM and tools."

    def build_config(self):
        """Field definitions rendered by the UI (a handle per input)."""
        return {
            name: {"display_name": label}
            for name, label in (("llm", "LLM"), ("toolkit", "Toolkit"))
        }

    def build(
        self,
        llm: BaseLanguageModel,
        toolkit: JsonToolkit,
    ) -> AgentExecutor:
        """Assemble the JSON agent from the model and toolkit."""
        return create_json_agent(llm=llm, toolkit=toolkit)

View file

@ -3,12 +3,13 @@ from typing import List, Optional
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.conversational_retrieval.openai_functions import _get_default_system_message
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.chat_models import ChatOpenAI
from langchain.memory.token_buffer import ConversationTokenBufferMemory
from langchain.prompts import SystemMessagePromptTemplate
from langchain.prompts.chat import MessagesPlaceholder
from langchain.schema.memory import BaseMemory
from langchain.tools import Tool
from langchain_community.chat_models import ChatOpenAI
from langflow import CustomComponent
from langflow.field_typing.range_spec import RangeSpec
@ -19,12 +20,11 @@ class ConversationalAgent(CustomComponent):
def build_config(self):
openai_function_models = [
"gpt-4-turbo-preview",
"gpt-4-0125-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-4",
"gpt-4-32k",
]
return {
"tools": {"display_name": "Tools"},

View file

@ -1,29 +0,0 @@
from langflow import CustomComponent
from typing import Union, Callable
from langchain.agents import AgentExecutor
from langflow.field_typing import BaseLanguageModel
from langchain_community.agent_toolkits.sql.base import create_sql_agent
from langchain.sql_database import SQLDatabase
from langchain_community.agent_toolkits import SQLDatabaseToolkit
class SQLAgentComponent(CustomComponent):
    """Langflow component that builds a SQL-database agent.

    Connects to the database at ``database_uri``, wraps it in a
    ``SQLDatabaseToolkit`` and returns the ready-to-run agent.
    """

    display_name = "SQLAgent"
    description = "Construct an SQL agent from an LLM and tools."

    def build_config(self):
        """Field definitions rendered by the UI."""
        return {
            "llm": {"display_name": "LLM"},
            "database_uri": {"display_name": "Database URI"},
            "verbose": {"display_name": "Verbose", "value": False, "advanced": True},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        database_uri: str,
        verbose: bool = False,
    ) -> Union[AgentExecutor, Callable]:
        """Create the SQL agent.

        Args:
            llm: Language model driving the agent.
            database_uri: SQLAlchemy-style connection URI.
            verbose: Whether the agent logs its intermediate steps.
        """
        db = SQLDatabase.from_uri(database_uri)
        toolkit = SQLDatabaseToolkit(db=db, llm=llm)
        # Bug fix: ``verbose`` was accepted (and exposed in build_config)
        # but never forwarded, so the toggle had no effect.
        return create_sql_agent(llm=llm, toolkit=toolkit, verbose=verbose)

View file

@ -1,23 +0,0 @@
from langflow import CustomComponent
from langchain.agents import AgentExecutor, create_vectorstore_agent
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreToolkit
from typing import Union, Callable
from langflow.field_typing import BaseLanguageModel
class VectorStoreAgentComponent(CustomComponent):
    """Langflow wrapper for LangChain's vector-store agent."""

    display_name = "VectorStoreAgent"
    description = "Construct an agent from a Vector Store."

    def build_config(self):
        """Field definitions rendered by the UI."""
        config = {}
        config["llm"] = {"display_name": "LLM"}
        config["vector_store_toolkit"] = {"display_name": "Vector Store Info"}
        return config

    def build(
        self,
        llm: BaseLanguageModel,
        vector_store_toolkit: VectorStoreToolkit,
    ) -> Union[AgentExecutor, Callable]:
        """Build the agent over the supplied vector-store toolkit."""
        return create_vectorstore_agent(llm=llm, toolkit=vector_store_toolkit)

View file

@ -1,19 +0,0 @@
from langflow import CustomComponent
from langchain_core.language_models.base import BaseLanguageModel
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreRouterToolkit
from langchain.agents import create_vectorstore_router_agent
from typing import Callable
class VectorStoreRouterAgentComponent(CustomComponent):
    """Langflow wrapper for LangChain's vector-store router agent."""

    display_name = "VectorStoreRouterAgent"
    description = "Construct an agent from a Vector Store Router."

    def build_config(self):
        """Field definitions rendered by the UI."""
        field_labels = (
            ("llm", "LLM"),
            ("vectorstoreroutertoolkit", "Vector Store Router Toolkit"),
        )
        return {name: {"display_name": label} for name, label in field_labels}

    def build(self, llm: BaseLanguageModel, vectorstoreroutertoolkit: VectorStoreRouterToolkit) -> Callable:
        """Build the router agent over the supplied toolkit."""
        return create_vectorstore_router_agent(llm=llm, toolkit=vectorstoreroutertoolkit)

View file

@ -28,5 +28,5 @@ class LLMChainComponent(CustomComponent):
prompt: BasePromptTemplate,
llm: BaseLanguageModel,
memory: Optional[BaseMemory] = None,
) -> Union[Chain, Callable, LLMChain]:
) -> Union[Chain, Callable]:
return LLMChain(prompt=prompt, llm=llm, memory=memory)

View file

@ -1,24 +0,0 @@
from langflow import CustomComponent
from langchain.chains import LLMCheckerChain
from typing import Union, Callable
from langflow.field_typing import (
BaseLanguageModel,
Chain,
)
class LLMCheckerChainComponent(CustomComponent):
    """Langflow wrapper for LangChain's self-checking LLM chain."""

    display_name = "LLMCheckerChain"
    description = ""
    documentation = "https://python.langchain.com/docs/modules/chains/additional/llm_checker"

    def build_config(self):
        """A single LLM input field."""
        return {"llm": {"display_name": "LLM"}}

    def build(self, llm: BaseLanguageModel) -> Union[Chain, Callable]:
        """Instantiate the checker chain around the given model."""
        return LLMCheckerChain(llm=llm)

View file

@ -1,31 +0,0 @@
from typing import Callable, Optional, Union
from langchain.chains import LLMChain, LLMMathChain
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, BaseMemory, Chain
class LLMMathChainComponent(CustomComponent):
    """Langflow wrapper for LLMMathChain (prompt -> python math)."""

    display_name = "LLMMathChain"
    description = "Chain that interprets a prompt and executes python code to do math."
    documentation = "https://python.langchain.com/docs/modules/chains/additional/llm_math"

    def build_config(self):
        """Field definitions rendered by the UI."""
        labels = {
            "llm": "LLM",
            "llm_chain": "LLM Chain",
            "memory": "Memory",
            "input_key": "Input Key",
            "output_key": "Output Key",
        }
        return {name: {"display_name": label} for name, label in labels.items()}

    def build(
        self,
        llm: BaseLanguageModel,
        llm_chain: LLMChain,
        input_key: str = "question",
        output_key: str = "answer",
        memory: Optional[BaseMemory] = None,
    ) -> Union[LLMMathChain, Callable, Chain]:
        """Create the math chain with the configured keys and optional memory."""
        return LLMMathChain(
            llm=llm,
            llm_chain=llm_chain,
            input_key=input_key,
            output_key=output_key,
            memory=memory,
        )

View file

@ -1,8 +1,8 @@
from langflow import CustomComponent
from langchain.llms.base import BaseLLM
from langchain.prompts import PromptTemplate
from langchain_core.documents import Document
from langflow import CustomComponent
from langchain.schema import Document
class PromptRunner(CustomComponent):

View file

@ -1,39 +0,0 @@
from typing import Callable, Optional, Union
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA
from langflow import CustomComponent
from langflow.field_typing import BaseMemory, BaseRetriever
class RetrievalQAComponent(CustomComponent):
    """Langflow wrapper for LangChain's RetrievalQA chain."""

    display_name = "RetrievalQA"
    description = "Chain for question-answering against an index."

    def build_config(self):
        """Field definitions rendered by the UI."""
        return {
            "combine_documents_chain": {"display_name": "Combine Documents Chain"},
            "retriever": {"display_name": "Retriever"},
            "memory": {"display_name": "Memory", "required": False},
            "input_key": {"display_name": "Input Key", "advanced": True},
            "output_key": {"display_name": "Output Key", "advanced": True},
            "return_source_documents": {"display_name": "Return Source Documents"},
        }

    def build(
        self,
        combine_documents_chain: BaseCombineDocumentsChain,
        retriever: BaseRetriever,
        memory: Optional[BaseMemory] = None,
        input_key: str = "query",
        output_key: str = "result",
        return_source_documents: bool = True,
    ) -> Union[BaseRetrievalQA, Callable]:
        """Assemble the QA chain from its retriever and combine chain."""
        chain_kwargs = dict(
            combine_documents_chain=combine_documents_chain,
            retriever=retriever,
            memory=memory,
            input_key=input_key,
            output_key=output_key,
            return_source_documents=return_source_documents,
        )
        return RetrievalQA(**chain_kwargs)

View file

@ -1,42 +0,0 @@
from typing import Optional
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, BaseMemory, BaseRetriever
class RetrievalQAWithSourcesChainComponent(CustomComponent):
    # Langflow component wrapping LangChain's question-answering-with-sources chain.
    display_name = "RetrievalQAWithSourcesChain"
    description = "Question-answering with sources over an index."

    def build_config(self):
        """Describe the UI fields for this component.

        NOTE(review): ``retriever`` and ``combine_documents_chain`` are
        required by ``build`` but have no entry here — confirm the UI still
        renders input handles for them.
        """
        return {
            "llm": {"display_name": "LLM"},
            "chain_type": {
                "display_name": "Chain Type",
                "options": ["stuff", "map_reduce", "map_rerank", "refine"],
            },
            "memory": {"display_name": "Memory"},
            "return_source_documents": {"display_name": "Return Source Documents"},
        }

    def build(
        self,
        retriever: BaseRetriever,
        llm: BaseLanguageModel,
        combine_documents_chain: BaseCombineDocumentsChain,
        chain_type: str,
        memory: Optional[BaseMemory] = None,
        return_source_documents: Optional[bool] = True,
    ) -> BaseQAWithSourcesChain:
        """Build the chain via ``RetrievalQAWithSourcesChain.from_chain_type``.

        NOTE(review): ``combine_documents_chain`` is passed alongside
        ``chain_type`` even though ``from_chain_type`` normally constructs its
        own combine chain from ``chain_type`` — verify this keyword is
        actually accepted and not silently forwarded to the constructor.
        """
        return RetrievalQAWithSourcesChain.from_chain_type(
            llm=llm,
            chain_type=chain_type,
            combine_documents_chain=combine_documents_chain,
            memory=memory,
            return_source_documents=return_source_documents,
            retriever=retriever,
        )

View file

@ -1,25 +0,0 @@
from langflow import CustomComponent
from typing import Callable, Union
from langflow.field_typing import BasePromptTemplate, BaseLanguageModel, Chain
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_experimental.sql.base import SQLDatabaseChain
class SQLDatabaseChainComponent(CustomComponent):
    """Langflow wrapper for the experimental SQLDatabaseChain."""

    display_name = "SQLDatabaseChain"
    description = ""

    def build_config(self):
        """Field definitions rendered by the UI."""
        fields = (("db", "Database"), ("llm", "LLM"), ("prompt", "Prompt"))
        return {name: {"display_name": label} for name, label in fields}

    def build(
        self,
        db: SQLDatabase,
        llm: BaseLanguageModel,
        prompt: BasePromptTemplate,
    ) -> Union[Chain, Callable, SQLDatabaseChain]:
        """Create the chain from the database handle, model and prompt."""
        return SQLDatabaseChain.from_llm(llm=llm, db=db, prompt=prompt)

View file

@ -1,42 +0,0 @@
from langflow import CustomComponent
from langchain.docstore.document import Document
from typing import Optional, Dict, Any
class DirectoryLoaderComponent(CustomComponent):
    """Langflow component describing a directory-load configuration."""

    display_name = "DirectoryLoader"
    description = "Load from a directory."

    def build_config(self) -> Dict[str, Any]:
        """Field definitions rendered by the UI."""
        return {
            "glob": {"display_name": "Glob Pattern", "value": "**/*.txt"},
            "load_hidden": {"display_name": "Load Hidden Files", "value": False, "advanced": True},
            "max_concurrency": {"display_name": "Max Concurrency", "value": 10, "advanced": True},
            "metadata": {"display_name": "Metadata", "value": {}},
            "path": {"display_name": "Local Directory"},
            "recursive": {"display_name": "Recursive", "value": True, "advanced": True},
            "silent_errors": {"display_name": "Silent Errors", "value": False, "advanced": True},
            "use_multithreading": {"display_name": "Use Multithreading", "value": True, "advanced": True},
        }

    def build(
        self,
        glob: str,
        path: str,
        load_hidden: Optional[bool] = False,
        max_concurrency: Optional[int] = 10,
        metadata: Optional[dict] = None,
        recursive: Optional[bool] = True,
        silent_errors: Optional[bool] = False,
        use_multithreading: Optional[bool] = True,
    ) -> Document:
        """Build the Document carrying the loader configuration.

        Bug fix: ``metadata`` previously defaulted to a shared mutable ``{}``
        that every call (and any downstream mutation) would share; it now
        defaults to ``None`` and a fresh dict is created per call.

        NOTE(review): loader options (glob, path, concurrency flags, ...) are
        passed straight to ``Document`` — confirm Document accepts these
        keyword arguments.
        """
        if metadata is None:
            metadata = {}
        return Document(
            glob=glob,
            path=path,
            load_hidden=load_hidden,
            max_concurrency=max_concurrency,
            metadata=metadata,
            recursive=recursive,
            silent_errors=silent_errors,
            use_multithreading=use_multithreading,
        )

View file

@ -1,4 +1,5 @@
from langchain_core.documents import Document
from langchain.schema import Document
from langflow import CustomComponent
from langflow.utils.constants import LOADERS_INFO

View file

@ -1,8 +1,7 @@
from typing import List
from langchain import document_loaders
from langchain_core.documents import Document
from langchain.schema import Document
from langflow import CustomComponent

View file

@ -1,65 +0,0 @@
from langchain.embeddings.base import Embeddings
from langchain_community.embeddings import AzureOpenAIEmbeddings
from langflow import CustomComponent
class AzureOpenAIEmbeddingsComponent(CustomComponent):
    """Embeddings component backed by Azure OpenAI deployments."""

    display_name: str = "AzureOpenAIEmbeddings"
    description: str = "Embeddings model from Azure OpenAI."
    documentation: str = "https://python.langchain.com/docs/integrations/text_embedding/azureopenai"
    beta = False

    # Known Azure OpenAI API versions; the newest (last entry) is the default.
    API_VERSION_OPTIONS = [
        "2022-12-01",
        "2023-03-15-preview",
        "2023-05-15",
        "2023-06-01-preview",
        "2023-07-01-preview",
        "2023-08-01-preview",
    ]

    def build_config(self):
        """Field definitions rendered by the UI."""
        endpoint_field = {
            "display_name": "Azure Endpoint",
            "required": True,
            "info": "Your Azure endpoint, including the resource.. Example: `https://example-resource.azure.openai.com/`",
        }
        version_field = {
            "display_name": "API Version",
            "options": self.API_VERSION_OPTIONS,
            "value": self.API_VERSION_OPTIONS[-1],
            "advanced": True,
        }
        return {
            "azure_endpoint": endpoint_field,
            "azure_deployment": {"display_name": "Deployment Name", "required": True},
            "api_version": version_field,
            "api_key": {"display_name": "API Key", "required": True, "password": True},
            "code": {"show": False},
        }

    def build(
        self,
        azure_endpoint: str,
        azure_deployment: str,
        api_version: str,
        api_key: str,
    ) -> Embeddings:
        """Instantiate the embeddings client, wrapping constructor failures."""
        try:
            return AzureOpenAIEmbeddings(
                azure_endpoint=azure_endpoint,
                azure_deployment=azure_deployment,
                api_version=api_version,
                api_key=api_key,
            )
        except Exception as e:
            raise ValueError("Could not connect to AzureOpenAIEmbeddings API.") from e

View file

@ -1,36 +0,0 @@
from typing import Optional
from langchain_community.embeddings.cohere import CohereEmbeddings
from langflow import CustomComponent
class CohereEmbeddingsComponent(CustomComponent):
    """Embeddings component backed by Cohere."""

    display_name = "CohereEmbeddings"
    description = "Cohere embedding models."

    def build_config(self):
        """Field definitions rendered by the UI."""
        advanced = {"advanced": True}
        return {
            "cohere_api_key": {"display_name": "Cohere API Key", "password": True},
            "model": {"display_name": "Model", "default": "embed-english-v2.0", **advanced},
            "truncate": {"display_name": "Truncate", **advanced},
            "max_retries": {"display_name": "Max Retries", **advanced},
            "user_agent": {"display_name": "User Agent", **advanced},
        }

    def build(
        self,
        request_timeout: Optional[float] = None,
        cohere_api_key: str = "",
        max_retries: Optional[int] = None,
        model: str = "embed-english-v2.0",
        truncate: Optional[str] = None,
        user_agent: str = "langchain",
    ) -> CohereEmbeddings:
        """Create the Cohere embeddings client from the collected options."""
        return CohereEmbeddings(  # type: ignore
            max_retries=max_retries,
            user_agent=user_agent,
            request_timeout=request_timeout,
            cohere_api_key=cohere_api_key,
            model=model,
            truncate=truncate,
        )

View file

@ -1,36 +0,0 @@
from langflow import CustomComponent
from typing import Optional, Dict
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
class HuggingFaceEmbeddingsComponent(CustomComponent):
    """Embeddings component backed by sentence-transformers models."""

    display_name = "HuggingFaceEmbeddings"
    description = "HuggingFace sentence_transformers embedding models."
    documentation = (
        "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/sentence_transformers"
    )

    def build_config(self):
        """Field definitions rendered by the UI."""
        return {
            "cache_folder": {"display_name": "Cache Folder", "advanced": True},
            "encode_kwargs": {"display_name": "Encode Kwargs", "advanced": True, "field_type": "dict"},
            "model_kwargs": {"display_name": "Model Kwargs", "field_type": "dict", "advanced": True},
            "model_name": {"display_name": "Model Name"},
            "multi_process": {"display_name": "Multi Process", "advanced": True},
        }

    def build(
        self,
        cache_folder: Optional[str] = None,
        encode_kwargs: Optional[Dict] = None,
        model_kwargs: Optional[Dict] = None,
        model_name: str = "sentence-transformers/all-mpnet-base-v2",
        multi_process: bool = False,
    ) -> HuggingFaceEmbeddings:
        """Create the HuggingFace embeddings wrapper.

        Bug fix: ``encode_kwargs`` and ``model_kwargs`` previously defaulted
        to shared mutable ``{}`` objects; they now default to ``None`` and a
        fresh dict is substituted per call, preserving the effective default.
        """
        return HuggingFaceEmbeddings(
            cache_folder=cache_folder,
            encode_kwargs=encode_kwargs if encode_kwargs is not None else {},
            model_kwargs=model_kwargs if model_kwargs is not None else {},
            model_name=model_name,
            multi_process=multi_process,
        )

View file

@ -1,38 +0,0 @@
from typing import Optional
from langflow import CustomComponent
from langchain.embeddings.base import Embeddings
from langchain_community.embeddings import OllamaEmbeddings
class OllamaEmbeddingsComponent(CustomComponent):
    """Embeddings component backed by a local Ollama server."""

    display_name: str = "Ollama Embeddings"
    description: str = "Embeddings model from Ollama."
    documentation = "https://python.langchain.com/docs/integrations/text_embedding/ollama"
    beta = True

    def build_config(self):
        """Field definitions rendered by the UI."""
        return {
            "model": {"display_name": "Ollama Model"},
            "base_url": {"display_name": "Ollama Base URL"},
            "temperature": {"display_name": "Model Temperature"},
            "code": {"show": False},
        }

    def build(
        self,
        model: str = "llama2",
        base_url: str = "http://localhost:11434",
        temperature: Optional[float] = None,
    ) -> Embeddings:
        """Instantiate the Ollama embeddings client, wrapping constructor failures."""
        try:
            return OllamaEmbeddings(model=model, base_url=base_url, temperature=temperature)  # type: ignore
        except Exception as e:
            raise ValueError("Could not connect to Ollama API.") from e

View file

@ -1,121 +0,0 @@
from typing import Any, Callable, Dict, List, Optional, Union
from langchain_openai.embeddings.base import OpenAIEmbeddings
from langflow import CustomComponent
from langflow.field_typing import NestedDict
class OpenAIEmbeddingsComponent(CustomComponent):
    """Langflow component exposing OpenAI embedding models."""

    display_name = "OpenAIEmbeddings"
    description = "OpenAI embedding models"

    def build_config(self):
        """Field definitions rendered by the UI."""
        return {
            "allowed_special": {
                "display_name": "Allowed Special",
                "advanced": True,
                "field_type": "str",
                "is_list": True,
            },
            "default_headers": {
                "display_name": "Default Headers",
                "advanced": True,
                "field_type": "dict",
            },
            "default_query": {
                "display_name": "Default Query",
                "advanced": True,
                "field_type": "NestedDict",
            },
            "disallowed_special": {
                "display_name": "Disallowed Special",
                "advanced": True,
                "field_type": "str",
                "is_list": True,
            },
            "chunk_size": {"display_name": "Chunk Size", "advanced": True},
            "client": {"display_name": "Client", "advanced": True},
            "deployment": {"display_name": "Deployment", "advanced": True},
            "embedding_ctx_length": {
                "display_name": "Embedding Context Length",
                "advanced": True,
            },
            "max_retries": {"display_name": "Max Retries", "advanced": True},
            "model": {
                "display_name": "Model",
                "advanced": False,
                "options": ["text-embedding-3-small", "text-embedding-3-large", "text-embedding-ada-002"],
            },
            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
            "openai_api_base": {"display_name": "OpenAI API Base", "password": True, "advanced": True},
            "openai_api_key": {"display_name": "OpenAI API Key", "password": True},
            "openai_api_type": {"display_name": "OpenAI API Type", "advanced": True, "password": True},
            "openai_api_version": {
                "display_name": "OpenAI API Version",
                "advanced": True,
            },
            "openai_organization": {
                "display_name": "OpenAI Organization",
                "advanced": True,
            },
            "openai_proxy": {"display_name": "OpenAI Proxy", "advanced": True},
            "request_timeout": {"display_name": "Request Timeout", "advanced": True},
            "show_progress_bar": {
                "display_name": "Show Progress Bar",
                "advanced": True,
            },
            "skip_empty": {"display_name": "Skip Empty", "advanced": True},
            "tiktoken_model_name": {"display_name": "TikToken Model Name"},
            "tikToken_enable": {"display_name": "TikToken Enable"},
        }

    def build(
        self,
        default_headers: Optional[Dict[str, str]] = None,
        default_query: Optional[NestedDict] = None,
        allowed_special: Optional[List[str]] = None,
        disallowed_special: Optional[List[str]] = None,
        chunk_size: int = 1000,
        client: Optional[Any] = None,
        deployment: str = "text-embedding-3-small",
        embedding_ctx_length: int = 8191,
        max_retries: int = 6,
        model: str = "text-embedding-3-small",
        model_kwargs: Optional[NestedDict] = None,
        openai_api_base: Optional[str] = None,
        openai_api_key: Optional[str] = "",
        openai_api_type: Optional[str] = None,
        openai_api_version: Optional[str] = None,
        openai_organization: Optional[str] = None,
        openai_proxy: Optional[str] = None,
        request_timeout: Optional[float] = None,
        show_progress_bar: bool = False,
        skip_empty: bool = False,
        tikToken_enable: bool = True,
        tiktoken_model_name: Optional[str] = None,
    ) -> Union[OpenAIEmbeddings, Callable]:
        """Create the OpenAI embeddings client.

        Bug fix: ``default_query``, ``allowed_special``, ``disallowed_special``
        and ``model_kwargs`` previously used shared mutable defaults
        (``{}``, ``[]``, ``["all"]``, ``{}``); they now default to ``None``
        and fresh per-call values matching the original effective defaults
        are substituted below.
        """
        if default_query is None:
            default_query = {}
        if allowed_special is None:
            allowed_special = []
        if disallowed_special is None:
            disallowed_special = ["all"]
        if model_kwargs is None:
            model_kwargs = {}
        return OpenAIEmbeddings(
            tiktoken_enabled=tikToken_enable,
            default_headers=default_headers,
            default_query=default_query,
            allowed_special=set(allowed_special),
            disallowed_special=set(disallowed_special),
            chunk_size=chunk_size,
            client=client,
            deployment=deployment,
            embedding_ctx_length=embedding_ctx_length,
            max_retries=max_retries,
            model=model,
            model_kwargs=model_kwargs,
            base_url=openai_api_base,
            api_key=openai_api_key,
            openai_api_type=openai_api_type,
            api_version=openai_api_version,
            organization=openai_organization,
            openai_proxy=openai_proxy,
            timeout=request_timeout,
            show_progress_bar=show_progress_bar,
            skip_empty=skip_empty,
            tiktoken_model_name=tiktoken_model_name,
        )

View file

@ -1,60 +0,0 @@
from langflow import CustomComponent
from langchain.embeddings import VertexAIEmbeddings
from typing import Optional, List
class VertexAIEmbeddingsComponent(CustomComponent):
    """Embeddings component backed by Google Cloud Vertex AI."""

    display_name = "VertexAIEmbeddings"
    description = "Google Cloud VertexAI embedding models."

    def build_config(self):
        """Field definitions rendered by the UI."""
        return {
            "credentials": {"display_name": "Credentials", "value": "", "file_types": [".json"], "field_type": "file"},
            "instance": {"display_name": "instance", "advanced": True, "field_type": "dict"},
            "location": {"display_name": "Location", "value": "us-central1", "advanced": True},
            "max_output_tokens": {"display_name": "Max Output Tokens", "value": 128},
            "max_retries": {"display_name": "Max Retries", "value": 6, "advanced": True},
            "model_name": {"display_name": "Model Name", "value": "textembedding-gecko"},
            "n": {"display_name": "N", "value": 1, "advanced": True},
            "project": {"display_name": "Project", "advanced": True},
            "request_parallelism": {"display_name": "Request Parallelism", "value": 5, "advanced": True},
            "stop": {"display_name": "Stop", "advanced": True},
            "streaming": {"display_name": "Streaming", "value": False, "advanced": True},
            "temperature": {"display_name": "Temperature", "value": 0.0},
            "top_k": {"display_name": "Top K", "value": 40, "advanced": True},
            "top_p": {"display_name": "Top P", "value": 0.95, "advanced": True},
        }

    def build(
        self,
        instance: Optional[str] = None,
        credentials: Optional[str] = None,
        location: str = "us-central1",
        max_output_tokens: int = 128,
        max_retries: int = 6,
        model_name: str = "textembedding-gecko",
        n: int = 1,
        project: Optional[str] = None,
        request_parallelism: int = 5,
        stop: Optional[List[str]] = None,
        streaming: bool = False,
        temperature: float = 0.0,
        top_k: int = 40,
        top_p: float = 0.95,
    ) -> VertexAIEmbeddings:
        """Create the Vertex AI embeddings client from the collected options.

        NOTE(review): ``instance`` is annotated ``Optional[str]`` while the
        config declares it as a dict field — confirm which is intended.
        """
        # Forward every build argument unchanged as keyword arguments.
        options = {
            "instance": instance,
            "credentials": credentials,
            "location": location,
            "max_output_tokens": max_output_tokens,
            "max_retries": max_retries,
            "model_name": model_name,
            "n": n,
            "project": project,
            "request_parallelism": request_parallelism,
            "stop": stop,
            "streaming": streaming,
            "temperature": temperature,
            "top_k": top_k,
            "top_p": top_p,
        }
        return VertexAIEmbeddings(**options)

View file

@ -1,8 +1,7 @@
from typing import Optional
from langchain.llms.base import BaseLLM
from langchain.llms.bedrock import Bedrock
from langflow import CustomComponent
from langchain.llms.bedrock import Bedrock
from langchain.llms.base import BaseLLM
class AmazonBedrockComponent(CustomComponent):
@ -28,32 +27,18 @@ class AmazonBedrockComponent(CustomComponent):
},
"credentials_profile_name": {"display_name": "Credentials Profile Name"},
"streaming": {"display_name": "Streaming", "field_type": "bool"},
"endpoint_url": {"display_name": "Endpoint URL"},
"region_name": {"display_name": "Region Name"},
"model_kwargs": {"display_name": "Model Kwargs"},
"cache": {"display_name": "Cache"},
"code": {"advanced": True},
"code": {"show": False},
}
def build(
self,
model_id: str = "anthropic.claude-instant-v1",
credentials_profile_name: Optional[str] = None,
region_name: Optional[str] = None,
model_kwargs: Optional[dict] = None,
endpoint_url: Optional[str] = None,
streaming: bool = False,
cache: Optional[bool] = None,
) -> BaseLLM:
try:
output = Bedrock(
credentials_profile_name=credentials_profile_name,
model_id=model_id,
region_name=region_name,
model_kwargs=model_kwargs,
endpoint_url=endpoint_url,
streaming=streaming,
cache=cache,
) # type: ignore
except Exception as e:
raise ValueError("Could not connect to AmazonBedrock API.") from e

View file

@ -1,48 +0,0 @@
from typing import Optional
from langchain_community.llms.anthropic import Anthropic
from pydantic.v1 import SecretStr
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, NestedDict
class AnthropicComponent(CustomComponent):
    """Langflow wrapper for the Anthropic completion LLM."""

    display_name = "Anthropic"
    description = "Anthropic large language models."

    def build_config(self):
        """Field definitions rendered by the UI."""
        return {
            "anthropic_api_key": {
                "display_name": "Anthropic API Key",
                "type": str,
                "password": True,
            },
            "anthropic_api_url": {
                "display_name": "Anthropic API URL",
                "type": str,
            },
            "model_kwargs": {
                "display_name": "Model Kwargs",
                "field_type": "NestedDict",
                "advanced": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
            },
        }

    def build(
        self,
        anthropic_api_key: str,
        anthropic_api_url: str,
        model_kwargs: Optional[NestedDict] = None,
        temperature: Optional[float] = None,
    ) -> BaseLanguageModel:
        """Create the Anthropic client.

        Bug fix: ``model_kwargs`` previously defaulted to a shared mutable
        ``{}``; it now defaults to ``None`` and a fresh dict is substituted
        per call, preserving the effective default.
        """
        return Anthropic(
            anthropic_api_key=SecretStr(anthropic_api_key),
            anthropic_api_url=anthropic_api_url,
            model_kwargs=model_kwargs if model_kwargs is not None else {},
            temperature=temperature,
        )

View file

@ -1,6 +1,6 @@
from typing import Optional
from langchain_community.chat_models.anthropic import ChatAnthropic
from langchain.chat_models.anthropic import ChatAnthropic
from langchain.llms.base import BaseLanguageModel
from pydantic.v1 import SecretStr

View file

@ -1,14 +1,13 @@
from typing import Optional
from langflow import CustomComponent
from langchain.llms.base import BaseLanguageModel
from langchain_community.chat_models.azure_openai import AzureChatOpenAI
from langchain.chat_models.azure_openai import AzureChatOpenAI
class AzureChatOpenAIComponent(CustomComponent):
display_name: str = "AzureChatOpenAI"
description: str = "LLM model from Azure OpenAI."
documentation: str = "https://python.langchain.com/docs/integrations/llms/azure_openai"
beta = False
AZURE_OPENAI_MODELS = [
"gpt-35-turbo",
@ -19,21 +18,11 @@ class AzureChatOpenAIComponent(CustomComponent):
"gpt-4-vision",
]
AZURE_OPENAI_API_VERSIONS = [
"2023-03-15-preview",
"2023-05-15",
"2023-06-01-preview",
"2023-07-01-preview",
"2023-08-01-preview",
"2023-09-01-preview",
"2023-12-01-preview",
]
def build_config(self):
return {
"model": {
"display_name": "Model Name",
"value": self.AZURE_OPENAI_MODELS[0],
"value": "gpt-35-turbo",
"options": self.AZURE_OPENAI_MODELS,
"required": True,
},
@ -48,8 +37,7 @@ class AzureChatOpenAIComponent(CustomComponent):
},
"api_version": {
"display_name": "API Version",
"options": self.AZURE_OPENAI_API_VERSIONS,
"value": self.AZURE_OPENAI_API_VERSIONS[-1],
"value": "2023-05-15",
"required": True,
"advanced": True,
},
@ -66,7 +54,6 @@ class AzureChatOpenAIComponent(CustomComponent):
"required": False,
"field_type": "int",
"advanced": True,
"info": "Maximum number of tokens to generate.",
},
"code": {"show": False},
}
@ -77,20 +64,16 @@ class AzureChatOpenAIComponent(CustomComponent):
azure_endpoint: str,
azure_deployment: str,
api_key: str,
api_version: str,
api_version: str = "2023-05-15",
temperature: float = 0.7,
max_tokens: Optional[int] = 1000,
) -> BaseLanguageModel:
try:
llm = AzureChatOpenAI(
model=model,
azure_endpoint=azure_endpoint,
azure_deployment=azure_deployment,
api_version=api_version,
api_key=api_key,
temperature=temperature,
max_tokens=max_tokens,
)
except Exception as e:
raise ValueError("Could not connect to AzureOpenAI API.") from e
return llm
return AzureChatOpenAI(
model=model,
azure_endpoint=azure_endpoint,
azure_deployment=azure_deployment,
api_version=api_version,
api_key=api_key,
temperature=temperature,
max_tokens=max_tokens,
)

View file

@ -1,6 +1,6 @@
from typing import Optional
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from langchain.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from langchain.llms.base import BaseLLM
from pydantic.v1 import SecretStr

View file

@ -1,33 +0,0 @@
from typing import Dict, Optional
from langchain_community.llms.ctransformers import CTransformers
from langflow import CustomComponent
class CTransformersComponent(CustomComponent):
    """Langflow wrapper for CTransformers local LLM models."""

    display_name = "CTransformers"
    description = "C Transformers LLM models"
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"

    def build_config(self):
        """Field definitions rendered by the UI."""
        default_config = '{"top_k":40,"top_p":0.95,"temperature":0.8,"repetition_penalty":1.1,"last_n_tokens":64,"seed":-1,"max_new_tokens":256,"stop":"","stream":"False","reset":"True","batch_size":8,"threads":-1,"context_length":-1,"gpu_layers":0}'
        return {
            "model": {"display_name": "Model", "required": True},
            "model_file": {
                "display_name": "Model File",
                "required": False,
                "field_type": "file",
                "file_types": [".bin"],
            },
            "model_type": {"display_name": "Model Type", "required": True},
            "config": {
                "display_name": "Config",
                "advanced": True,
                "required": False,
                "field_type": "dict",
                "value": default_config,
            },
        }

    def build(self, model: str, model_file: str, model_type: str, config: Optional[Dict] = None) -> CTransformers:
        """Create the CTransformers LLM wrapper."""
        return CTransformers(model=model, model_file=model_file, model_type=model_type, config=config)  # type: ignore

View file

@ -1,47 +0,0 @@
from pydantic import SecretStr
from langflow import CustomComponent
from typing import Optional, Union, Callable
from langflow.field_typing import BaseLanguageModel
from langchain_community.chat_models.anthropic import ChatAnthropic
class ChatAnthropicComponent(CustomComponent):
    """Langflow wrapper for Anthropic chat models."""

    display_name = "ChatAnthropic"
    description = "`Anthropic` chat large language models."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/chat/integrations/anthropic"

    def build_config(self):
        """Field definitions rendered by the UI."""
        return {
            "anthropic_api_key": {
                "display_name": "Anthropic API Key",
                "field_type": "str",
                "password": True,
            },
            "anthropic_api_url": {
                "display_name": "Anthropic API URL",
                "field_type": "str",
            },
            "model_kwargs": {
                "display_name": "Model Kwargs",
                "field_type": "dict",
                "advanced": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
            },
        }

    def build(
        self,
        anthropic_api_key: str,
        anthropic_api_url: Optional[str] = None,
        model_kwargs: Optional[dict] = None,
        temperature: Optional[float] = None,
    ) -> Union[BaseLanguageModel, Callable]:
        """Create the ChatAnthropic client.

        Bug fix: ``model_kwargs`` previously defaulted to a shared mutable
        ``{}``; it now defaults to ``None`` and a fresh dict is substituted
        per call, preserving the effective default.
        """
        return ChatAnthropic(
            anthropic_api_key=SecretStr(anthropic_api_key),
            anthropic_api_url=anthropic_api_url,
            model_kwargs=model_kwargs if model_kwargs is not None else {},
            temperature=temperature,
        )

View file

@ -1,8 +1,8 @@
from typing import Any, Dict, List, Optional
# from langchain_community.chat_models import ChatOllama
from langchain_community.chat_models import ChatOllama
from langchain_core.language_models.chat_models import BaseChatModel
from langchain.chat_models import ChatOllama
from langchain.chat_models.base import BaseChatModel
# from langchain.chat_models import ChatOllama
from langflow import CustomComponent

View file

@ -1,86 +0,0 @@
from typing import Optional, Union
from langchain.llms import BaseLLM
from langchain_community.chat_models.openai import ChatOpenAI
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, NestedDict
class ChatOpenAIComponent(CustomComponent):
    """Langflow wrapper for the OpenAI chat-completions API."""

    display_name = "ChatOpenAI"
    description = "`OpenAI` Chat large language models API."

    def build_config(self):
        """Field definitions rendered by the UI."""
        return {
            "max_tokens": {
                "display_name": "Max Tokens",
                "field_type": "int",
                "advanced": False,
                "required": False,
            },
            "model_kwargs": {
                "display_name": "Model Kwargs",
                "field_type": "NestedDict",
                "advanced": True,
                "required": False,
            },
            "model_name": {
                "display_name": "Model Name",
                "field_type": "str",
                "advanced": False,
                "required": False,
                "options": [
                    "gpt-4-turbo-preview",
                    "gpt-4-0125-preview",
                    "gpt-4-1106-preview",
                    "gpt-4-vision-preview",
                    "gpt-3.5-turbo-0125",
                    "gpt-3.5-turbo-1106",
                ],
            },
            "openai_api_base": {
                "display_name": "OpenAI API Base",
                "field_type": "str",
                "advanced": False,
                "required": False,
                "info": (
                    "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\n"
                    "You can change this to use other APIs like JinaChat, LocalAI and Prem."
                ),
            },
            "openai_api_key": {
                "display_name": "OpenAI API Key",
                "field_type": "str",
                "advanced": False,
                "required": False,
                "password": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
                "advanced": False,
                "required": False,
                "value": 0.7,
            },
        }

    def build(
        self,
        max_tokens: Optional[int] = 256,
        model_kwargs: Optional[NestedDict] = None,
        model_name: str = "gpt-4-1106-preview",
        openai_api_base: Optional[str] = None,
        openai_api_key: Optional[str] = None,
        temperature: float = 0.7,
    ) -> Union[BaseLanguageModel, BaseLLM]:
        """Create the ChatOpenAI client.

        Bug fix: ``model_kwargs`` previously defaulted to a shared mutable
        ``{}``; it now defaults to ``None`` and a fresh dict is substituted
        per call, preserving the effective default.
        """
        # Fall back to the public OpenAI endpoint when no base URL is given.
        if not openai_api_base:
            openai_api_base = "https://api.openai.com/v1"
        return ChatOpenAI(
            max_tokens=max_tokens,
            model_kwargs=model_kwargs if model_kwargs is not None else {},
            model=model_name,
            base_url=openai_api_base,
            api_key=openai_api_key,
            temperature=temperature,
        )

View file

@ -1,87 +0,0 @@
from typing import List, Optional, Union
from langchain.llms import BaseLLM
from langchain_community.chat_models.vertexai import ChatVertexAI
from langchain_core.messages.base import BaseMessage
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel
class ChatVertexAIComponent(CustomComponent):
    """Langflow component wrapping the Vertex AI chat model."""

    display_name = "ChatVertexAI"
    description = "`Vertex AI` Chat large language models API."

    def build_config(self):
        """Return the UI field configuration for this component."""
        return {
            "credentials": {
                "display_name": "Credentials",
                "field_type": "file",
                "file_types": [".json"],
                "file_path": None,
            },
            "examples": {
                "display_name": "Examples",
                "multiline": True,
            },
            "location": {
                "display_name": "Location",
                "value": "us-central1",
            },
            "max_output_tokens": {
                "display_name": "Max Output Tokens",
                "value": 128,
                "advanced": True,
            },
            "model_name": {
                "display_name": "Model Name",
                "value": "chat-bison",
            },
            "project": {
                "display_name": "Project",
            },
            "temperature": {
                "display_name": "Temperature",
                "value": 0.0,
            },
            "top_k": {
                "display_name": "Top K",
                "value": 40,
                "advanced": True,
            },
            "top_p": {
                "display_name": "Top P",
                "value": 0.95,
                "advanced": True,
            },
            "verbose": {
                "display_name": "Verbose",
                "value": False,
                "advanced": True,
            },
        }

    def build(
        self,
        credentials: Optional[str],
        project: str,
        # None sentinel instead of a mutable `[]` default shared across calls.
        examples: Optional[List[BaseMessage]] = None,
        location: str = "us-central1",
        max_output_tokens: int = 128,
        model_name: str = "chat-bison",
        temperature: float = 0.0,
        top_k: int = 40,
        top_p: float = 0.95,
        verbose: bool = False,
    ) -> Union[BaseLanguageModel, BaseLLM]:
        """Instantiate a `ChatVertexAI` model.

        Args:
            credentials: Path to a service-account JSON credentials file.
            project: GCP project identifier.
            examples: Few-shot example messages for the chat model.
            location: GCP region.
            max_output_tokens: Maximum number of tokens to generate.
            model_name: Vertex AI chat model identifier.
            temperature: Sampling temperature.
            top_k: Top-k sampling cutoff.
            top_p: Nucleus-sampling cutoff.
            verbose: Whether to enable verbose model logging.

        Returns:
            A configured `ChatVertexAI` instance.
        """
        return ChatVertexAI(
            credentials=credentials,
            examples=examples or [],
            location=location,
            max_output_tokens=max_output_tokens,
            model_name=model_name,
            project=project,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            verbose=verbose,
        )

View file

@ -1,24 +0,0 @@
from langchain_community.llms.cohere import Cohere
from langchain_core.language_models.base import BaseLanguageModel
from langflow import CustomComponent
class CohereComponent(CustomComponent):
    """Expose the Cohere LLM as a Langflow component."""

    display_name = "Cohere"
    description = "Cohere large language models."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        api_key_field = {"display_name": "Cohere API Key", "type": "password", "password": True}
        max_tokens_field = {"display_name": "Max Tokens", "default": 256, "type": "int", "show": True}
        temperature_field = {"display_name": "Temperature", "default": 0.75, "type": "float", "show": True}
        return {
            "cohere_api_key": api_key_field,
            "max_tokens": max_tokens_field,
            "temperature": temperature_field,
        }

    def build(
        self,
        cohere_api_key: str,
        max_tokens: int = 256,
        temperature: float = 0.75,
    ) -> BaseLanguageModel:
        """Create a `Cohere` LLM with the given credentials and sampling options."""
        return Cohere(cohere_api_key=cohere_api_key, max_tokens=max_tokens, temperature=temperature)  # type: ignore

View file

@ -1,129 +0,0 @@
from typing import Optional, List, Dict, Any
from langflow import CustomComponent
from langchain_community.llms.llamacpp import LlamaCpp
class LlamaCppComponent(CustomComponent):
    """Langflow component wrapping a local llama.cpp model."""

    display_name = "LlamaCpp"
    description = "llama.cpp model."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"

    def build_config(self):
        """Return the UI field configuration; most knobs are advanced-only."""
        return {
            "grammar": {"display_name": "Grammar", "advanced": True},
            "cache": {"display_name": "Cache", "advanced": True},
            "client": {"display_name": "Client", "advanced": True},
            "echo": {"display_name": "Echo", "advanced": True},
            "f16_kv": {"display_name": "F16 KV", "advanced": True},
            "grammar_path": {"display_name": "Grammar Path", "advanced": True},
            "last_n_tokens_size": {"display_name": "Last N Tokens Size", "advanced": True},
            "logits_all": {"display_name": "Logits All", "advanced": True},
            "logprobs": {"display_name": "Logprobs", "advanced": True},
            "lora_base": {"display_name": "Lora Base", "advanced": True},
            "lora_path": {"display_name": "Lora Path", "advanced": True},
            "max_tokens": {"display_name": "Max Tokens", "advanced": True},
            "metadata": {"display_name": "Metadata", "advanced": True},
            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
            "model_path": {
                "display_name": "Model Path",
                "field_type": "file",
                "file_types": [".bin"],
                "required": True,
            },
            "n_batch": {"display_name": "N Batch", "advanced": True},
            "n_ctx": {"display_name": "N Ctx", "advanced": True},
            "n_gpu_layers": {"display_name": "N GPU Layers", "advanced": True},
            "n_parts": {"display_name": "N Parts", "advanced": True},
            "n_threads": {"display_name": "N Threads", "advanced": True},
            "repeat_penalty": {"display_name": "Repeat Penalty", "advanced": True},
            "rope_freq_base": {"display_name": "Rope Freq Base", "advanced": True},
            "rope_freq_scale": {"display_name": "Rope Freq Scale", "advanced": True},
            "seed": {"display_name": "Seed", "advanced": True},
            "stop": {"display_name": "Stop", "advanced": True},
            "streaming": {"display_name": "Streaming", "advanced": True},
            "suffix": {"display_name": "Suffix", "advanced": True},
            "tags": {"display_name": "Tags", "advanced": True},
            "temperature": {"display_name": "Temperature"},
            "top_k": {"display_name": "Top K", "advanced": True},
            "top_p": {"display_name": "Top P", "advanced": True},
            "use_mlock": {"display_name": "Use Mlock", "advanced": True},
            "use_mmap": {"display_name": "Use Mmap", "advanced": True},
            "verbose": {"display_name": "Verbose", "advanced": True},
            "vocab_only": {"display_name": "Vocab Only", "advanced": True},
        }

    def build(
        self,
        model_path: str,
        grammar: Optional[str] = None,
        cache: Optional[bool] = None,
        client: Optional[Any] = None,
        echo: Optional[bool] = False,
        f16_kv: bool = True,
        grammar_path: Optional[str] = None,
        last_n_tokens_size: Optional[int] = 64,
        logits_all: bool = False,
        logprobs: Optional[int] = None,
        lora_base: Optional[str] = None,
        lora_path: Optional[str] = None,
        max_tokens: Optional[int] = 256,
        metadata: Optional[Dict] = None,
        # None sentinels instead of mutable `{}` / `[]` defaults, which are
        # shared across calls and could be mutated by the model class.
        model_kwargs: Optional[Dict] = None,
        n_batch: Optional[int] = 8,
        n_ctx: int = 512,
        n_gpu_layers: Optional[int] = 1,
        n_parts: int = -1,
        n_threads: Optional[int] = 1,
        repeat_penalty: Optional[float] = 1.1,
        rope_freq_base: float = 10000.0,
        rope_freq_scale: float = 1.0,
        seed: int = -1,
        stop: Optional[List[str]] = None,
        streaming: bool = True,
        suffix: Optional[str] = "",
        tags: Optional[List[str]] = None,
        temperature: Optional[float] = 0.8,
        top_k: Optional[int] = 40,
        top_p: Optional[float] = 0.95,
        use_mlock: bool = False,
        use_mmap: Optional[bool] = True,
        verbose: bool = True,
        vocab_only: bool = False,
    ) -> LlamaCpp:
        """Instantiate a `LlamaCpp` model from a local model file.

        Args:
            model_path: Path to the `.bin` model file (required).
            All other parameters are forwarded unchanged to `LlamaCpp`.

        Returns:
            A configured `LlamaCpp` instance.
        """
        return LlamaCpp(
            model_path=model_path,
            grammar=grammar,
            cache=cache,
            client=client,
            echo=echo,
            f16_kv=f16_kv,
            grammar_path=grammar_path,
            last_n_tokens_size=last_n_tokens_size,
            logits_all=logits_all,
            logprobs=logprobs,
            lora_base=lora_base,
            lora_path=lora_path,
            max_tokens=max_tokens,
            metadata=metadata,
            model_kwargs=model_kwargs or {},
            n_batch=n_batch,
            n_ctx=n_ctx,
            n_gpu_layers=n_gpu_layers,
            n_parts=n_parts,
            n_threads=n_threads,
            repeat_penalty=repeat_penalty,
            rope_freq_base=rope_freq_base,
            rope_freq_scale=rope_freq_scale,
            seed=seed,
            stop=stop if stop is not None else [],
            streaming=streaming,
            suffix=suffix,
            tags=tags if tags is not None else [],
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            use_mlock=use_mlock,
            use_mmap=use_mmap,
            verbose=verbose,
            vocab_only=vocab_only,
        )

View file

@ -1,7 +1,7 @@
from typing import List, Optional
from typing import Optional, List
from langchain.llms import Ollama
from langchain.llms.base import BaseLLM
from langchain_community.llms.ollama import Ollama
from langflow import CustomComponent
@ -133,25 +133,30 @@ class OllamaLLM(CustomComponent):
mirostat_eta = None
mirostat_tau = None
try:
llm = Ollama(
base_url=base_url,
model=model,
mirostat=mirostat_value,
mirostat_eta=mirostat_eta,
mirostat_tau=mirostat_tau,
num_ctx=num_ctx,
num_gpu=num_gpu,
num_thread=num_thread,
repeat_last_n=repeat_last_n,
repeat_penalty=repeat_penalty,
temperature=temperature,
stop=stop,
tfs_z=tfs_z,
top_k=top_k,
top_p=top_p,
)
llm_params = {
"base_url": base_url,
"model": model,
"mirostat": mirostat_value,
"mirostat_eta": mirostat_eta,
"mirostat_tau": mirostat_tau,
"num_ctx": num_ctx,
"num_gpu": num_gpu,
"num_thread": num_thread,
"repeat_last_n": repeat_last_n,
"repeat_penalty": repeat_penalty,
"temperature": temperature,
"stop": stop,
"tfs_z": tfs_z,
"top_k": top_k,
"top_p": top_p,
}
# None Value remove
llm_params = {k: v for k, v in llm_params.items() if v is not None}
try:
llm = Ollama(**llm_params)
except Exception as e:
raise ValueError("Could not connect to Ollama.") from e

View file

@ -1,147 +0,0 @@
from langflow import CustomComponent
from langchain.llms import BaseLLM
from typing import Optional, Union, Callable, Dict
from langchain_community.llms.vertexai import VertexAI
class VertexAIComponent(CustomComponent):
    """Langflow component wrapping the Google Vertex AI text model."""

    display_name = "VertexAI"
    description = "Google Vertex AI large language models"

    def build_config(self):
        """Return the UI field configuration for this component."""
        return {
            "credentials": {
                "display_name": "Credentials",
                "field_type": "file",
                "file_types": [".json"],
                "required": False,
                "value": None,
            },
            "location": {
                "display_name": "Location",
                "type": "str",
                "advanced": True,
                "value": "us-central1",
                "required": False,
            },
            "max_output_tokens": {
                "display_name": "Max Output Tokens",
                "field_type": "int",
                "value": 128,
                "required": False,
                "advanced": True,
            },
            "max_retries": {
                "display_name": "Max Retries",
                "type": "int",
                "value": 6,
                "required": False,
                "advanced": True,
            },
            "metadata": {
                "display_name": "Metadata",
                "field_type": "dict",
                "required": False,
                "default": {},
            },
            "model_name": {
                "display_name": "Model Name",
                "type": "str",
                "value": "text-bison",
                "required": False,
            },
            "n": {
                "advanced": True,
                "display_name": "N",
                "field_type": "int",
                "value": 1,
                "required": False,
            },
            "project": {
                "display_name": "Project",
                "type": "str",
                "required": False,
                "default": None,
            },
            "request_parallelism": {
                "display_name": "Request Parallelism",
                "field_type": "int",
                "value": 5,
                "required": False,
                "advanced": True,
            },
            "streaming": {
                "display_name": "Streaming",
                "field_type": "bool",
                "value": False,
                "required": False,
                "advanced": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
                "value": 0.0,
                "required": False,
                "advanced": True,
            },
            "top_k": {"display_name": "Top K", "type": "int", "default": 40, "required": False, "advanced": True},
            "top_p": {
                "display_name": "Top P",
                "field_type": "float",
                "value": 0.95,
                "required": False,
                "advanced": True,
            },
            "tuned_model_name": {
                "display_name": "Tuned Model Name",
                "type": "str",
                "required": False,
                "value": None,
                "advanced": True,
            },
            "verbose": {
                "display_name": "Verbose",
                "field_type": "bool",
                "value": False,
                "required": False,
            },
            "name": {"display_name": "Name", "field_type": "str"},
        }

    def build(
        self,
        credentials: Optional[str] = None,
        location: str = "us-central1",
        max_output_tokens: int = 128,
        max_retries: int = 6,
        # None sentinel instead of a mutable `{}` default shared across calls.
        metadata: Optional[Dict] = None,
        model_name: str = "text-bison",
        n: int = 1,
        name: Optional[str] = None,
        project: Optional[str] = None,
        request_parallelism: int = 5,
        streaming: bool = False,
        temperature: float = 0.0,
        top_k: int = 40,
        top_p: float = 0.95,
        tuned_model_name: Optional[str] = None,
        verbose: bool = False,
    ) -> Union[BaseLLM, Callable]:
        """Instantiate a `VertexAI` LLM.

        Args:
            credentials: Path to a service-account JSON credentials file.
            All other parameters are forwarded unchanged to `VertexAI`.

        Returns:
            A configured `VertexAI` instance.
        """
        return VertexAI(
            credentials=credentials,
            location=location,
            max_output_tokens=max_output_tokens,
            max_retries=max_retries,
            metadata=metadata or {},
            model_name=model_name,
            n=n,
            name=name,
            project=project,
            request_parallelism=request_parallelism,
            streaming=streaming,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            tuned_model_name=tuned_model_name,
            verbose=verbose,
        )

View file

@ -1,49 +0,0 @@
from typing import Callable, Optional, Union
from langchain.retrievers import MultiQueryRetriever
from langflow import CustomComponent
from langflow.field_typing import BaseLLM, BaseRetriever, PromptTemplate
class MultiQueryRetrieverComponent(CustomComponent):
    """Build a `MultiQueryRetriever` from an LLM and a base retriever."""

    display_name = "MultiQueryRetriever"
    description = "Initialize from llm using default template."
    documentation = "https://python.langchain.com/docs/modules/data_connection/retrievers/how_to/MultiQueryRetriever"

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        default_prompt = {
            "input_variables": ["question"],
            "input_types": {},
            "output_parser": None,
            "partial_variables": {},
            "template": "You are an AI language model assistant. Your task is \n"
            "to generate 3 different versions of the given user \n"
            "question to retrieve relevant documents from a vector database. \n"
            "By generating multiple perspectives on the user question, \n"
            "your goal is to help the user overcome some of the limitations \n"
            "of distance-based similarity search. Provide these alternative \n"
            "questions separated by newlines. Original question: {question}",
            "template_format": "f-string",
            "validate_template": False,
            "_type": "prompt",
        }
        return {
            "llm": {"display_name": "LLM"},
            "prompt": {"display_name": "Prompt", "default": default_prompt},
            "retriever": {"display_name": "Retriever"},
            "parser_key": {"display_name": "Parser Key", "default": "lines"},
        }

    def build(
        self,
        llm: BaseLLM,
        retriever: BaseRetriever,
        prompt: Optional[PromptTemplate] = None,
        parser_key: str = "lines",
    ) -> Union[Callable, MultiQueryRetriever]:
        """Create the retriever; a custom prompt is forwarded only when given."""
        factory_kwargs = {"llm": llm, "retriever": retriever, "parser_key": parser_key}
        if prompt:
            factory_kwargs["prompt"] = prompt
        return MultiQueryRetriever.from_llm(**factory_kwargs)

View file

@ -15,21 +15,29 @@ class VectaraSelfQueryRetriverComponent(CustomComponent):
display_name: str = "Vectara Self Query Retriever for Vectara Vector Store"
description: str = "Implementation of Vectara Self Query Retriever"
documentation = "https://python.langchain.com/docs/integrations/retrievers/self_query/vectara_self_query"
documentation = (
"https://python.langchain.com/docs/integrations/retrievers/self_query/vectara_self_query"
)
beta = True
field_config = {
"code": {"show": True},
"vectorstore": {"display_name": "Vector Store", "info": "Input Vectara Vectore Store"},
"llm": {"display_name": "LLM", "info": "For self query retriever"},
"document_content_description": {
"display_name": "Document Content Description",
"vectorstore": {
"display_name": "Vector Store",
"info": "Input Vectara Vectore Store"
},
"llm": {
"display_name": "LLM",
"info": "For self query retriever"
},
"document_content_description":{
"display_name": "Document Content Description",
"info": "For self query retriever",
},
},
"metadata_field_info": {
"display_name": "Metadata Field Info",
"info": 'Each metadata field info is a string in the form of key value pair dictionary containing additional search metadata.\nExample input: {"name":"speech","description":"what name of the speech","type":"string or list[string]"}.\nThe keys should remain constant(name, description, type)',
},
"display_name": "Metadata Field Info",
"info": "Each metadata field info is a string in the form of key value pair dictionary containing additional search metadata.\nExample input: {\"name\":\"speech\",\"description\":\"what name of the speech\",\"type\":\"string or list[string]\"}.\nThe keys should remain constant(name, description, type)",
},
}
def build(
@ -39,19 +47,24 @@ class VectaraSelfQueryRetriverComponent(CustomComponent):
llm: BaseLanguageModel,
metadata_field_info: List[str],
) -> BaseRetriever:
metadata_field_obj = []
for meta in metadata_field_info:
meta_obj = json.loads(meta)
if "name" not in meta_obj or "description" not in meta_obj or "type" not in meta_obj:
raise Exception("Incorrect metadata field info format.")
if 'name' not in meta_obj or 'description' not in meta_obj or 'type' not in meta_obj :
raise Exception('Incorrect metadata field info format.')
attribute_info = AttributeInfo(
name=meta_obj["name"],
description=meta_obj["description"],
type=meta_obj["type"],
name = meta_obj['name'],
description = meta_obj['description'],
type = meta_obj['type'],
)
metadata_field_obj.append(attribute_info)
return SelfQueryRetriever.from_llm(
llm, vectorstore, document_content_description, metadata_field_obj, verbose=True
)
llm,
vectorstore,
document_content_description,
metadata_field_obj,
verbose=True
)

View file

@ -1,30 +0,0 @@
from langflow import CustomComponent
from langchain.text_splitter import CharacterTextSplitter
from langchain_core.documents.base import Document
from typing import List
class CharacterTextSplitterComponent(CustomComponent):
    """Split documents into chunks using a character-based splitter."""

    display_name = "CharacterTextSplitter"
    description = "Splitting text that looks at characters."

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        return {
            "documents": {"display_name": "Documents"},
            "chunk_overlap": {"display_name": "Chunk Overlap", "default": 200},
            "chunk_size": {"display_name": "Chunk Size", "default": 1000},
            "separator": {"display_name": "Separator", "default": "\n"},
        }

    def build(
        self,
        documents: List[Document],
        chunk_overlap: int = 200,
        chunk_size: int = 1000,
        separator: str = "\n",
    ) -> List[Document]:
        """Split *documents* and return the resulting chunk documents."""
        splitter = CharacterTextSplitter(
            chunk_overlap=chunk_overlap,
            chunk_size=chunk_size,
            separator=separator,
        )
        return splitter.split_documents(documents)

View file

@ -1,9 +1,7 @@
from typing import Optional
from langchain.text_splitter import Language
from langchain_core.documents import Document
from langflow import CustomComponent
from langchain.text_splitter import Language
from langchain.schema import Document
class LanguageRecursiveTextSplitterComponent(CustomComponent):
@ -50,7 +48,7 @@ class LanguageRecursiveTextSplitterComponent(CustomComponent):
documents: list[Document],
chunk_size: Optional[int] = 1000,
chunk_overlap: Optional[int] = 200,
separator_type: str = "Python",
separator_type: Optional[str] = "Python",
) -> list[Document]:
"""
Split text into chunks of a specified length.

View file

@ -1,10 +1,7 @@
from typing import Optional
from langchain_core.documents import Document
from langflow import CustomComponent
from langchain.schema import Document
from langflow.utils.util import build_loader_repr_from_documents
from langchain.text_splitter import RecursiveCharacterTextSplitter
class RecursiveCharacterTextSplitterComponent(CustomComponent):
@ -57,6 +54,7 @@ class RecursiveCharacterTextSplitterComponent(CustomComponent):
Returns:
list[str]: The chunks of text.
"""
from langchain.text_splitter import RecursiveCharacterTextSplitter
if separators == "":
separators = None

View file

@ -1,16 +0,0 @@
from langflow import CustomComponent
from langchain_community.tools.json.tool import JsonSpec
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
class JsonToolkitComponent(CustomComponent):
    """Wrap a `JsonSpec` in a `JsonToolkit` for agent use."""

    display_name = "JsonToolkit"
    description = "Toolkit for interacting with a JSON spec."

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        spec_field = {"display_name": "Spec", "type": JsonSpec}
        return {"spec": spec_field}

    def build(self, spec: JsonSpec) -> JsonToolkit:
        """Return a `JsonToolkit` backed by *spec*."""
        return JsonToolkit(spec=spec)

View file

@ -1,23 +0,0 @@
from langflow import CustomComponent
from langflow.field_typing import AgentExecutor
from typing import Callable
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit
class OpenAPIToolkitComponent(CustomComponent):
    """Combine a JSON agent and a requests wrapper into an `OpenAPIToolkit`."""

    display_name = "OpenAPIToolkit"
    description = "Toolkit for interacting with an OpenAPI API."

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        return {
            "json_agent": {"display_name": "JSON Agent"},
            "requests_wrapper": {"display_name": "Text Requests Wrapper"},
        }

    def build(
        self,
        json_agent: AgentExecutor,
        requests_wrapper: TextRequestsWrapper,
    ) -> Callable:
        """Return an `OpenAPIToolkit` built from the given agent and wrapper."""
        toolkit = OpenAPIToolkit(json_agent=json_agent, requests_wrapper=requests_wrapper)
        return toolkit

View file

@ -1,26 +0,0 @@
from typing import Callable, Union
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo
from langchain_community.vectorstores import VectorStore
from langflow import CustomComponent
class VectorStoreInfoComponent(CustomComponent):
    """Describe a vector store with a name and a free-text description."""

    display_name = "VectorStoreInfo"
    description = "Information about a VectorStore"

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        return {
            "vectorstore": {"display_name": "VectorStore"},
            "description": {"display_name": "Description", "multiline": True},
            "name": {"display_name": "Name"},
        }

    def build(
        self,
        vectorstore: VectorStore,
        description: str,
        name: str,
    ) -> Union[VectorStoreInfo, Callable]:
        """Return a `VectorStoreInfo` for *vectorstore*."""
        info = VectorStoreInfo(vectorstore=vectorstore, description=description, name=name)
        return info

View file

@ -1,23 +0,0 @@
from langflow import CustomComponent
from typing import List, Union
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreRouterToolkit
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo
from langflow.field_typing import BaseLanguageModel, Tool
class VectorStoreRouterToolkitComponent(CustomComponent):
    """Build a toolkit that routes queries between multiple vector stores."""

    display_name = "VectorStoreRouterToolkit"
    description = "Toolkit for routing between Vector Stores."

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        return {
            "vectorstores": {"display_name": "Vector Stores"},
            "llm": {"display_name": "LLM"},
        }

    def build(
        self, vectorstores: List[VectorStoreInfo], llm: BaseLanguageModel
    ) -> Union[Tool, VectorStoreRouterToolkit]:
        """Return a `VectorStoreRouterToolkit` over *vectorstores* driven by *llm*.

        Args:
            vectorstores: Vector-store descriptors the router can choose among.
            llm: Language model used to route between the stores.
        """
        # Leftover debug print() calls removed: they leaked inputs to stdout
        # on every build.
        return VectorStoreRouterToolkit(vectorstores=vectorstores, llm=llm)

View file

@ -1,28 +0,0 @@
from langflow import CustomComponent
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreToolkit
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo
from langflow.field_typing import (
BaseLanguageModel,
)
from langflow.field_typing import (
Tool,
)
from typing import Union
class VectorStoreToolkitComponent(CustomComponent):
    """Build a toolkit for querying a single vector store."""

    display_name = "VectorStoreToolkit"
    description = "Toolkit for interacting with a Vector Store."

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        return {
            "vectorstore_info": {"display_name": "Vector Store Info"},
            "llm": {"display_name": "LLM"},
        }

    def build(
        self,
        vectorstore_info: VectorStoreInfo,
        llm: BaseLanguageModel,
    ) -> Union[Tool, VectorStoreToolkit]:
        """Return a `VectorStoreToolkit` for *vectorstore_info* driven by *llm*."""
        toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info, llm=llm)
        return toolkit

View file

@ -1,31 +0,0 @@
from langflow import CustomComponent
# Assuming `BingSearchAPIWrapper` is a class that exists in the context
# and has the appropriate methods and attributes.
# We need to make sure this class is importable from the context where this code will be running.
from langchain_community.utilities.bing_search import BingSearchAPIWrapper
class BingSearchAPIWrapperComponent(CustomComponent):
    """Expose the Bing Search API wrapper as a Langflow component."""

    display_name = "BingSearchAPIWrapper"
    description = "Wrapper for Bing Search API."

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        key_field = {
            "display_name": "Bing Subscription Key",
            "password": True,
        }
        return {
            "bing_search_url": {"display_name": "Bing Search URL"},
            "bing_subscription_key": key_field,
            "k": {"display_name": "Number of results", "advanced": True},
        }

    def build(
        self,
        bing_search_url: str,
        bing_subscription_key: str,
        k: int = 10,
    ) -> BingSearchAPIWrapper:
        """Return a `BingSearchAPIWrapper` returning *k* results per query."""
        return BingSearchAPIWrapper(bing_search_url=bing_search_url, bing_subscription_key=bing_subscription_key, k=k)

View file

@ -1,9 +1,8 @@
from typing import Optional
import requests
from langchain_core.documents import Document
from langflow import CustomComponent
from langchain.schema import Document
from langflow.services.database.models.base import orjson_dumps
import requests
from typing import Optional
class GetRequest(CustomComponent):

View file

@ -1,21 +0,0 @@
from langflow import CustomComponent
from typing import Union, Callable
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
class GoogleSearchAPIWrapperComponent(CustomComponent):
    """Expose the Google Custom Search API wrapper as a Langflow component."""

    display_name = "GoogleSearchAPIWrapper"
    description = "Wrapper for Google Search API."

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        return {
            "google_api_key": {"display_name": "Google API Key", "password": True},
            "google_cse_id": {"display_name": "Google CSE ID", "password": True},
        }

    def build(
        self,
        google_api_key: str,
        google_cse_id: str,
    ) -> Union[GoogleSearchAPIWrapper, Callable]:
        """Return a `GoogleSearchAPIWrapper` using the given API key and CSE id."""
        wrapper = GoogleSearchAPIWrapper(google_api_key=google_api_key, google_cse_id=google_cse_id)
        return wrapper

View file

@ -1,47 +0,0 @@
from langflow import CustomComponent
from typing import Dict, Optional
# Assuming the existence of GoogleSerperAPIWrapper class in the serper module
# If this class does not exist, you would need to create it or import the appropriate class from another module
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
class GoogleSerperAPIWrapperComponent(CustomComponent):
    """Expose the Serper.dev Google Search API wrapper as a Langflow component."""

    display_name = "GoogleSerperAPIWrapper"
    description = "Wrapper around the Serper.dev Google Search API."

    def build_config(self) -> Dict[str, Dict]:
        """Return the UI field configuration for this component."""
        return {
            "result_key_for_type": {
                "display_name": "Result Key for Type",
                "show": True,
                "multiline": False,
                "password": False,
                "name": "result_key_for_type",
                "advanced": False,
                "dynamic": False,
                "info": "",
                "field_type": "dict",
                "list": False,
                "value": {"news": "news", "places": "places", "images": "images", "search": "organic"},
            },
            "serper_api_key": {
                "display_name": "Serper API Key",
                "show": True,
                "multiline": False,
                "password": True,
                "name": "serper_api_key",
                "advanced": False,
                "dynamic": False,
                "info": "",
                "type": "str",
                "list": False,
            },
        }

    def build(
        self,
        serper_api_key: str,
        result_key_for_type: Optional[Dict[str, str]] = None,
    ) -> GoogleSerperAPIWrapper:
        """Return a configured `GoogleSerperAPIWrapper`.

        Args:
            serper_api_key: Serper.dev API key.
            result_key_for_type: Optional mapping from search type to the
                response key holding results.

        Returns:
            A `GoogleSerperAPIWrapper` instance.
        """
        kwargs: Dict = {"serper_api_key": serper_api_key}
        # Only forward the mapping when provided; previously an explicit None
        # was passed, overriding the wrapper's own default mapping
        # (e.g. "search" -> "organic").
        if result_key_for_type is not None:
            kwargs["result_key_for_type"] = result_key_for_type
        return GoogleSerperAPIWrapper(**kwargs)

View file

@ -11,7 +11,7 @@
# - **Document:** The Document containing the JSON object.
from langchain_core.documents import Document
from langchain.schema import Document
from langflow import CustomComponent
from langflow.services.database.models.base import orjson_dumps

View file

@ -1,9 +1,8 @@
from typing import Optional
import requests
from langchain_core.documents import Document
from langflow import CustomComponent
from langchain.schema import Document
from langflow.services.database.models.base import orjson_dumps
import requests
from typing import Optional
class PostRequest(CustomComponent):

View file

@ -1,33 +0,0 @@
from langflow import CustomComponent
from typing import Optional, Dict
from langchain_community.utilities.searx_search import SearxSearchWrapper
class SearxSearchWrapperComponent(CustomComponent):
    """Expose the Searx metasearch API wrapper as a Langflow component."""

    display_name = "SearxSearchWrapper"
    description = "Wrapper for Searx API."

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        headers_field = {
            "field_type": "dict",
            "display_name": "Headers",
            "multiline": True,
            "value": '{"Authorization": "Bearer <token>"}',
        }
        host_field = {
            "display_name": "Searx Host",
            "field_type": "str",
            "value": "https://searx.example.com",
            "advanced": True,
        }
        return {
            "headers": headers_field,
            "k": {"display_name": "k", "advanced": True, "field_type": "int", "value": 10},
            "searx_host": host_field,
        }

    def build(
        self,
        k: int = 10,
        headers: Optional[Dict[str, str]] = None,
        searx_host: str = "https://searx.example.com",
    ) -> SearxSearchWrapper:
        """Return a `SearxSearchWrapper` against *searx_host* returning *k* results."""
        return SearxSearchWrapper(headers=headers, k=k, searx_host=searx_host)

View file

@ -1,31 +0,0 @@
from typing import Callable, Union
from langchain_community.utilities.serpapi import SerpAPIWrapper
from langflow import CustomComponent
class SerpAPIWrapperComponent(CustomComponent):
    """Expose the SerpAPI search wrapper as a Langflow component."""

    display_name = "SerpAPIWrapper"
    description = "Wrapper around SerpAPI"

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        params_field = {
            "display_name": "Parameters",
            "type": "dict",
            "advanced": True,
            "multiline": True,
            "value": '{"engine": "google","google_domain": "google.com","gl": "us","hl": "en"}',
        }
        return {
            "serpapi_api_key": {"display_name": "SerpAPI API Key", "type": "str", "password": True},
            "params": params_field,
        }

    def build(
        self,
        serpapi_api_key: str,
        params: dict,
    ) -> Union[SerpAPIWrapper, Callable]:
        """Return a `SerpAPIWrapper` configured with the given key and params."""
        wrapper = SerpAPIWrapper(  # type: ignore
            serpapi_api_key=serpapi_api_key,
            params=params,
        )
        return wrapper

View file

@ -1,8 +1,7 @@
from typing import List, Optional
import requests
from langchain_core.documents import Document
from langflow import CustomComponent
from langchain.schema import Document
from langflow.services.database.models.base import orjson_dumps

View file

@ -1,30 +0,0 @@
from typing import Callable, Union
from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
from langflow import CustomComponent
# Assuming WikipediaAPIWrapper is a class that needs to be imported.
# The import statement is not included as it is not provided in the JSON
# and the actual implementation details are unknown.
class WikipediaAPIWrapperComponent(CustomComponent):
    """Expose the Wikipedia API wrapper as a Langflow component."""

    display_name = "WikipediaAPIWrapper"
    description = "Wrapper around WikipediaAPI."

    def build_config(self):
        """Return the UI field configuration.

        Previously this returned an empty dict even though `build` exposes
        four configurable parameters; declaring them keeps this component
        consistent with the sibling wrapper components.
        """
        return {
            "top_k_results": {"display_name": "Top K Results", "field_type": "int", "value": 3},
            "lang": {"display_name": "Language", "field_type": "str", "value": "en"},
            "load_all_available_meta": {
                "display_name": "Load All Available Meta",
                "field_type": "bool",
                "value": False,
                "advanced": True,
            },
            "doc_content_chars_max": {
                "display_name": "Document Content Characters Max",
                "field_type": "int",
                "value": 4000,
                "advanced": True,
            },
        }

    def build(
        self,
        top_k_results: int = 3,
        lang: str = "en",
        load_all_available_meta: bool = False,
        doc_content_chars_max: int = 4000,
    ) -> Union[WikipediaAPIWrapper, Callable]:
        """Return a configured `WikipediaAPIWrapper`.

        Args:
            top_k_results: Number of results to return per query.
            lang: Wikipedia language code.
            load_all_available_meta: Whether to load all available metadata.
            doc_content_chars_max: Maximum characters of document content.
        """
        return WikipediaAPIWrapper(  # type: ignore
            top_k_results=top_k_results,
            lang=lang,
            load_all_available_meta=load_all_available_meta,
            doc_content_chars_max=doc_content_chars_max,
        )

View file

@ -1,18 +0,0 @@
from typing import Callable, Union
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langflow import CustomComponent
# Since all the fields in the JSON have show=False, we will only create a basic component
# without any configurable fields.
class WolframAlphaAPIWrapperComponent(CustomComponent):
    """Expose the Wolfram Alpha API wrapper as a Langflow component."""

    display_name = "WolframAlphaAPIWrapper"
    description = "Wrapper for Wolfram Alpha."

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        appid_field = {"display_name": "App ID", "type": "str", "password": True}
        return {"appid": appid_field}

    def build(self, appid: str) -> Union[Callable, WolframAlphaAPIWrapper]:
        """Return a `WolframAlphaAPIWrapper` authenticated with *appid*."""
        return WolframAlphaAPIWrapper(wolfram_alpha_appid=appid)  # type: ignore

View file

@ -3,8 +3,9 @@ from typing import List, Optional, Union
import chromadb # type: ignore
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.chroma import Chroma
from langchain.vectorstores import Chroma
from langchain.vectorstores.base import VectorStore
from langflow import CustomComponent

View file

@ -1,26 +0,0 @@
from typing import List, Union
from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.faiss import FAISS
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings
class FAISSComponent(CustomComponent):
    """Build a FAISS vector store from documents and an embedding model."""

    display_name = "FAISS"
    description = "Construct FAISS wrapper from raw documents."
    documentation = "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss"

    def build_config(self):
        """Describe the configurable fields shown in the UI."""
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
        }

    def build(
        self,
        embedding: Embeddings,
        documents: List[Document],
    ) -> Union[VectorStore, FAISS, BaseRetriever]:
        """Index *documents* with *embedding* and return the FAISS store."""
        store = FAISS.from_documents(documents=documents, embedding=embedding)
        return store

View file

@ -1,47 +0,0 @@
from typing import List, Optional
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langflow import CustomComponent
from langflow.field_typing import (
Document,
Embeddings,
NestedDict,
)
class MongoDBAtlasComponent(CustomComponent):
    """Langflow component wrapping `MongoDBAtlasVectorSearch` construction."""

    display_name = "MongoDB Atlas"
    description = "Construct a `MongoDB Atlas Vector Search` vector store from raw documents."

    def build_config(self):
        """Return the UI field configuration for this component."""
        # "search_kwargs" is tucked behind the advanced toggle.
        fields = {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "collection_name": {"display_name": "Collection Name"},
            "db_name": {"display_name": "Database Name"},
            "index_name": {"display_name": "Index Name"},
            "mongodb_atlas_cluster_uri": {"display_name": "MongoDB Atlas Cluster URI"},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
        }
        return fields

    def build(
        self,
        documents: List[Document],
        embedding: Embeddings,
        collection_name: str = "",
        db_name: str = "",
        index_name: str = "",
        mongodb_atlas_cluster_uri: str = "",
        search_kwargs: Optional[NestedDict] = None,
    ) -> MongoDBAtlasVectorSearch:
        """Instantiate the vector store from the configured fields."""
        # A falsy search_kwargs (None or empty) collapses to a fresh dict.
        effective_kwargs = search_kwargs or {}
        store = MongoDBAtlasVectorSearch(
            documents=documents,
            embedding=embedding,
            collection_name=collection_name,
            db_name=db_name,
            index_name=index_name,
            mongodb_atlas_cluster_uri=mongodb_atlas_cluster_uri,
            search_kwargs=effective_kwargs,
        )
        return store

View file

@ -1,62 +0,0 @@
import os
from typing import List, Optional, Union
import pinecone # type: ignore
from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.pinecone import Pinecone
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings
class PineconeComponent(CustomComponent):
    """Langflow component that wraps Pinecone vector-store construction.

    When documents are supplied they are indexed via `Pinecone.from_documents`;
    otherwise the component attaches to an existing index via
    `Pinecone.from_existing_index`.
    """

    display_name = "Pinecone"
    description = "Construct Pinecone wrapper from raw documents."

    def build_config(self):
        """Return the UI field configuration for this component."""
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "index_name": {"display_name": "Index Name"},
            "namespace": {"display_name": "Namespace"},
            "pinecone_api_key": {"display_name": "Pinecone API Key", "default": "", "password": True, "required": True},
            "pinecone_env": {"display_name": "Pinecone Environment", "default": "", "required": True},
            "search_kwargs": {"display_name": "Search Kwargs", "default": "{}"},
            "pool_threads": {"display_name": "Pool Threads", "default": 1, "advanced": True},
        }

    def build(
        self,
        embedding: Embeddings,
        pinecone_env: str,
        documents: List[Document],
        index_name: Optional[str] = None,
        pinecone_api_key: Optional[str] = None,
        text_key: Optional[str] = "text",
        namespace: Optional[str] = "default",
        pool_threads: Optional[int] = None,
    ) -> Union[VectorStore, Pinecone, BaseRetriever]:
        """Initialize the Pinecone client and return a vector store.

        Raises:
            ValueError: if the API key or environment is missing.
        """
        # Fail fast on missing credentials.  (A second, environment-based check
        # in the original was dead code: it could only trigger when
        # pinecone_api_key was None, which this check already rejects.)
        if pinecone_api_key is None or pinecone_env is None:
            raise ValueError("Pinecone API Key and Environment are required.")
        pinecone.init(api_key=pinecone_api_key, environment=pinecone_env)  # type: ignore
        if documents:
            # Index the provided documents into the named index.
            return Pinecone.from_documents(
                documents=documents,
                embedding=embedding,
                index_name=index_name,
                pool_threads=pool_threads,
                namespace=namespace,
                text_key=text_key,
            )
        # No documents given: attach to an already-populated index.
        return Pinecone.from_existing_index(
            index_name=index_name,
            embedding=embedding,
            text_key=text_key,
            namespace=namespace,
            pool_threads=pool_threads,
        )

View file

@ -1,76 +0,0 @@
from typing import List, Optional, Union
from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.qdrant import Qdrant
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings, NestedDict
class QdrantComponent(CustomComponent):
    """Langflow component that builds a Qdrant vector store from documents."""

    display_name = "Qdrant"
    description = "Construct Qdrant wrapper from a list of texts."

    def build_config(self):
        """Return the UI field configuration; most connection knobs are advanced."""
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "api_key": {"display_name": "API Key", "password": True},
            "collection_name": {"display_name": "Collection Name"},
            "content_payload_key": {"display_name": "Content Payload Key", "advanced": True},
            "distance_func": {"display_name": "Distance Function", "advanced": True},
            "grpc_port": {"display_name": "gRPC Port", "advanced": True},
            "host": {"display_name": "Host", "advanced": True},
            "https": {"display_name": "HTTPS", "advanced": True},
            "location": {"display_name": "Location", "advanced": True},
            "metadata_payload_key": {"display_name": "Metadata Payload Key", "advanced": True},
            "path": {"display_name": "Path", "advanced": True},
            "port": {"display_name": "Port", "advanced": True},
            "prefer_grpc": {"display_name": "Prefer gRPC", "advanced": True},
            "prefix": {"display_name": "Prefix", "advanced": True},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
            "timeout": {"display_name": "Timeout", "advanced": True},
            "url": {"display_name": "URL", "advanced": True},
        }

    def build(
        self,
        embedding: Embeddings,
        documents: List[Document],
        api_key: Optional[str] = None,
        collection_name: Optional[str] = None,
        content_payload_key: str = "page_content",
        distance_func: str = "Cosine",
        grpc_port: Optional[int] = 6334,
        host: Optional[str] = None,
        https: bool = False,
        location: str = ":memory:",
        metadata_payload_key: str = "metadata",
        path: Optional[str] = None,
        port: Optional[int] = 6333,
        prefer_grpc: bool = False,
        prefix: Optional[str] = None,
        search_kwargs: Optional[NestedDict] = None,
        timeout: Optional[float] = None,
        url: Optional[str] = None,
    ) -> Union[VectorStore, Qdrant, BaseRetriever]:
        """Delegate to `Qdrant.from_documents`, forwarding every option as-is."""
        # Collect the pass-through connection/payload options once, then splat
        # them into the constructor call.
        connection_options = dict(
            api_key=api_key,
            collection_name=collection_name,
            content_payload_key=content_payload_key,
            distance_func=distance_func,
            grpc_port=grpc_port,
            host=host,
            https=https,
            location=location,
            metadata_payload_key=metadata_payload_key,
            path=path,
            port=port,
            prefer_grpc=prefer_grpc,
            prefix=prefix,
            search_kwargs=search_kwargs,
            timeout=timeout,
            url=url,
        )
        return Qdrant.from_documents(documents=documents, embedding=embedding, **connection_options)

View file

@ -1,13 +1,11 @@
from typing import Optional, Union
from langchain.embeddings.base import Embeddings
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.redis import Redis
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from typing import Optional
from langflow import CustomComponent
from langchain.vectorstores.redis import Redis
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.embeddings.base import Embeddings
class RedisComponent(CustomComponent):
"""
@ -44,7 +42,7 @@ class RedisComponent(CustomComponent):
redis_server_url: str,
redis_index_name: str,
documents: Optional[Document] = None,
) -> Union[VectorStore, BaseRetriever]:
) -> VectorStore:
"""
Builds the Vector Store or BaseRetriever object.
@ -57,19 +55,10 @@ class RedisComponent(CustomComponent):
Returns:
- VectorStore: The Vector Store object.
"""
if documents is None:
redis_vs = Redis.from_existing_index(
embedding=embedding,
index_name=redis_index_name,
schema=None,
key_prefix=None,
redis_url=redis_server_url,
)
else:
redis_vs = Redis.from_documents(
documents=documents, # type: ignore
embedding=embedding,
redis_url=redis_server_url,
index_name=redis_index_name,
)
return redis_vs
return Redis.from_documents(
documents=documents, # type: ignore
embedding=embedding,
redis_url=redis_server_url,
index_name=redis_index_name,
)

View file

@ -1,44 +0,0 @@
from typing import List, Union
from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.supabase import SupabaseVectorStore
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings, NestedDict
from supabase.client import Client, create_client
class SupabaseComponent(CustomComponent):
    """Langflow component building a `SupabaseVectorStore` from documents.

    Creates a Supabase client from the given URL/service key and delegates to
    `SupabaseVectorStore.from_documents`.
    """

    display_name = "Supabase"
    description = "Return VectorStore initialized from texts and embeddings."

    def build_config(self):
        """Return the UI field configuration for this component."""
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "query_name": {"display_name": "Query Name"},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
            "supabase_service_key": {"display_name": "Supabase Service Key"},
            "supabase_url": {"display_name": "Supabase URL"},
            "table_name": {"display_name": "Table Name", "advanced": True},
        }

    def build(
        self,
        embedding: Embeddings,
        documents: List[Document],
        query_name: str = "",
        # Default changed from a shared mutable `{}` (the classic
        # mutable-default-argument pitfall) to None; callers that omit the
        # argument still get an empty dict.
        search_kwargs: Union[NestedDict, None] = None,
        supabase_service_key: str = "",
        supabase_url: str = "",
        table_name: str = "",
    ) -> Union[VectorStore, SupabaseVectorStore, BaseRetriever]:
        """Build the vector store.

        Propagates whatever `create_client` / `from_documents` raise on bad
        credentials or connection failure.
        """
        if search_kwargs is None:
            search_kwargs = {}
        supabase: Client = create_client(supabase_url, supabase_key=supabase_service_key)
        return SupabaseVectorStore.from_documents(
            documents=documents,
            embedding=embedding,
            query_name=query_name,
            search_kwargs=search_kwargs,
            client=supabase,
            table_name=table_name,
        )

View file

@ -1,14 +1,14 @@
import tempfile
import urllib
import urllib.request
from typing import List, Optional, Union
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores.vectara import Vectara
from langchain_core.vectorstores import VectorStore
from typing import Optional, Union, List
from langflow import CustomComponent
from langflow.field_typing import BaseRetriever, Document
import tempfile
import urllib.request
import urllib
from langchain.vectorstores import Vectara
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.schema import BaseRetriever
from langchain.embeddings import FakeEmbeddings
class VectaraComponent(CustomComponent):
@ -19,18 +19,25 @@ class VectaraComponent(CustomComponent):
field_config = {
"vectara_customer_id": {
"display_name": "Vectara Customer ID",
"required": True,
},
"vectara_corpus_id": {
"display_name": "Vectara Corpus ID",
"required": True,
},
"vectara_api_key": {
"display_name": "Vectara API Key",
"password": True,
"required": True,
},
"code": {"show": False},
"documents": {
"display_name": "Documents",
"info": "Pass in either for Self Query Retriever or for making a Vectara Object",
},
"documents": {"display_name": "Documents", "info": "If provided, will be upserted to corpus (optional)"},
"files_url": {
"display_name": "Files Url",
"info": "Make vectara object using url of files (optional)",
"info": "Make vectara object using url of files(documents not needed)",
},
}
@ -42,8 +49,6 @@ class VectaraComponent(CustomComponent):
files_url: Optional[List[str]] = None,
documents: Optional[Document] = None,
) -> Union[VectorStore, BaseRetriever]:
source = "Langflow"
if documents is not None:
return Vectara.from_documents(
documents=documents,
@ -51,7 +56,6 @@ class VectaraComponent(CustomComponent):
vectara_customer_id=vectara_customer_id,
vectara_corpus_id=vectara_corpus_id,
vectara_api_key=vectara_api_key,
source=source,
)
if files_url is not None:
@ -67,12 +71,10 @@ class VectaraComponent(CustomComponent):
vectara_customer_id=vectara_customer_id,
vectara_corpus_id=vectara_corpus_id,
vectara_api_key=vectara_api_key,
source=source,
)
return Vectara(
vectara_customer_id=vectara_customer_id,
vectara_corpus_id=vectara_corpus_id,
vectara_api_key=vectara_api_key,
source=source,
)

View file

@ -1,12 +1,13 @@
from typing import Optional, Union
import weaviate # type: ignore
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain_community.vectorstores import VectorStore, Weaviate
from typing import Optional, Union
from langflow import CustomComponent
from langchain.vectorstores import Weaviate
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.schema import BaseRetriever
from langchain.embeddings.base import Embeddings
class WeaviateVectorStore(CustomComponent):
display_name: str = "Weaviate"
@ -44,7 +45,7 @@ class WeaviateVectorStore(CustomComponent):
search_by_text: bool = False,
api_key: Optional[str] = None,
index_name: Optional[str] = None,
text_key: str = "text",
text_key: Optional[str] = "text",
embedding: Optional[Embeddings] = None,
documents: Optional[Document] = None,
attributes: Optional[list] = None,

View file

@ -1,15 +1,13 @@
from typing import Optional, Union
from langchain.embeddings.base import Embeddings
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.pgvector import PGVector
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from typing import Optional, List
from langflow import CustomComponent
from langchain.vectorstores.pgvector import PGVector
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.embeddings.base import Embeddings
class PGVectorComponent(CustomComponent):
class PostgresqlVectorComponent(CustomComponent):
"""
A custom component for implementing a Vector Store using PostgreSQL.
"""
@ -17,6 +15,7 @@ class PGVectorComponent(CustomComponent):
display_name: str = "PGVector"
description: str = "Implementation of Vector Store using PostgreSQL"
documentation = "https://python.langchain.com/docs/integrations/vectorstores/pgvector"
beta = True
def build_config(self):
"""
@ -26,7 +25,8 @@ class PGVectorComponent(CustomComponent):
- dict: A dictionary containing the configuration options for the component.
"""
return {
"code": {"show": False},
"index_name": {"display_name": "Index Name", "value": "your_index"},
"code": {"show": True, "display_name": "Code"},
"documents": {"display_name": "Documents", "is_list": True},
"embedding": {"display_name": "Embedding"},
"pg_server_url": {
@ -41,8 +41,8 @@ class PGVectorComponent(CustomComponent):
embedding: Embeddings,
pg_server_url: str,
collection_name: str,
documents: Optional[Document] = None,
) -> Union[VectorStore, BaseRetriever]:
documents: Optional[List[Document]] = None,
) -> VectorStore:
"""
Builds the Vector Store or BaseRetriever object.
@ -58,13 +58,13 @@ class PGVectorComponent(CustomComponent):
try:
if documents is None:
vector_store = PGVector.from_existing_index(
return PGVector.from_existing_index(
embedding=embedding,
collection_name=collection_name,
connection_string=pg_server_url,
)
vector_store = PGVector.from_documents(
return PGVector.from_documents(
embedding=embedding,
documents=documents,
collection_name=collection_name,
@ -72,4 +72,3 @@ class PGVectorComponent(CustomComponent):
)
except Exception as e:
raise RuntimeError(f"Failed to build PGVector: {e}")
return vector_store

View file

@ -106,8 +106,6 @@ embeddings:
documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/google_vertex_ai_palm"
AmazonBedrockEmbeddings:
documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/bedrock"
OllamaEmbeddings:
documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/ollama"
llms:
OpenAI:
@ -174,8 +172,6 @@ prompts:
textsplitters:
CharacterTextSplitter:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/character_text_splitter"
RecursiveCharacterTextSplitter:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_transformers/recursive_text_splitter"
toolkits:
OpenAPIToolkit:
documentation: ""
@ -278,8 +274,6 @@ vectorstores:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss"
Pinecone:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/pinecone"
ElasticsearchStore:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/elasticsearch"
SupabaseVectorStore:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/supabase"
MongoDBAtlasVectorSearch:

View file

@ -12,7 +12,7 @@ from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.memory import BaseMemory
from langchain.text_splitter import TextSplitter
from langchain.tools import Tool
from langchain_community.vectorstores import VectorStore
from langchain.vectorstores.base import VectorStore
# Type alias for more complex dicts
NestedDict = Dict[str, Union[str, Dict]]

View file

@ -12,6 +12,7 @@ from langflow.interface.retrievers.base import retriever_creator
from langflow.interface.text_splitters.base import textsplitter_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
from langflow.utils.lazy_load import LazyLoadDictBase
@ -45,7 +46,7 @@ class VertexTypesDict(LazyLoadDictBase):
**{t: types.LLMVertex for t in llm_creator.to_list()},
**{t: types.MemoryVertex for t in memory_creator.to_list()},
**{t: types.EmbeddingVertex for t in embedding_creator.to_list()},
# **{t: types.VectorStoreVertex for t in vectorstore_creator.to_list()},
**{t: types.VectorStoreVertex for t in vectorstore_creator.to_list()},
**{t: types.DocumentLoaderVertex for t in documentloader_creator.to_list()},
**{t: types.TextSplitterVertex for t in textsplitter_creator.to_list()},
**{t: types.OutputParserVertex for t in output_parser_creator.to_list()},

View file

@ -2,10 +2,14 @@ from typing import Any, Optional
from langchain.agents import AgentExecutor, ZeroShotAgent
from langchain.agents.agent_toolkits import (
SQLDatabaseToolkit,
VectorStoreInfo,
VectorStoreRouterToolkit,
VectorStoreToolkit,
)
from langchain.agents.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
from langchain.agents.agent_toolkits.sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain.agents.agent_toolkits.vectorstore.prompt import PREFIX as VECTORSTORE_PREFIX
from langchain.agents.agent_toolkits.vectorstore.prompt import ROUTER_PREFIX as VECTORSTORE_ROUTER_PREFIX
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
@ -13,14 +17,9 @@ from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.sql_database import SQLDatabase
from langchain.tools.sql_database.prompt import QUERY_CHECKER
from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_community.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
from langchain_community.agent_toolkits.sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain_experimental.agents.agent_toolkits.pandas.prompt import PREFIX as PANDAS_PREFIX
from langchain_experimental.agents.agent_toolkits.pandas.prompt import SUFFIX_WITH_DF as PANDAS_SUFFIX
from langchain_experimental.tools.python.tool import PythonAstREPLTool
from langflow.interface.base import CustomAgentExecutor

View file

@ -67,9 +67,7 @@ Human: {input}
class MidJourneyPromptChain(BaseCustomConversationChain):
"""MidJourneyPromptChain is a chain you can use to generate new MidJourney prompts."""
template: Optional[
str
] = """I want you to act as a prompt generator for Midjourney's artificial intelligence program.
template: Optional[str] = """I want you to act as a prompt generator for Midjourney's artificial intelligence program.
Your job is to provide detailed and creative descriptions that will inspire unique and interesting images from the AI.
Keep in mind that the AI is capable of understanding a wide range of language and can interpret abstract concepts, so feel free to be as imaginative and descriptive as possible.
For example, you could describe a scene from a futuristic city, or a surreal landscape filled with strange creatures.
@ -83,9 +81,7 @@ class MidJourneyPromptChain(BaseCustomConversationChain):
class TimeTravelGuideChain(BaseCustomConversationChain):
template: Optional[
str
] = """I want you to act as my time travel guide. You are helpful and creative. I will provide you with the historical period or future time I want to visit and you will suggest the best events, sights, or people to experience. Provide the suggestions and any necessary information.
template: Optional[str] = """I want you to act as my time travel guide. You are helpful and creative. I will provide you with the historical period or future time I want to visit and you will suggest the best events, sights, or people to experience. Provide the suggestions and any necessary information.
Current conversation:
{history}
Human: {input}

View file

@ -2,7 +2,6 @@ import ast
import os
import zlib
from langflow.interface.custom.custom_component import CustomComponent
from loguru import logger
@ -67,18 +66,18 @@ class DirectoryReader:
def filter_loaded_components(self, data: dict, with_errors: bool) -> dict:
from langflow.interface.custom.utils import build_component
items = []
for menu in data["menu"]:
components = []
for component in menu["components"]:
try:
if component["error"] if with_errors else not component["error"]:
component_tuple = (*build_component(component), component)
components.append(component_tuple)
except Exception as e:
logger.error(f"Error while loading component: {e}")
continue
items.append({"name": menu["name"], "path": menu["path"], "components": components})
items = [
{
"name": menu["name"],
"path": menu["path"],
"components": [
(*build_component(component), component)
for component in menu["components"]
if (component["error"] if with_errors else not component["error"])
],
}
for menu in data["menu"]
]
filtered = [menu for menu in items if menu["components"]]
logger.debug(f'Filtered components {"with errors" if with_errors else ""}: {len(filtered)}')
return {"menu": filtered}
@ -246,18 +245,9 @@ class DirectoryReader:
else:
component_name_camelcase = component_name
if validation_result:
try:
output_types = self.get_output_types_from_code(result_content)
except Exception as exc:
logger.exception(f"Error while getting output types from code: {str(exc)}")
output_types = [component_name_camelcase]
else:
output_types = [component_name_camelcase]
component_info = {
"name": "CustomComponent",
"output_types": output_types,
"output_types": [component_name_camelcase],
"file": filename,
"code": result_content if validation_result else "",
"error": "" if validation_result else result_content,
@ -269,13 +259,3 @@ class DirectoryReader:
response["menu"].append(menu_result)
logger.debug("-------------------- Component menu list built --------------------")
return response
@staticmethod
def get_output_types_from_code(code: str) -> list:
"""
Get the output types from the code.
"""
custom_component = CustomComponent(code=code)
types_list = custom_component.get_function_entrypoint_return_type
# Get the name of types classes
return [type_.__name__ for type_ in types_list if hasattr(type_, "__name__")]

View file

@ -7,6 +7,8 @@ from typing import Any, Dict, List, Optional, Union
from uuid import UUID
from fastapi import HTTPException
from loguru import logger
from langflow.field_typing.range_spec import RangeSpec
from langflow.interface.custom.code_parser.utils import extract_inner_type
from langflow.interface.custom.custom_component import CustomComponent
@ -19,7 +21,6 @@ from langflow.interface.importing.utils import eval_custom_component_code
from langflow.template.field.base import TemplateField
from langflow.template.frontend_node.custom_components import CustomComponentFrontendNode
from langflow.utils.util import get_base_classes
from loguru import logger
def add_output_types(frontend_node: CustomComponentFrontendNode, return_types: List[str]):

Some files were not shown because too many files have changed in this diff Show more