Merge branch 'dev' into fix/vectorstores/redis

This commit is contained in:
Gabriel Luiz Freitas Almeida 2024-02-01 09:20:13 -03:00 committed by GitHub
commit 1d46ba5d74
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
219 changed files with 7746 additions and 14396 deletions

View file

@ -45,6 +45,13 @@ run_frontend:
@-kill -9 `lsof -t -i:3000`
cd src/frontend && npm start
tests_frontend:
ifeq ($(UI), true)
cd src/frontend && ./run-tests.sh --ui
else
cd src/frontend && ./run-tests.sh
endif
run_cli:
poetry run langflow run --path src/frontend/build

View file

@ -1,11 +1,13 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";
# Embeddings
<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation — it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
<p>
We appreciate your understanding as we polish our documentation — it may
contain some rough edges. Share your feedback or report issues to help us
improve! 🛠️📝
</p>
</Admonition>
Embeddings are vector representations of text that capture the semantic meaning of the text. They are created using text embedding models and allow us to think about the text in a vector space, enabling us to perform tasks like semantic search, where we look for pieces of text that are most similar in the vector space.
@ -110,4 +112,12 @@ Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP).
- **top_k:** How the model selects tokens for output: the next token is sampled from the `top_k` most probable tokens. Defaults to `40`.
- **top_p:** Tokens are selected from most probable to least until the sum of their probabilities reaches the `top_p` value. Defaults to `0.95`.
- **tuned_model_name:** The name of a tuned model. If provided, model_name is ignored.
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output. Defaults to `False`.
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output. Defaults to `False`.
### OllamaEmbeddings
Used to load [Ollamas](https://ollama.ai/) embedding models. Wrapper around LangChain's [Ollama API](https://python.langchain.com/docs/integrations/text_embedding/ollama).
- **model:** The name of the Ollama model to use. Defaults to `llama2`.
- **base_url:** The base URL for the Ollama API. Defaults to `http://localhost:11434`.
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value. Defaults to `0`.

View file

@ -21,7 +21,7 @@ import ZoomableImage from "/src/theme/ZoomableImage.js";
<Admonition type="note" title="LangChain Components 🦜🔗">
- [`ConversationBufferMemory`](https://python.langchain.com/docs/modules/memory/how_to/buffer)
- [`ConversationBufferMemory`](https://python.langchain.com/docs/modules/memory/types/buffer)
- [`ConversationChain`](https://python.langchain.com/docs/modules/chains/)
- [`ChatOpenAI`](https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai)

3296
poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.6.4"
version = "0.6.5a13"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [
@ -25,8 +25,6 @@ documentation = "https://docs.langflow.org"
langflow = "langflow.__main__:main"
[tool.poetry.dependencies]
python = ">=3.9,<3.11"
fastapi = "^0.108.0"
uvicorn = "^0.25.0"
@ -35,8 +33,8 @@ google-search-results = "^2.4.1"
google-api-python-client = "^2.79.0"
typer = "^0.9.0"
gunicorn = "^21.2.0"
langchain = "~0.0.345"
openai = "^1.6.1"
langchain = "~0.1.0"
openai = "^1.10.0"
pandas = "2.0.3"
chromadb = "^0.4.0"
huggingface-hub = { version = "^0.19.0", extras = ["inference"] }
@ -55,7 +53,7 @@ tiktoken = "~0.5.0"
wikipedia = "^1.4.0"
qdrant-client = "^1.7.0"
websockets = "^10.3"
weaviate-client = "^3.26.0"
weaviate-client = "*"
jina = "*"
sentence-transformers = { version = "^2.2.2", optional = true }
ctransformers = { version = "^0.2.10", optional = true }
@ -63,7 +61,7 @@ cohere = "^4.39.0"
python-multipart = "^0.0.6"
sqlmodel = "^0.0.14"
faiss-cpu = "^1.7.4"
anthropic = "^0.8.0"
anthropic = "^0.13.0"
orjson = "3.9.3"
multiprocess = "^0.70.14"
cachetools = "^5.3.1"
@ -98,19 +96,22 @@ markupsafe = "^2.1.3"
extract-msg = "^0.45.0"
# jq is not available for windows
jq = { version = "^1.6.0", markers = "sys_platform != 'win32'" }
boto3 = "^1.28.63"
boto3 = "^1.34.0"
numexpr = "^2.8.6"
qianfan = "0.2.0"
pgvector = "^0.2.3"
pyautogen = "^0.2.0"
langchain-google-genai = "^0.0.2"
elasticsearch = "^8.11.1"
pytube = "^15.0.0"
llama-index = "^0.9.24"
langchain-openai = "^0.0.2"
[tool.poetry.group.dev.dependencies]
pytest-asyncio = "^0.23.1"
types-redis = "^4.6.0.5"
ipykernel = "^6.27.0"
mypy = "^1.7.1"
mypy = "^1.8.0"
ruff = "^0.1.5"
httpx = "*"
pytest = "^7.4.2"
@ -152,7 +153,8 @@ exclude = ["src/backend/langflow/alembic/*"]
line-length = 120
[tool.mypy]
plugins = "pydantic.mypy"
plugins = ["pydantic.mypy"]
follow_imports = "silent"
[build-system]
requires = ["poetry-core"]

View file

@ -1,15 +1,12 @@
import platform
import socket
import sys
import time
import webbrowser
from pathlib import Path
from typing import Optional
import httpx
import typer
from dotenv import load_dotenv
from multiprocess import Process, cpu_count # type: ignore
from multiprocess import cpu_count # type: ignore
from rich import box
from rich import print as rprint
from rich.console import Console
@ -212,23 +209,12 @@ def run(
run_on_windows(host, port, log_level, options, app)
else:
# Run using gunicorn on Linux
run_on_mac_or_linux(host, port, log_level, options, app, open_browser)
run_on_mac_or_linux(host, port, log_level, options, app)
def run_on_mac_or_linux(host, port, log_level, options, app, open_browser=True):
webapp_process = Process(target=run_langflow, args=(host, port, log_level, options, app))
webapp_process.start()
status_code = 0
while status_code != 200:
try:
status_code = httpx.get(f"http://{host}:{port}/health").status_code
except Exception:
time.sleep(1)
def run_on_mac_or_linux(host, port, log_level, options, app):
print_banner(host, port)
if open_browser:
webbrowser.open(f"http://{host}:{port}")
run_langflow(host, port, log_level, options, app)
def run_on_windows(host, port, log_level, options, app):
@ -303,19 +289,26 @@ def run_langflow(host, port, log_level, options, app):
Run Langflow server on localhost
"""
try:
if platform.system() in ["Windows"]:
if platform.system() in ["Windows", "Darwin"]:
# Run using uvicorn on MacOS and Windows
# Windows doesn't support gunicorn
# MacOS requires an env variable to be set to use gunicorn
import uvicorn
uvicorn.run(app, host=host, port=port, log_level=log_level)
uvicorn.run(
app,
host=host,
port=port,
log_level=log_level,
)
else:
from langflow.server import LangflowApplication
LangflowApplication(app, options).run()
except KeyboardInterrupt:
pass
logger.info("Shutting down server")
sys.exit(0)
except Exception as e:
logger.exception(e)
sys.exit(1)

View file

@ -27,7 +27,8 @@ def upgrade() -> None:
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.create_unique_constraint('uq_user_id', ['id'])
except Exception:
except Exception as e:
print(e)
pass
# ### end Alembic commands ###
@ -44,6 +45,7 @@ def downgrade() -> None:
with op.batch_alter_table('apikey', schema=None) as batch_op:
batch_op.drop_constraint('uq_apikey_id', type_='unique')
except Exception:
except Exception as e:
print(e)
pass
# ### end Alembic commands ###

View file

@ -0,0 +1,71 @@
"""empty message
Revision ID: 0b8757876a7c
Revises: 006b3990db50
Create Date: 2024-01-17 10:32:56.686287
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '0b8757876a7c'
down_revision: Union[str, None] = '006b3990db50'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create lookup indexes on the apikey, flow and user tables (best-effort)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # (table, [(index name, columns, unique)]) — processed in the original order.
    index_specs = [
        ("apikey", [
            ("ix_apikey_api_key", ["api_key"], True),
            ("ix_apikey_name", ["name"], False),
            ("ix_apikey_user_id", ["user_id"], False),
        ]),
        ("flow", [
            ("ix_flow_description", ["description"], False),
            ("ix_flow_name", ["name"], False),
            ("ix_flow_user_id", ["user_id"], False),
        ]),
        ("user", [
            ("ix_user_username", ["username"], True),
        ]),
    ]
    for table_name, indexes in index_specs:
        # Each table gets its own try so one failure does not block the others.
        try:
            with op.batch_alter_table(table_name, schema=None) as batch_op:
                for index_name, columns, unique in indexes:
                    batch_op.create_index(batch_op.f(index_name), columns, unique=unique)
        except Exception as e:
            # Best-effort: the index may already exist on this database.
            print(e)
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the indexes created by upgrade(), in reverse table order (best-effort)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # (table, [index names]) — processed in the original order.
    drop_specs = [
        ("user", ["ix_user_username"]),
        ("flow", ["ix_flow_user_id", "ix_flow_name", "ix_flow_description"]),
        ("apikey", ["ix_apikey_user_id", "ix_apikey_name", "ix_apikey_api_key"]),
    ]
    for table_name, index_names in drop_specs:
        # Each table gets its own try so one failure does not block the others.
        try:
            with op.batch_alter_table(table_name, schema=None) as batch_op:
                for index_name in index_names:
                    batch_op.drop_index(batch_op.f(index_name))
        except Exception as e:
            # Best-effort: the index may already be absent on this database.
            print(e)
    # ### end Alembic commands ###

View file

@ -60,8 +60,8 @@ def upgrade() -> None:
sa.Column("create_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.Column("last_login_at", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("id"),
sa.PrimaryKeyConstraint("id", name="pk_user"),
sa.UniqueConstraint("id", name="uq_user_id"),
)
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.create_index(
@ -83,8 +83,8 @@ def upgrade() -> None:
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("id"),
sa.PrimaryKeyConstraint("id", name="pk_apikey"),
sa.UniqueConstraint("id", name="uq_apikey_id"),
)
with op.batch_alter_table("apikey", schema=None) as batch_op:
batch_op.create_index(
@ -106,8 +106,8 @@ def upgrade() -> None:
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("id"),
sa.PrimaryKeyConstraint("id", name="pk_flow"),
sa.UniqueConstraint("id", name="uq_flow_id"),
)
# Conditionally create indices for 'flow' table
# if _alembic_tmp_flow exists, then we need to drop it first
@ -145,7 +145,7 @@ def upgrade() -> None:
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
conn = op.get_bind()
inspector = Inspector.from_engine(conn) # type: ignore
# List existing tables
existing_tables = inspector.get_table_names()

View file

@ -29,9 +29,10 @@ def upgrade() -> None:
sa.Column('id', sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
sa.PrimaryKeyConstraint('id'),
)
except Exception:
except Exception as e:
print(e)
pass
# ### end Alembic commands ###
@ -40,6 +41,7 @@ def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
op.drop_table('credential')
except Exception:
except Exception as e:
print(e)
pass
# ### end Alembic commands ###

View file

@ -45,6 +45,7 @@ def downgrade() -> None:
with op.batch_alter_table("flow", schema=None) as batch_op:
batch_op.drop_column("is_component")
except Exception:
except Exception as e:
print(e)
pass
# ### end Alembic commands ###

View file

@ -37,7 +37,6 @@ def upgrade() -> None:
with op.batch_alter_table('flow', schema=None) as batch_op:
batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
# ### end Alembic commands ###

View file

@ -0,0 +1,59 @@
"""Add unique constraints
Revision ID: b2fa308044b5
Revises: 0b8757876a7c
Create Date: 2024-01-26 13:31:14.797548
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision: str = 'b2fa308044b5'
down_revision: Union[str, None] = '0b8757876a7c'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Drop the legacy flowstyle table and add ownership/metadata columns to flow."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        op.drop_table('flowstyle')
        with op.batch_alter_table('flow', schema=None) as batch_op:
            batch_op.add_column(sa.Column('is_component', sa.Boolean(), nullable=True))
            batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
            batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
            batch_op.add_column(sa.Column('user_id', sqlmodel.sql.sqltypes.GUID(), nullable=True))
            batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
            batch_op.create_foreign_key('fk_flow_user_id_user', 'user', ['user_id'], ['id'])
    except Exception as e:
        # Best-effort migration: log the swallowed error instead of hiding it,
        # matching the error-reporting style of the sibling migrations.
        print(e)
    # ### end Alembic commands ###
def downgrade() -> None:
    """Remove the flow ownership/metadata columns and recreate the flowstyle table."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        with op.batch_alter_table('flow', schema=None) as batch_op:
            batch_op.drop_constraint('fk_flow_user_id_user', type_='foreignkey')
            batch_op.drop_index(batch_op.f('ix_flow_user_id'))
            batch_op.drop_column('user_id')
            batch_op.drop_column('folder')
            batch_op.drop_column('updated_at')
            batch_op.drop_column('is_component')
        # Restore the table dropped by upgrade().
        op.create_table('flowstyle',
            sa.Column('color', sa.VARCHAR(), nullable=False),
            sa.Column('emoji', sa.VARCHAR(), nullable=False),
            sa.Column('flow_id', sa.CHAR(length=32), nullable=True),
            sa.Column('id', sa.CHAR(length=32), nullable=False),
            sa.ForeignKeyConstraint(['flow_id'], ['flow.id'], ),
            sa.PrimaryKeyConstraint('id'),
            sa.UniqueConstraint('id')
        )
    except Exception as e:
        # Best-effort migration: log the swallowed error instead of hiding it,
        # matching the error-reporting style of the sibling migrations.
        print(e)
    # ### end Alembic commands ###

View file

@ -0,0 +1,50 @@
"""New fixes
Revision ID: bc2f01c40e4a
Revises: b2fa308044b5
Create Date: 2024-01-26 13:34:14.496769
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision: str = 'bc2f01c40e4a'
down_revision: Union[str, None] = 'b2fa308044b5'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add ownership/metadata columns to flow (repair pass after b2fa308044b5)."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        with op.batch_alter_table('flow', schema=None) as batch_op:
            batch_op.add_column(sa.Column('is_component', sa.Boolean(), nullable=True))
            batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
            batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
            batch_op.add_column(sa.Column('user_id', sqlmodel.sql.sqltypes.GUID(), nullable=True))
            batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
            batch_op.create_foreign_key('flow_user_id_fkey', 'user', ['user_id'], ['id'])
    except Exception as e:
        # Best-effort migration: log the swallowed error instead of hiding it,
        # matching the error-reporting style of the sibling migrations.
        print(e)
    # ### end Alembic commands ###
def downgrade() -> None:
    """Remove the flow ownership/metadata columns added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        with op.batch_alter_table('flow', schema=None) as batch_op:
            batch_op.drop_constraint('flow_user_id_fkey', type_='foreignkey')
            batch_op.drop_index(batch_op.f('ix_flow_user_id'))
            batch_op.drop_column('user_id')
            batch_op.drop_column('folder')
            batch_op.drop_column('updated_at')
            batch_op.drop_column('is_component')
    except Exception as e:
        # Best-effort migration: log the swallowed error instead of hiding it,
        # matching the error-reporting style of the sibling migrations.
        print(e)
    # ### end Alembic commands ###

View file

@ -29,7 +29,8 @@ def upgrade() -> None:
except exc.SQLAlchemyError:
# connection.execute(text("ROLLBACK"))
pass
except Exception:
except Exception as e:
print(e)
pass
try:
@ -37,7 +38,8 @@ def upgrade() -> None:
except exc.SQLAlchemyError:
# connection.execute(text("ROLLBACK"))
pass
except Exception:
except Exception as e:
print(e)
pass
# ### end Alembic commands ###
@ -57,14 +59,15 @@ def downgrade() -> None:
sa.Column("is_read_only", sa.BOOLEAN(), nullable=False),
sa.Column("create_at", sa.DATETIME(), nullable=False),
sa.Column("update_at", sa.DATETIME(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.PrimaryKeyConstraint("id", name="pk_component"),
)
with op.batch_alter_table("component", schema=None) as batch_op:
batch_op.create_index("ix_component_name", ["name"], unique=False)
batch_op.create_index(
"ix_component_frontend_node_id", ["frontend_node_id"], unique=False
)
except Exception:
except Exception as e:
print(e)
pass
try:
@ -78,9 +81,10 @@ def downgrade() -> None:
["flow_id"],
["flow.id"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("id"),
sa.PrimaryKeyConstraint("id", name="pk_flowstyle"),
sa.UniqueConstraint("id", name="uq_flowstyle_id"),
)
except Exception:
except Exception as e:
print(e)
pass
# ### end Alembic commands ###

View file

@ -7,10 +7,8 @@ Create Date: 2023-10-18 23:12:27.297016
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "f5ee9749d1a6"
@ -26,7 +24,8 @@ def upgrade() -> None:
batch_op.alter_column(
"user_id", existing_type=sa.CHAR(length=32), nullable=True
)
except Exception:
except Exception as e:
print(e)
pass
# ### end Alembic commands ###
@ -39,7 +38,8 @@ def downgrade() -> None:
batch_op.alter_column(
"user_id", existing_type=sa.CHAR(length=32), nullable=False
)
except Exception:
except Exception as e:
print(e)
pass
# ### end Alembic commands ###

View file

@ -21,7 +21,8 @@ def upgrade() -> None:
try:
with op.batch_alter_table('credential', schema=None) as batch_op:
batch_op.create_foreign_key("fk_credential_user_id", 'user', ['user_id'], ['id'])
except Exception:
except Exception as e:
print(e)
pass
# ### end Alembic commands ###
@ -32,7 +33,8 @@ def downgrade() -> None:
try:
with op.batch_alter_table('credential', schema=None) as batch_op:
batch_op.drop_constraint("fk_credential_user_id", type_='foreignkey')
except Exception:
except Exception as e:
print(e)
pass
# ### end Alembic commands ###

View file

@ -1,11 +1,11 @@
import time
from fastapi import APIRouter, Depends, HTTPException, Query, WebSocket, WebSocketException, status
from fastapi import APIRouter, Depends, HTTPException, WebSocket, WebSocketException, status
from fastapi.responses import StreamingResponse
from langflow.api.utils import build_input_keys_response, format_elapsed_time
from langflow.api.v1.schemas import BuildStatus, BuiltResponse, InitResponse, StreamData
from langflow.graph.graph.base import Graph
from langflow.services.auth.utils import get_current_active_user, get_current_user_by_jwt
from langflow.services.auth.utils import get_current_active_user, get_current_user_for_websocket
from langflow.services.cache.service import BaseCacheService
from langflow.services.cache.utils import update_build_status
from langflow.services.chat.service import ChatService
@ -20,17 +20,16 @@ router = APIRouter(tags=["Chat"])
async def chat(
client_id: str,
websocket: WebSocket,
token: str = Query(...),
db: Session = Depends(get_session),
chat_service: "ChatService" = Depends(get_chat_service),
):
"""Websocket endpoint for chat."""
try:
user = await get_current_user_by_jwt(token, db)
user = await get_current_user_for_websocket(websocket, db)
await websocket.accept()
if not user:
await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
if not user.is_active:
elif not user.is_active:
await websocket.close(code=status.WS_1008_POLICY_VIOLATION, reason="Unauthorized")
if client_id in chat_service.cache_service:

View file

@ -3,12 +3,10 @@ from typing import Annotated, Any, List, Optional, Union
import sqlalchemy as sa
from fastapi import APIRouter, Body, Depends, HTTPException, UploadFile, status
from loguru import logger
from sqlmodel import select
from langflow.api.utils import update_frontend_node_with_template_values
from langflow.api.v1.schemas import (
CustomComponentCode,
PreloadResponse,
ProcessResponse,
TaskResponse,
TaskStatusResponse,
@ -17,12 +15,15 @@ from langflow.api.v1.schemas import (
from langflow.interface.custom.custom_component import CustomComponent
from langflow.interface.custom.directory_reader import DirectoryReader
from langflow.interface.custom.utils import build_custom_component_template
from langflow.processing.process import process_graph_cached, process_tweaks
from langflow.processing.process import build_graph_and_generate_result, process_graph_cached, process_tweaks
from langflow.services.auth.utils import api_key_security, get_current_active_user
from langflow.services.cache.utils import save_uploaded_file
from langflow.services.database.models.flow import Flow
from langflow.services.database.models.user.model import User
from langflow.services.deps import get_session, get_session_service, get_settings_service, get_task_service
from langflow.services.session.service import SessionService
from loguru import logger
from sqlmodel import select
try:
from langflow.worker import process_graph_cached_task
@ -32,9 +33,8 @@ except ImportError:
raise NotImplementedError("Celery is not installed")
from sqlmodel import Session
from langflow.services.task.service import TaskService
from sqlmodel import Session
# build router
router = APIRouter(tags=["Base"])
@ -148,6 +148,55 @@ async def process_json(
raise HTTPException(status_code=500, detail=str(exc)) from exc
# Endpoint to preload a graph
@router.post("/process/preload/{flow_id}", response_model=PreloadResponse)
async def preload_flow(
    session: Annotated[Session, Depends(get_session)],
    flow_id: str,
    session_id: Optional[str] = None,
    session_service: SessionService = Depends(get_session_service),
    api_key_user: User = Depends(api_key_security),
    clear_session: Annotated[bool, Body(embed=True)] = False,  # noqa: F821
):
    """Preload (build and cache) the graph of *flow_id* into a session.

    With ``clear_session=True`` the cached session is dropped and the response
    only reports whether the session is now empty. Otherwise the flow owned by
    the API-key user is looked up, its graph is built and stored under
    ``session_id`` (defaulting to ``flow_id``).

    Raises HTTP 500 with the underlying error message on any failure.
    """
    try:
        # Get the flow that matches the flow_id and belongs to the user
        # flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
        if clear_session:
            # NOTE(review): session_id may still be None on this path —
            # presumably clear_session/load_session treat None as a no-op key;
            # confirm against SessionService.
            session_service.clear_session(session_id)
            # Check if the session exists
            session_data = await session_service.load_session(session_id)
            # Session data is a tuple of (graph, artifacts)
            # or (None, None) if the session is empty
            if isinstance(session_data, tuple):
                graph, artifacts = session_data
                is_clear = graph is None and artifacts is None
            else:
                is_clear = session_data is None
            return PreloadResponse(session_id=session_id, is_clear=is_clear)
        else:
            if session_id is None:
                # Fall back to the flow id as the session key.
                session_id = flow_id
            flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
            if flow is None:
                raise ValueError(f"Flow {flow_id} not found")
            if flow.data is None:
                raise ValueError(f"Flow {flow_id} has no data")
            graph_data = flow.data
            # Drop any stale cached graph before rebuilding it.
            session_service.clear_session(session_id)
            # Load the graph using SessionService
            session_data = await session_service.load_session(session_id, graph_data)
            graph, artifacts = session_data if session_data else (None, None)
            if not graph:
                raise ValueError("Graph not found in the session")
            # Build the graph now so later /process calls can reuse it.
            _ = await graph.build()
            session_service.update_session(session_id, (graph, artifacts))
            return PreloadResponse(session_id=session_id)
    except Exception as exc:
        logger.exception(exc)
        raise HTTPException(status_code=500, detail=str(exc)) from exc
@router.post(
"/predict/{flow_id}",
response_model=ProcessResponse,
@ -167,36 +216,75 @@ async def process(
task_service: "TaskService" = Depends(get_task_service),
api_key_user: User = Depends(api_key_security),
sync: Annotated[bool, Body(embed=True)] = True, # noqa: F821
session_service: SessionService = Depends(get_session_service),
):
"""
Endpoint to process an input with a given flow_id.
"""
try:
if api_key_user is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid API Key",
if session_id:
session_data = await session_service.load_session(session_id)
graph, artifacts = session_data if session_data else (None, None)
task_result: Any = None
task_status = None
task_id = None
if not graph:
raise ValueError("Graph not found in the session")
result = await build_graph_and_generate_result(
graph=graph,
inputs=inputs,
artifacts=artifacts,
session_id=session_id,
session_service=session_service,
)
task_id = str(id(result))
if isinstance(result, dict) and "result" in result:
task_result = result["result"]
session_id = result["session_id"]
elif hasattr(result, "result") and hasattr(result, "session_id"):
task_result = result.result
session_id = result.session_id
else:
task_result = result
if task_id:
task_response = TaskResponse(id=task_id, href=f"api/v1/task/{task_id}")
else:
task_response = None
return ProcessResponse(
result=task_result,
status=task_status,
task=task_response,
session_id=session_id,
backend=task_service.backend_name,
)
# Get the flow that matches the flow_id and belongs to the user
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
if flow is None:
raise ValueError(f"Flow {flow_id} not found")
else:
if api_key_user is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid API Key",
)
if flow.data is None:
raise ValueError(f"Flow {flow_id} has no data")
graph_data = flow.data
return await process_graph_data(
graph_data=graph_data,
inputs=inputs,
tweaks=tweaks,
clear_cache=clear_cache,
session_id=session_id,
task_service=task_service,
sync=sync,
)
# Get the flow that matches the flow_id and belongs to the user
# flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
if flow is None:
raise ValueError(f"Flow {flow_id} not found")
if flow.data is None:
raise ValueError(f"Flow {flow_id} has no data")
graph_data = flow.data
return await process_graph_data(
graph_data=graph_data,
inputs=inputs,
tweaks=tweaks,
clear_cache=clear_cache,
session_id=session_id,
task_service=task_service,
sync=sync,
)
except sa.exc.StatementError as exc:
# StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
if "badly formed hexadecimal UUID string" in str(exc):

View file

@ -1,4 +1,4 @@
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi import APIRouter, Depends, HTTPException, Request, Response, status
from fastapi.security import OAuth2PasswordRequestForm
from sqlmodel import Session
@ -16,6 +16,7 @@ router = APIRouter(tags=["Login"])
@router.post("/login", response_model=Token)
async def login_to_get_access_token(
response: Response,
form_data: OAuth2PasswordRequestForm = Depends(),
db: Session = Depends(get_session),
# _: Session = Depends(get_current_active_user)
@ -31,7 +32,10 @@ async def login_to_get_access_token(
) from exc
if user:
return create_user_tokens(user_id=user.id, db=db, update_last_login=True)
tokens = create_user_tokens(user_id=user.id, db=db, update_last_login=True)
response.set_cookie("refresh_token_lf", tokens["refresh_token"], httponly=True)
response.set_cookie("access_token_lf", tokens["access_token"], httponly=False)
return tokens
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
@ -41,9 +45,13 @@ async def login_to_get_access_token(
@router.get("/auto_login")
async def auto_login(db: Session = Depends(get_session), settings_service=Depends(get_settings_service)):
async def auto_login(
response: Response, db: Session = Depends(get_session), settings_service=Depends(get_settings_service)
):
if settings_service.auth_settings.AUTO_LOGIN:
return create_user_longterm_token(db)
tokens = create_user_longterm_token(db)
response.set_cookie("access_token_lf", tokens["access_token"], httponly=False)
return tokens
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
@ -55,12 +63,23 @@ async def auto_login(db: Session = Depends(get_session), settings_service=Depend
@router.post("/refresh")
async def refresh_token(token: str):
async def refresh_token(request: Request, response: Response):
token = request.cookies.get("refresh_token_lf")
if token:
return create_refresh_token(token)
tokens = create_refresh_token(token)
response.set_cookie("refresh_token_lf", tokens["refresh_token"], httponly=True)
response.set_cookie("access_token_lf", tokens["access_token"], httponly=False)
return tokens
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid refresh token",
headers={"WWW-Authenticate": "Bearer"},
)
@router.post("/logout")
async def logout(response: Response):
response.delete_cookie("refresh_token_lf")
response.delete_cookie("access_token_lf")
return {"message": "Logout successful"}

View file

@ -64,6 +64,13 @@ class ProcessResponse(BaseModel):
backend: Optional[str] = None
class PreloadResponse(BaseModel):
    """Preload response schema."""

    # Identifier of the session the graph was preloaded into (or checked).
    session_id: Optional[str] = None
    # True when the session holds no graph/artifacts; only populated by the
    # clear-session path of the preload endpoint, otherwise left as None.
    is_clear: Optional[bool] = None
# TaskStatusResponse(
# status=task.status, result=task.result if task.ready() else None
# )

View file

@ -41,12 +41,11 @@ class AgentInitializerComponent(CustomComponent):
handle_parsing_errors=True,
max_iterations=max_iterations,
)
else:
return initialize_agent(
tools=tools,
llm=llm,
agent=agent,
return_intermediate_steps=True,
handle_parsing_errors=True,
max_iterations=max_iterations,
)
return initialize_agent(
tools=tools,
llm=llm,
agent=agent,
return_intermediate_steps=True,
handle_parsing_errors=True,
max_iterations=max_iterations,
)

View file

@ -0,0 +1,23 @@
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, AgentExecutor
from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_agent
class CSVAgentComponent(CustomComponent):
    """Langflow component wrapping LangChain's CSV agent builder."""

    display_name = "CSVAgent"
    description = "Construct a CSV agent from a CSV and tools."
    documentation = "https://python.langchain.com/docs/modules/agents/toolkits/csv"

    def build_config(self):
        """Describe the fields rendered in the UI for this component."""
        llm_field = {"display_name": "LLM", "type": BaseLanguageModel}
        path_field = {
            "display_name": "Path",
            "field_type": "file",
            "suffixes": [".csv"],
            "file_types": [".csv"],
        }
        return {"llm": llm_field, "path": path_field}

    def build(
        self,
        llm: BaseLanguageModel,
        path: str,
    ) -> AgentExecutor:
        """Instantiate and return the CSV agent for *llm* over the file at *path*."""
        return create_csv_agent(llm=llm, path=path)

View file

@ -0,0 +1,24 @@
from langflow import CustomComponent
from langchain.agents import AgentExecutor, create_json_agent
from langflow.field_typing import (
BaseLanguageModel,
)
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
class JsonAgentComponent(CustomComponent):
    """Langflow component wrapping LangChain's JSON agent builder."""

    display_name = "JsonAgent"
    description = "Construct a json agent from an LLM and tools."

    def build_config(self):
        """Describe the fields rendered in the UI for this component."""
        fields = {}
        fields["llm"] = {"display_name": "LLM"}
        fields["toolkit"] = {"display_name": "Toolkit"}
        return fields

    def build(
        self,
        llm: BaseLanguageModel,
        toolkit: JsonToolkit,
    ) -> AgentExecutor:
        """Return a JSON agent executor built from *llm* and *toolkit*."""
        return create_json_agent(llm=llm, toolkit=toolkit)

View file

@ -3,7 +3,7 @@ from typing import List, Optional
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.conversational_retrieval.openai_functions import _get_default_system_message
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.chat_models import ChatOpenAI
from langchain_community.chat_models import ChatOpenAI
from langchain.memory.token_buffer import ConversationTokenBufferMemory
from langchain.prompts import SystemMessagePromptTemplate
from langchain.prompts.chat import MessagesPlaceholder

View file

@ -0,0 +1,29 @@
from langflow import CustomComponent
from typing import Union, Callable
from langchain.agents import AgentExecutor
from langflow.field_typing import BaseLanguageModel
from langchain_community.agent_toolkits.sql.base import create_sql_agent
from langchain.sql_database import SQLDatabase
from langchain_community.agent_toolkits import SQLDatabaseToolkit
class SQLAgentComponent(CustomComponent):
    """Langflow component that builds an SQL agent from a database URI and an LLM.

    The database connection and its toolkit are created from the URI at build
    time; the resulting agent can inspect the schema and run SQL queries.
    """

    display_name = "SQLAgent"
    description = "Construct an SQL agent from an LLM and tools."

    def build_config(self):
        return {
            "llm": {"display_name": "LLM"},
            "database_uri": {"display_name": "Database URI"},
            "verbose": {"display_name": "Verbose", "value": False, "advanced": True},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        database_uri: str,
        verbose: bool = False,
    ) -> Union[AgentExecutor, Callable]:
        """Create the SQL agent.

        Args:
            llm: Language model driving the agent.
            database_uri: SQLAlchemy-style URI of the target database.
            verbose: Whether the agent executor should log its steps.

        Returns:
            The configured agent executor.
        """
        db = SQLDatabase.from_uri(database_uri)
        toolkit = SQLDatabaseToolkit(db=db, llm=llm)
        # Bug fix: ``verbose`` was accepted (and exposed in build_config) but
        # never forwarded, so the toggle had no effect.
        return create_sql_agent(llm=llm, toolkit=toolkit, verbose=verbose)

View file

@ -0,0 +1,23 @@
from langflow import CustomComponent
from langchain.agents import AgentExecutor, create_vectorstore_agent
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreToolkit
from typing import Union, Callable
from langflow.field_typing import BaseLanguageModel
class VectorStoreAgentComponent(CustomComponent):
    """Langflow component that turns a vector-store toolkit into an agent."""

    display_name = "VectorStoreAgent"
    description = "Construct an agent from a Vector Store."

    def build_config(self):
        """Inputs: the LLM and the vector-store toolkit."""
        return {
            "llm": {"display_name": "LLM"},
            "vector_store_toolkit": {"display_name": "Vector Store Info"},
        }

    def build(
        self,
        llm: BaseLanguageModel,
        vector_store_toolkit: VectorStoreToolkit,
    ) -> Union[AgentExecutor, Callable]:
        """Create and return the vector-store agent executor."""
        executor = create_vectorstore_agent(llm=llm, toolkit=vector_store_toolkit)
        return executor

View file

@ -0,0 +1,19 @@
from langflow import CustomComponent
from langchain_core.language_models.base import BaseLanguageModel
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreRouterToolkit
from langchain.agents import create_vectorstore_router_agent
from typing import Callable
class VectorStoreRouterAgentComponent(CustomComponent):
    """Langflow component for an agent that routes between multiple vector stores."""

    display_name = "VectorStoreRouterAgent"
    description = "Construct an agent from a Vector Store Router."

    def build_config(self):
        """Inputs: the LLM and the router toolkit."""
        config = {"llm": {"display_name": "LLM"}}
        config["vectorstoreroutertoolkit"] = {"display_name": "Vector Store Router Toolkit"}
        return config

    def build(self, llm: BaseLanguageModel, vectorstoreroutertoolkit: VectorStoreRouterToolkit) -> Callable:
        """Create the router agent from the LLM and toolkit."""
        return create_vectorstore_router_agent(llm=llm, toolkit=vectorstoreroutertoolkit)

View file

@ -28,5 +28,5 @@ class LLMChainComponent(CustomComponent):
prompt: BasePromptTemplate,
llm: BaseLanguageModel,
memory: Optional[BaseMemory] = None,
) -> Union[Chain, Callable]:
) -> Union[Chain, Callable, LLMChain]:
return LLMChain(prompt=prompt, llm=llm, memory=memory)

View file

@ -0,0 +1,24 @@
from langflow import CustomComponent
from langchain.chains import LLMCheckerChain
from typing import Union, Callable
from langflow.field_typing import (
BaseLanguageModel,
Chain,
)
class LLMCheckerChainComponent(CustomComponent):
    """Langflow component for LangChain's LLMCheckerChain (self-verifying Q&A)."""

    display_name = "LLMCheckerChain"
    description = ""
    documentation = "https://python.langchain.com/docs/modules/chains/additional/llm_checker"

    def build_config(self):
        return {
            "llm": {"display_name": "LLM"},
        }

    def build(
        self,
        llm: BaseLanguageModel,
    ) -> Union[Chain, Callable]:
        """Build the checker chain from the LLM.

        Uses the documented ``from_llm`` classmethod instead of
        ``LLMCheckerChain(llm=...)``: direct instantiation with ``llm`` goes
        through a deprecated compatibility validator in LangChain and is
        slated for removal.
        """
        return LLMCheckerChain.from_llm(llm=llm)

View file

@ -0,0 +1,31 @@
from typing import Callable, Optional, Union
from langchain.chains import LLMChain, LLMMathChain
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, BaseMemory, Chain
class LLMMathChainComponent(CustomComponent):
    """Langflow wrapper for LangChain's LLMMathChain (math via generated Python)."""

    display_name = "LLMMathChain"
    description = "Chain that interprets a prompt and executes python code to do math."
    documentation = "https://python.langchain.com/docs/modules/chains/additional/llm_math"

    def build_config(self):
        """Field metadata; key names match the build() parameters."""
        fields = {}
        fields["llm"] = {"display_name": "LLM"}
        fields["llm_chain"] = {"display_name": "LLM Chain"}
        fields["memory"] = {"display_name": "Memory"}
        fields["input_key"] = {"display_name": "Input Key"}
        fields["output_key"] = {"display_name": "Output Key"}
        return fields

    def build(
        self,
        llm: BaseLanguageModel,
        llm_chain: LLMChain,
        input_key: str = "question",
        output_key: str = "answer",
        memory: Optional[BaseMemory] = None,
    ) -> Union[LLMMathChain, Callable, Chain]:
        """Assemble the math chain from its parts."""
        return LLMMathChain(
            llm=llm,
            llm_chain=llm_chain,
            input_key=input_key,
            output_key=output_key,
            memory=memory,
        )

View file

@ -0,0 +1,39 @@
from typing import Callable, Optional, Union
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA
from langflow import CustomComponent
from langflow.field_typing import BaseMemory, BaseRetriever
class RetrievalQAComponent(CustomComponent):
    """Langflow component for LangChain's RetrievalQA chain."""

    display_name = "RetrievalQA"
    description = "Chain for question-answering against an index."

    def build_config(self):
        """Field metadata for the chain's inputs; key names match build()."""
        return {
            "combine_documents_chain": {"display_name": "Combine Documents Chain"},
            "retriever": {"display_name": "Retriever"},
            "memory": {"display_name": "Memory", "required": False},
            "input_key": {"display_name": "Input Key", "advanced": True},
            "output_key": {"display_name": "Output Key", "advanced": True},
            "return_source_documents": {"display_name": "Return Source Documents"},
        }

    def build(
        self,
        combine_documents_chain: BaseCombineDocumentsChain,
        retriever: BaseRetriever,
        memory: Optional[BaseMemory] = None,
        input_key: str = "query",
        output_key: str = "result",
        return_source_documents: bool = True,
    ) -> Union[BaseRetrievalQA, Callable]:
        """Assemble the RetrievalQA chain from its components."""
        chain_kwargs = dict(
            combine_documents_chain=combine_documents_chain,
            retriever=retriever,
            memory=memory,
            input_key=input_key,
            output_key=output_key,
            return_source_documents=return_source_documents,
        )
        return RetrievalQA(**chain_kwargs)

View file

@ -0,0 +1,42 @@
from typing import Optional
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, BaseMemory, BaseRetriever
class RetrievalQAWithSourcesChainComponent(CustomComponent):
    """Langflow component for question answering with source citations over an index."""

    display_name = "RetrievalQAWithSourcesChain"
    description = "Question-answering with sources over an index."

    def build_config(self):
        return {
            "llm": {"display_name": "LLM"},
            "chain_type": {
                "display_name": "Chain Type",
                "options": ["stuff", "map_reduce", "map_rerank", "refine"],
            },
            "memory": {"display_name": "Memory"},
            "return_source_documents": {"display_name": "Return Source Documents"},
        }

    def build(
        self,
        retriever: BaseRetriever,
        llm: BaseLanguageModel,
        combine_documents_chain: BaseCombineDocumentsChain,
        chain_type: str,
        memory: Optional[BaseMemory] = None,
        return_source_documents: Optional[bool] = True,
    ) -> BaseQAWithSourcesChain:
        """Build the chain.

        Bug fix: ``from_chain_type`` constructs its own combine-documents
        chain from ``chain_type`` and then calls
        ``cls(combine_documents_chain=..., **kwargs)``, so forwarding the
        user's chain through ``**kwargs`` raised
        ``TypeError: got multiple values for 'combine_documents_chain'``.
        When a chain is supplied we use it directly; otherwise we defer to
        ``from_chain_type``.
        """
        if combine_documents_chain is not None:
            return RetrievalQAWithSourcesChain(
                combine_documents_chain=combine_documents_chain,
                retriever=retriever,
                memory=memory,
                return_source_documents=return_source_documents,
            )
        return RetrievalQAWithSourcesChain.from_chain_type(
            llm=llm,
            chain_type=chain_type,
            memory=memory,
            return_source_documents=return_source_documents,
            retriever=retriever,
        )

View file

@ -0,0 +1,25 @@
from langflow import CustomComponent
from typing import Callable, Union
from langflow.field_typing import BasePromptTemplate, BaseLanguageModel, Chain
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_experimental.sql.base import SQLDatabaseChain
class SQLDatabaseChainComponent(CustomComponent):
    """Langflow component wrapping the experimental SQLDatabaseChain."""

    display_name = "SQLDatabaseChain"
    description = ""

    def build_config(self):
        """Inputs: database connection, LLM, and prompt template."""
        return {
            "db": {"display_name": "Database"},
            "llm": {"display_name": "LLM"},
            "prompt": {"display_name": "Prompt"},
        }

    def build(
        self,
        db: SQLDatabase,
        llm: BaseLanguageModel,
        prompt: BasePromptTemplate,
    ) -> Union[Chain, Callable, SQLDatabaseChain]:
        """Build the chain via the ``from_llm`` constructor."""
        chain = SQLDatabaseChain.from_llm(llm=llm, db=db, prompt=prompt)
        return chain

View file

@ -0,0 +1,42 @@
from langflow import CustomComponent
from langchain.docstore.document import Document
from typing import Optional, Dict, Any
class DirectoryLoaderComponent(CustomComponent):
    """Langflow component describing a directory-load operation.

    NOTE(review): ``build`` packs loader-style kwargs (glob, path, ...) into a
    ``Document`` rather than running a DirectoryLoader — presumably downstream
    code consumes these fields; confirm against the caller.
    """

    display_name = "DirectoryLoader"
    description = "Load from a directory."

    def build_config(self) -> Dict[str, Any]:
        return {
            "glob": {"display_name": "Glob Pattern", "value": "**/*.txt"},
            "load_hidden": {"display_name": "Load Hidden Files", "value": False, "advanced": True},
            "max_concurrency": {"display_name": "Max Concurrency", "value": 10, "advanced": True},
            "metadata": {"display_name": "Metadata", "value": {}},
            "path": {"display_name": "Local Directory"},
            "recursive": {"display_name": "Recursive", "value": True, "advanced": True},
            "silent_errors": {"display_name": "Silent Errors", "value": False, "advanced": True},
            "use_multithreading": {"display_name": "Use Multithreading", "value": True, "advanced": True},
        }

    def build(
        self,
        glob: str,
        path: str,
        load_hidden: Optional[bool] = False,
        max_concurrency: Optional[int] = 10,
        metadata: Optional[dict] = None,
        recursive: Optional[bool] = True,
        silent_errors: Optional[bool] = False,
        use_multithreading: Optional[bool] = True,
    ) -> Document:
        """Build the Document carrying the directory-load parameters.

        Bug fix: the mutable default ``metadata={}`` was a single dict shared
        across calls; a ``None`` sentinel yields a fresh dict per call.
        """
        if metadata is None:
            metadata = {}
        return Document(
            glob=glob,
            path=path,
            load_hidden=load_hidden,
            max_concurrency=max_concurrency,
            metadata=metadata,
            recursive=recursive,
            silent_errors=silent_errors,
            use_multithreading=use_multithreading,
        )

View file

@ -0,0 +1,65 @@
from langchain.embeddings.base import Embeddings
from langchain_community.embeddings import AzureOpenAIEmbeddings
from langflow import CustomComponent
class AzureOpenAIEmbeddingsComponent(CustomComponent):
    """Langflow component for Azure OpenAI embedding models."""

    display_name: str = "AzureOpenAIEmbeddings"
    description: str = "Embeddings model from Azure OpenAI."
    documentation: str = "https://python.langchain.com/docs/integrations/text_embedding/azureopenai"
    beta = False

    # Supported Azure OpenAI API versions; the newest entry is the default.
    API_VERSION_OPTIONS = [
        "2022-12-01",
        "2023-03-15-preview",
        "2023-05-15",
        "2023-06-01-preview",
        "2023-07-01-preview",
        "2023-08-01-preview",
    ]

    def build_config(self):
        """Required endpoint/deployment/key fields plus an advanced version picker."""
        return {
            "azure_endpoint": {
                "display_name": "Azure Endpoint",
                "required": True,
                "info": "Your Azure endpoint, including the resource.. Example: `https://example-resource.azure.openai.com/`",
            },
            "azure_deployment": {
                "display_name": "Deployment Name",
                "required": True,
            },
            "api_version": {
                "display_name": "API Version",
                "options": self.API_VERSION_OPTIONS,
                "value": self.API_VERSION_OPTIONS[-1],
                "advanced": True,
            },
            "api_key": {
                "display_name": "API Key",
                "required": True,
                "password": True,
            },
            "code": {"show": False},
        }

    def build(
        self,
        azure_endpoint: str,
        azure_deployment: str,
        api_version: str,
        api_key: str,
    ) -> Embeddings:
        """Instantiate the embeddings client, surfacing construction failures."""
        try:
            return AzureOpenAIEmbeddings(
                azure_endpoint=azure_endpoint,
                azure_deployment=azure_deployment,
                api_version=api_version,
                api_key=api_key,
            )
        except Exception as e:
            raise ValueError("Could not connect to AzureOpenAIEmbeddings API.") from e

View file

@ -0,0 +1,36 @@
from typing import Optional
from langchain_community.embeddings.cohere import CohereEmbeddings
from langflow import CustomComponent
class CohereEmbeddingsComponent(CustomComponent):
    """Langflow component for Cohere embedding models."""

    display_name = "CohereEmbeddings"
    description = "Cohere embedding models."

    def build_config(self):
        return {
            "cohere_api_key": {"display_name": "Cohere API Key", "password": True},
            "model": {"display_name": "Model", "default": "embed-english-v2.0", "advanced": True},
            "truncate": {"display_name": "Truncate", "advanced": True},
            "max_retries": {"display_name": "Max Retries", "advanced": True},
            "user_agent": {"display_name": "User Agent", "advanced": True},
            # Consistency fix: build() accepts ``request_timeout`` but it was
            # not exposed in the config, so it could never be set from the UI.
            "request_timeout": {"display_name": "Request Timeout", "advanced": True},
        }

    def build(
        self,
        request_timeout: Optional[float] = None,
        cohere_api_key: str = "",
        max_retries: Optional[int] = None,
        model: str = "embed-english-v2.0",
        truncate: Optional[str] = None,
        user_agent: str = "langchain",
    ) -> CohereEmbeddings:
        """Create the CohereEmbeddings client.

        Args:
            request_timeout: Per-request timeout in seconds, if any.
            cohere_api_key: Cohere API key.
            max_retries: Maximum retry attempts for failed requests.
            model: Embedding model identifier.
            truncate: Truncation strategy passed through to Cohere.
            user_agent: User-agent string sent with requests.
        """
        return CohereEmbeddings(  # type: ignore
            max_retries=max_retries,
            user_agent=user_agent,
            request_timeout=request_timeout,
            cohere_api_key=cohere_api_key,
            model=model,
            truncate=truncate,
        )

View file

@ -0,0 +1,36 @@
from langflow import CustomComponent
from typing import Optional, Dict
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
class HuggingFaceEmbeddingsComponent(CustomComponent):
    """Langflow component for HuggingFace sentence_transformers embedding models."""

    display_name = "HuggingFaceEmbeddings"
    description = "HuggingFace sentence_transformers embedding models."
    documentation = (
        "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/sentence_transformers"
    )

    def build_config(self):
        return {
            "cache_folder": {"display_name": "Cache Folder", "advanced": True},
            "encode_kwargs": {"display_name": "Encode Kwargs", "advanced": True, "field_type": "dict"},
            "model_kwargs": {"display_name": "Model Kwargs", "field_type": "dict", "advanced": True},
            "model_name": {"display_name": "Model Name"},
            "multi_process": {"display_name": "Multi Process", "advanced": True},
        }

    def build(
        self,
        cache_folder: Optional[str] = None,
        encode_kwargs: Optional[Dict] = None,
        model_kwargs: Optional[Dict] = None,
        model_name: str = "sentence-transformers/all-mpnet-base-v2",
        multi_process: bool = False,
    ) -> HuggingFaceEmbeddings:
        """Create the embeddings model.

        Bug fix: the mutable defaults ``encode_kwargs={}`` / ``model_kwargs={}``
        were shared between calls; ``None`` sentinels give a fresh dict each time.
        """
        return HuggingFaceEmbeddings(
            cache_folder=cache_folder,
            encode_kwargs=encode_kwargs if encode_kwargs is not None else {},
            model_kwargs=model_kwargs if model_kwargs is not None else {},
            model_name=model_name,
            multi_process=multi_process,
        )

View file

@ -0,0 +1,38 @@
from typing import Optional
from langflow import CustomComponent
from langchain.embeddings.base import Embeddings
from langchain_community.embeddings import OllamaEmbeddings
class OllamaEmbeddingsComponent(CustomComponent):
    """Langflow component exposing embedding models served by a local Ollama instance."""

    display_name: str = "Ollama Embeddings"
    description: str = "Embeddings model from Ollama."
    documentation = "https://python.langchain.com/docs/integrations/text_embedding/ollama"
    beta = True

    def build_config(self):
        """Model name, server URL, and temperature inputs."""
        return {
            "model": {
                "display_name": "Ollama Model",
            },
            "base_url": {"display_name": "Ollama Base URL"},
            "temperature": {"display_name": "Model Temperature"},
            "code": {"show": False},
        }

    def build(
        self,
        model: str = "llama2",
        base_url: str = "http://localhost:11434",
        temperature: Optional[float] = None,
    ) -> Embeddings:
        """Build the embeddings client, surfacing construction failures."""
        try:
            embeddings = OllamaEmbeddings(model=model, base_url=base_url, temperature=temperature)  # type: ignore
        except Exception as e:
            raise ValueError("Could not connect to Ollama API.") from e
        return embeddings

View file

@ -0,0 +1,117 @@
from typing import Any, Callable, Dict, List, Optional, Union
from langchain_openai.embeddings.base import OpenAIEmbeddings
from langflow import CustomComponent
from langflow.field_typing import NestedDict
class OpenAIEmbeddingsComponent(CustomComponent):
    """Langflow component for OpenAI embedding models."""

    display_name = "OpenAIEmbeddings"
    description = "OpenAI embedding models"

    def build_config(self):
        return {
            "allowed_special": {
                "display_name": "Allowed Special",
                "advanced": True,
                "field_type": "str",
                "is_list": True,
            },
            "default_headers": {
                "display_name": "Default Headers",
                "advanced": True,
                "field_type": "dict",
            },
            "default_query": {
                "display_name": "Default Query",
                "advanced": True,
                "field_type": "NestedDict",
            },
            "disallowed_special": {
                "display_name": "Disallowed Special",
                "advanced": True,
                "field_type": "str",
                "is_list": True,
            },
            "chunk_size": {"display_name": "Chunk Size", "advanced": True},
            "client": {"display_name": "Client", "advanced": True},
            "deployment": {"display_name": "Deployment", "advanced": True},
            "embedding_ctx_length": {
                "display_name": "Embedding Context Length",
                "advanced": True,
            },
            "max_retries": {"display_name": "Max Retries", "advanced": True},
            "model": {"display_name": "Model", "advanced": True},
            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
            "openai_api_base": {"display_name": "OpenAI API Base", "password": True, "advanced": True},
            "openai_api_key": {"display_name": "OpenAI API Key", "password": True},
            "openai_api_type": {"display_name": "OpenAI API Type", "advanced": True, "password": True},
            "openai_api_version": {
                "display_name": "OpenAI API Version",
                "advanced": True,
            },
            "openai_organization": {
                "display_name": "OpenAI Organization",
                "advanced": True,
            },
            "openai_proxy": {"display_name": "OpenAI Proxy", "advanced": True},
            "request_timeout": {"display_name": "Request Timeout", "advanced": True},
            "show_progress_bar": {
                "display_name": "Show Progress Bar",
                "advanced": True,
            },
            "skip_empty": {"display_name": "Skip Empty", "advanced": True},
            "tiktoken_model_name": {"display_name": "TikToken Model Name"},
            "tikToken_enable": {"display_name": "TikToken Enable"},
        }

    def build(
        self,
        default_headers: Optional[Dict[str, str]] = None,
        default_query: Optional[NestedDict] = None,
        allowed_special: Optional[List[str]] = None,
        disallowed_special: Optional[List[str]] = None,
        chunk_size: int = 1000,
        client: Optional[Any] = None,
        deployment: str = "text-embedding-ada-002",
        embedding_ctx_length: int = 8191,
        max_retries: int = 6,
        model: str = "text-embedding-ada-002",
        model_kwargs: Optional[NestedDict] = None,
        openai_api_base: Optional[str] = None,
        openai_api_key: Optional[str] = "",
        openai_api_type: Optional[str] = None,
        openai_api_version: Optional[str] = None,
        openai_organization: Optional[str] = None,
        openai_proxy: Optional[str] = None,
        request_timeout: Optional[float] = None,
        show_progress_bar: bool = False,
        skip_empty: bool = False,
        tikToken_enable: bool = True,
        tiktoken_model_name: Optional[str] = None,
    ) -> Union[OpenAIEmbeddings, Callable]:
        """Create the OpenAIEmbeddings client.

        Bug fix: the mutable defaults ``default_query={}``,
        ``allowed_special=[]``, ``disallowed_special=["all"]`` and
        ``model_kwargs={}`` were shared across calls; ``None`` sentinels are
        normalized below to the same effective values.
        """
        if default_query is None:
            default_query = {}
        if allowed_special is None:
            allowed_special = []
        if disallowed_special is None:
            disallowed_special = ["all"]
        if model_kwargs is None:
            model_kwargs = {}
        return OpenAIEmbeddings(
            tiktoken_enabled=tikToken_enable,
            default_headers=default_headers,
            default_query=default_query,
            allowed_special=set(allowed_special),
            disallowed_special=set(disallowed_special),
            chunk_size=chunk_size,
            client=client,
            deployment=deployment,
            embedding_ctx_length=embedding_ctx_length,
            max_retries=max_retries,
            model=model,
            model_kwargs=model_kwargs,
            base_url=openai_api_base,
            api_key=openai_api_key,
            openai_api_type=openai_api_type,
            api_version=openai_api_version,
            organization=openai_organization,
            openai_proxy=openai_proxy,
            timeout=request_timeout,
            show_progress_bar=show_progress_bar,
            skip_empty=skip_empty,
            tiktoken_model_name=tiktoken_model_name,
        )

View file

@ -0,0 +1,60 @@
from langflow import CustomComponent
from langchain.embeddings import VertexAIEmbeddings
from typing import Optional, List
class VertexAIEmbeddingsComponent(CustomComponent):
    """Langflow component for Google Cloud Vertex AI embedding models."""

    display_name = "VertexAIEmbeddings"
    description = "Google Cloud VertexAI embedding models."

    def build_config(self):
        """Field metadata for every build() parameter."""
        return {
            "credentials": {"display_name": "Credentials", "value": "", "file_types": [".json"], "field_type": "file"},
            "instance": {"display_name": "instance", "advanced": True, "field_type": "dict"},
            "location": {"display_name": "Location", "value": "us-central1", "advanced": True},
            "max_output_tokens": {"display_name": "Max Output Tokens", "value": 128},
            "max_retries": {"display_name": "Max Retries", "value": 6, "advanced": True},
            "model_name": {"display_name": "Model Name", "value": "textembedding-gecko"},
            "n": {"display_name": "N", "value": 1, "advanced": True},
            "project": {"display_name": "Project", "advanced": True},
            "request_parallelism": {"display_name": "Request Parallelism", "value": 5, "advanced": True},
            "stop": {"display_name": "Stop", "advanced": True},
            "streaming": {"display_name": "Streaming", "value": False, "advanced": True},
            "temperature": {"display_name": "Temperature", "value": 0.0},
            "top_k": {"display_name": "Top K", "value": 40, "advanced": True},
            "top_p": {"display_name": "Top P", "value": 0.95, "advanced": True},
        }

    def build(
        self,
        instance: Optional[str] = None,
        credentials: Optional[str] = None,
        location: str = "us-central1",
        max_output_tokens: int = 128,
        max_retries: int = 6,
        model_name: str = "textembedding-gecko",
        n: int = 1,
        project: Optional[str] = None,
        request_parallelism: int = 5,
        stop: Optional[List[str]] = None,
        streaming: bool = False,
        temperature: float = 0.0,
        top_k: int = 40,
        top_p: float = 0.95,
    ) -> VertexAIEmbeddings:
        """Forward all parameters to the VertexAIEmbeddings constructor."""
        params = dict(
            instance=instance,
            credentials=credentials,
            location=location,
            max_output_tokens=max_output_tokens,
            max_retries=max_retries,
            model_name=model_name,
            n=n,
            project=project,
            request_parallelism=request_parallelism,
            stop=stop,
            streaming=streaming,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
        )
        return VertexAIEmbeddings(**params)

View file

@ -2,7 +2,6 @@ from typing import Optional
from langchain.llms.base import BaseLLM
from langchain.llms.bedrock import Bedrock
from langflow import CustomComponent
@ -44,7 +43,7 @@ class AmazonBedrockComponent(CustomComponent):
model_kwargs: Optional[dict] = None,
endpoint_url: Optional[str] = None,
streaming: bool = False,
cache: bool | None = None,
cache: Optional[bool] = None,
) -> BaseLLM:
try:
output = Bedrock(

View file

@ -0,0 +1,48 @@
from typing import Optional
from langchain_community.llms.anthropic import Anthropic
from pydantic.v1 import SecretStr
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, NestedDict
class AnthropicComponent(CustomComponent):
    """Langflow component for Anthropic completion models."""

    display_name = "Anthropic"
    description = "Anthropic large language models."

    def build_config(self):
        return {
            "anthropic_api_key": {
                "display_name": "Anthropic API Key",
                "type": str,
                "password": True,
            },
            "anthropic_api_url": {
                "display_name": "Anthropic API URL",
                "type": str,
            },
            "model_kwargs": {
                "display_name": "Model Kwargs",
                "field_type": "NestedDict",
                "advanced": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
            },
        }

    def build(
        self,
        anthropic_api_key: str,
        anthropic_api_url: str,
        model_kwargs: Optional[NestedDict] = None,
        temperature: Optional[float] = None,
    ) -> BaseLanguageModel:
        """Create the Anthropic LLM client.

        Bug fix: the mutable default ``model_kwargs={}`` was shared across
        calls; a ``None`` sentinel yields a fresh dict per call.
        """
        if model_kwargs is None:
            model_kwargs = {}
        return Anthropic(
            anthropic_api_key=SecretStr(anthropic_api_key),
            anthropic_api_url=anthropic_api_url,
            model_kwargs=model_kwargs,
            temperature=temperature,
        )

View file

@ -1,6 +1,6 @@
from typing import Optional
from langchain.chat_models.anthropic import ChatAnthropic
from langchain_community.chat_models.anthropic import ChatAnthropic
from langchain.llms.base import BaseLanguageModel
from pydantic.v1 import SecretStr

View file

@ -1,13 +1,14 @@
from typing import Optional
from langflow import CustomComponent
from langchain.llms.base import BaseLanguageModel
from langchain.chat_models.azure_openai import AzureChatOpenAI
from langchain_community.chat_models.azure_openai import AzureChatOpenAI
class AzureChatOpenAIComponent(CustomComponent):
display_name: str = "AzureChatOpenAI"
description: str = "LLM model from Azure OpenAI."
documentation: str = "https://python.langchain.com/docs/integrations/llms/azure_openai"
beta = False
AZURE_OPENAI_MODELS = [
"gpt-35-turbo",
@ -18,11 +19,21 @@ class AzureChatOpenAIComponent(CustomComponent):
"gpt-4-vision",
]
AZURE_OPENAI_API_VERSIONS = [
"2023-03-15-preview",
"2023-05-15",
"2023-06-01-preview",
"2023-07-01-preview",
"2023-08-01-preview",
"2023-09-01-preview",
"2023-12-01-preview",
]
def build_config(self):
return {
"model": {
"display_name": "Model Name",
"value": "gpt-35-turbo",
"value": self.AZURE_OPENAI_MODELS[0],
"options": self.AZURE_OPENAI_MODELS,
"required": True,
},
@ -37,7 +48,8 @@ class AzureChatOpenAIComponent(CustomComponent):
},
"api_version": {
"display_name": "API Version",
"value": "2023-05-15",
"options": self.AZURE_OPENAI_API_VERSIONS,
"value": self.AZURE_OPENAI_API_VERSIONS[-1],
"required": True,
"advanced": True,
},
@ -54,6 +66,7 @@ class AzureChatOpenAIComponent(CustomComponent):
"required": False,
"field_type": "int",
"advanced": True,
"info": "Maximum number of tokens to generate.",
},
"code": {"show": False},
}
@ -64,16 +77,20 @@ class AzureChatOpenAIComponent(CustomComponent):
azure_endpoint: str,
azure_deployment: str,
api_key: str,
api_version: str = "2023-05-15",
api_version: str,
temperature: float = 0.7,
max_tokens: Optional[int] = 1000,
) -> BaseLanguageModel:
return AzureChatOpenAI(
model=model,
azure_endpoint=azure_endpoint,
azure_deployment=azure_deployment,
api_version=api_version,
api_key=api_key,
temperature=temperature,
max_tokens=max_tokens,
)
try:
llm = AzureChatOpenAI(
model=model,
azure_endpoint=azure_endpoint,
azure_deployment=azure_deployment,
api_version=api_version,
api_key=api_key,
temperature=temperature,
max_tokens=max_tokens,
)
except Exception as e:
raise ValueError("Could not connect to AzureOpenAI API.") from e
return llm

View file

@ -1,6 +1,6 @@
from typing import Optional
from langchain.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from langchain.llms.base import BaseLLM
from pydantic.v1 import SecretStr

View file

@ -0,0 +1,33 @@
from typing import Dict, Optional
from langchain_community.llms.ctransformers import CTransformers
from langflow import CustomComponent
class CTransformersComponent(CustomComponent):
    """Langflow component for GGML models served through C Transformers."""

    display_name = "CTransformers"
    description = "C Transformers LLM models"
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"

    def build_config(self):
        """Model selection fields plus an advanced JSON config blob."""
        return {
            "model": {"display_name": "Model", "required": True},
            "model_file": {
                "display_name": "Model File",
                "required": False,
                "field_type": "file",
                "file_types": [".bin"],
            },
            "model_type": {"display_name": "Model Type", "required": True},
            "config": {
                "display_name": "Config",
                "advanced": True,
                "required": False,
                "field_type": "dict",
                "value": '{"top_k":40,"top_p":0.95,"temperature":0.8,"repetition_penalty":1.1,"last_n_tokens":64,"seed":-1,"max_new_tokens":256,"stop":"","stream":"False","reset":"True","batch_size":8,"threads":-1,"context_length":-1,"gpu_layers":0}',
            },
        }

    def build(self, model: str, model_file: str, model_type: str, config: Optional[Dict] = None) -> CTransformers:
        """Instantiate the CTransformers LLM with the given model files and config."""
        llm = CTransformers(model=model, model_file=model_file, model_type=model_type, config=config)  # type: ignore
        return llm

View file

@ -0,0 +1,47 @@
from pydantic import SecretStr
from langflow import CustomComponent
from typing import Optional, Union, Callable
from langflow.field_typing import BaseLanguageModel
from langchain_community.chat_models.anthropic import ChatAnthropic
class ChatAnthropicComponent(CustomComponent):
    """Langflow component for Anthropic chat models."""

    display_name = "ChatAnthropic"
    description = "`Anthropic` chat large language models."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/chat/integrations/anthropic"

    def build_config(self):
        return {
            "anthropic_api_key": {
                "display_name": "Anthropic API Key",
                "field_type": "str",
                "password": True,
            },
            "anthropic_api_url": {
                "display_name": "Anthropic API URL",
                "field_type": "str",
            },
            "model_kwargs": {
                "display_name": "Model Kwargs",
                "field_type": "dict",
                "advanced": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
            },
        }

    def build(
        self,
        anthropic_api_key: str,
        anthropic_api_url: Optional[str] = None,
        model_kwargs: Optional[dict] = None,
        temperature: Optional[float] = None,
    ) -> Union[BaseLanguageModel, Callable]:
        """Create the ChatAnthropic client.

        Bug fix: the mutable default ``model_kwargs={}`` was shared across
        calls; a ``None`` sentinel yields a fresh dict per call.
        """
        if model_kwargs is None:
            model_kwargs = {}
        return ChatAnthropic(
            anthropic_api_key=SecretStr(anthropic_api_key),
            anthropic_api_url=anthropic_api_url,
            model_kwargs=model_kwargs,
            temperature=temperature,
        )

View file

@ -1,8 +1,8 @@
from typing import Any, Dict, List, Optional
# from langchain_community.chat_models import ChatOllama
from langchain.chat_models import ChatOllama
from langchain.chat_models.base import BaseChatModel
from langchain_community.chat_models import ChatOllama
from langchain_core.language_models.chat_models import BaseChatModel
# from langchain.chat_models import ChatOllama
from langflow import CustomComponent

View file

@ -0,0 +1,84 @@
from typing import Optional, Union
from langchain.llms import BaseLLM
from langchain_community.chat_models.openai import ChatOpenAI
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel, NestedDict
class ChatOpenAIComponent(CustomComponent):
    """Langflow component for OpenAI chat models."""

    display_name = "ChatOpenAI"
    description = "`OpenAI` Chat large language models API."

    def build_config(self):
        return {
            "max_tokens": {
                "display_name": "Max Tokens",
                "field_type": "int",
                "advanced": False,
                "required": False,
            },
            "model_kwargs": {
                "display_name": "Model Kwargs",
                "field_type": "NestedDict",
                "advanced": True,
                "required": False,
            },
            "model_name": {
                "display_name": "Model Name",
                "field_type": "str",
                "advanced": False,
                "required": False,
                "options": [
                    "gpt-4-1106-preview",
                    "gpt-4",
                    "gpt-4-32k",
                    "gpt-3.5-turbo",
                    "gpt-3.5-turbo-16k",
                ],
            },
            "openai_api_base": {
                "display_name": "OpenAI API Base",
                "field_type": "str",
                "advanced": False,
                "required": False,
                "info": (
                    "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\n"
                    "You can change this to use other APIs like JinaChat, LocalAI and Prem."
                ),
            },
            "openai_api_key": {
                "display_name": "OpenAI API Key",
                "field_type": "str",
                "advanced": False,
                "required": False,
                "password": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
                "advanced": False,
                "required": False,
                "value": 0.7,
            },
        }

    def build(
        self,
        max_tokens: Optional[int] = 256,
        model_kwargs: Optional[NestedDict] = None,
        model_name: str = "gpt-4-1106-preview",
        openai_api_base: Optional[str] = None,
        openai_api_key: Optional[str] = None,
        temperature: float = 0.7,
    ) -> Union[BaseLanguageModel, BaseLLM]:
        """Create the ChatOpenAI client.

        Bug fix: the mutable default ``model_kwargs={}`` was shared across
        calls; a ``None`` sentinel yields a fresh dict per call.
        """
        if model_kwargs is None:
            model_kwargs = {}
        # Empty/None base URL falls back to the public OpenAI endpoint.
        if not openai_api_base:
            openai_api_base = "https://api.openai.com/v1"
        return ChatOpenAI(
            max_tokens=max_tokens,
            model_kwargs=model_kwargs,
            model=model_name,
            base_url=openai_api_base,
            api_key=openai_api_key,
            temperature=temperature,
        )

View file

@ -0,0 +1,87 @@
from typing import List, Optional, Union
from langchain.llms import BaseLLM
from langchain_community.chat_models.vertexai import ChatVertexAI
from langchain_core.messages.base import BaseMessage
from langflow import CustomComponent
from langflow.field_typing import BaseLanguageModel
class ChatVertexAIComponent(CustomComponent):
    """Langflow component for Vertex AI chat models."""

    display_name = "ChatVertexAI"
    description = "`Vertex AI` Chat large language models API."

    def build_config(self):
        return {
            "credentials": {
                "display_name": "Credentials",
                "field_type": "file",
                "file_types": [".json"],
                "file_path": None,
            },
            "examples": {
                "display_name": "Examples",
                "multiline": True,
            },
            "location": {
                "display_name": "Location",
                "value": "us-central1",
            },
            "max_output_tokens": {
                "display_name": "Max Output Tokens",
                "value": 128,
                "advanced": True,
            },
            "model_name": {
                "display_name": "Model Name",
                "value": "chat-bison",
            },
            "project": {
                "display_name": "Project",
            },
            "temperature": {
                "display_name": "Temperature",
                "value": 0.0,
            },
            "top_k": {
                "display_name": "Top K",
                "value": 40,
                "advanced": True,
            },
            "top_p": {
                "display_name": "Top P",
                "value": 0.95,
                "advanced": True,
            },
            "verbose": {
                "display_name": "Verbose",
                "value": False,
                "advanced": True,
            },
        }

    def build(
        self,
        credentials: Optional[str],
        project: str,
        examples: Optional[List[BaseMessage]] = None,
        location: str = "us-central1",
        max_output_tokens: int = 128,
        model_name: str = "chat-bison",
        temperature: float = 0.0,
        top_k: int = 40,
        top_p: float = 0.95,
        verbose: bool = False,
    ) -> Union[BaseLanguageModel, BaseLLM]:
        """Create the ChatVertexAI client.

        Bug fix: the mutable default ``examples=[]`` was a single list shared
        across calls; a ``None`` sentinel is normalized to a fresh list.
        """
        if examples is None:
            examples = []
        return ChatVertexAI(
            credentials=credentials,
            examples=examples,
            location=location,
            max_output_tokens=max_output_tokens,
            model_name=model_name,
            project=project,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            verbose=verbose,
        )

View file

@ -0,0 +1,24 @@
from langchain_community.llms.cohere import Cohere
from langchain_core.language_models.base import BaseLanguageModel
from langflow import CustomComponent
class CohereComponent(CustomComponent):
    """Langflow component for Cohere text-generation models."""

    display_name = "Cohere"
    description = "Cohere large language models."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"

    def build_config(self):
        """API key plus the two generation knobs exposed to the UI."""
        return {
            "cohere_api_key": {"display_name": "Cohere API Key", "type": "password", "password": True},
            "max_tokens": {"display_name": "Max Tokens", "default": 256, "type": "int", "show": True},
            "temperature": {"display_name": "Temperature", "default": 0.75, "type": "float", "show": True},
        }

    def build(
        self,
        cohere_api_key: str,
        max_tokens: int = 256,
        temperature: float = 0.75,
    ) -> BaseLanguageModel:
        """Return a configured Cohere LLM client."""
        llm = Cohere(cohere_api_key=cohere_api_key, max_tokens=max_tokens, temperature=temperature)  # type: ignore
        return llm

View file

@ -0,0 +1,129 @@
from typing import Optional, List, Dict, Any
from langflow import CustomComponent
from langchain_community.llms.llamacpp import LlamaCpp
class LlamaCppComponent(CustomComponent):
    """Langflow component that builds a local llama.cpp (`LlamaCpp`) model."""

    display_name = "LlamaCpp"
    description = "llama.cpp model."
    documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"

    def build_config(self):
        # UI field metadata; only model_path and temperature are front-and-center,
        # every other knob is tucked behind "advanced".
        return {
            "grammar": {"display_name": "Grammar", "advanced": True},
            "cache": {"display_name": "Cache", "advanced": True},
            "client": {"display_name": "Client", "advanced": True},
            "echo": {"display_name": "Echo", "advanced": True},
            "f16_kv": {"display_name": "F16 KV", "advanced": True},
            "grammar_path": {"display_name": "Grammar Path", "advanced": True},
            "last_n_tokens_size": {"display_name": "Last N Tokens Size", "advanced": True},
            "logits_all": {"display_name": "Logits All", "advanced": True},
            "logprobs": {"display_name": "Logprobs", "advanced": True},
            "lora_base": {"display_name": "Lora Base", "advanced": True},
            "lora_path": {"display_name": "Lora Path", "advanced": True},
            "max_tokens": {"display_name": "Max Tokens", "advanced": True},
            "metadata": {"display_name": "Metadata", "advanced": True},
            "model_kwargs": {"display_name": "Model Kwargs", "advanced": True},
            "model_path": {
                "display_name": "Model Path",
                "field_type": "file",
                "file_types": [".bin"],
                "required": True,
            },
            "n_batch": {"display_name": "N Batch", "advanced": True},
            "n_ctx": {"display_name": "N Ctx", "advanced": True},
            "n_gpu_layers": {"display_name": "N GPU Layers", "advanced": True},
            "n_parts": {"display_name": "N Parts", "advanced": True},
            "n_threads": {"display_name": "N Threads", "advanced": True},
            "repeat_penalty": {"display_name": "Repeat Penalty", "advanced": True},
            "rope_freq_base": {"display_name": "Rope Freq Base", "advanced": True},
            "rope_freq_scale": {"display_name": "Rope Freq Scale", "advanced": True},
            "seed": {"display_name": "Seed", "advanced": True},
            "stop": {"display_name": "Stop", "advanced": True},
            "streaming": {"display_name": "Streaming", "advanced": True},
            "suffix": {"display_name": "Suffix", "advanced": True},
            "tags": {"display_name": "Tags", "advanced": True},
            "temperature": {"display_name": "Temperature"},
            "top_k": {"display_name": "Top K", "advanced": True},
            "top_p": {"display_name": "Top P", "advanced": True},
            "use_mlock": {"display_name": "Use Mlock", "advanced": True},
            "use_mmap": {"display_name": "Use Mmap", "advanced": True},
            "verbose": {"display_name": "Verbose", "advanced": True},
            "vocab_only": {"display_name": "Vocab Only", "advanced": True},
        }

    def build(
        self,
        model_path: str,
        grammar: Optional[str] = None,
        cache: Optional[bool] = None,
        client: Optional[Any] = None,
        echo: Optional[bool] = False,
        f16_kv: bool = True,
        grammar_path: Optional[str] = None,
        last_n_tokens_size: Optional[int] = 64,
        logits_all: bool = False,
        logprobs: Optional[int] = None,
        lora_base: Optional[str] = None,
        lora_path: Optional[str] = None,
        max_tokens: Optional[int] = 256,
        metadata: Optional[Dict] = None,
        model_kwargs: Optional[Dict] = None,
        n_batch: Optional[int] = 8,
        n_ctx: int = 512,
        n_gpu_layers: Optional[int] = 1,
        n_parts: int = -1,
        n_threads: Optional[int] = 1,
        repeat_penalty: Optional[float] = 1.1,
        rope_freq_base: float = 10000.0,
        rope_freq_scale: float = 1.0,
        seed: int = -1,
        stop: Optional[List[str]] = None,
        streaming: bool = True,
        suffix: Optional[str] = "",
        tags: Optional[List[str]] = None,
        temperature: Optional[float] = 0.8,
        top_k: Optional[int] = 40,
        top_p: Optional[float] = 0.95,
        use_mlock: bool = False,
        use_mmap: Optional[bool] = True,
        verbose: bool = True,
        vocab_only: bool = False,
    ) -> LlamaCpp:
        """Instantiate a `LlamaCpp` model from a local model file.

        Fix: `model_kwargs`, `stop` and `tags` previously used mutable default
        arguments (`{}` / `[]`) shared across calls; they now default to None
        and are normalized to fresh objects below, preserving the original
        effective defaults.
        """
        if model_kwargs is None:
            model_kwargs = {}
        if stop is None:
            stop = []
        if tags is None:
            tags = []
        return LlamaCpp(
            model_path=model_path,
            grammar=grammar,
            cache=cache,
            client=client,
            echo=echo,
            f16_kv=f16_kv,
            grammar_path=grammar_path,
            last_n_tokens_size=last_n_tokens_size,
            logits_all=logits_all,
            logprobs=logprobs,
            lora_base=lora_base,
            lora_path=lora_path,
            max_tokens=max_tokens,
            metadata=metadata,
            model_kwargs=model_kwargs,
            n_batch=n_batch,
            n_ctx=n_ctx,
            n_gpu_layers=n_gpu_layers,
            n_parts=n_parts,
            n_threads=n_threads,
            repeat_penalty=repeat_penalty,
            rope_freq_base=rope_freq_base,
            rope_freq_scale=rope_freq_scale,
            seed=seed,
            stop=stop,
            streaming=streaming,
            suffix=suffix,
            tags=tags,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            use_mlock=use_mlock,
            use_mmap=use_mmap,
            verbose=verbose,
            vocab_only=vocab_only,
        )

View file

@ -1,7 +1,7 @@
from typing import Optional, List
from typing import List, Optional
from langchain.llms import Ollama
from langchain.llms.base import BaseLLM
from langchain_community.llms.ollama import Ollama
from langflow import CustomComponent
@ -133,30 +133,25 @@ class OllamaLLM(CustomComponent):
mirostat_eta = None
mirostat_tau = None
llm_params = {
"base_url": base_url,
"model": model,
"mirostat": mirostat_value,
"mirostat_eta": mirostat_eta,
"mirostat_tau": mirostat_tau,
"num_ctx": num_ctx,
"num_gpu": num_gpu,
"num_thread": num_thread,
"repeat_last_n": repeat_last_n,
"repeat_penalty": repeat_penalty,
"temperature": temperature,
"stop": stop,
"tfs_z": tfs_z,
"top_k": top_k,
"top_p": top_p,
}
# None Value remove
llm_params = {k: v for k, v in llm_params.items() if v is not None}
try:
llm = Ollama(**llm_params)
llm = Ollama(
base_url=base_url,
model=model,
mirostat=mirostat_value,
mirostat_eta=mirostat_eta,
mirostat_tau=mirostat_tau,
num_ctx=num_ctx,
num_gpu=num_gpu,
num_thread=num_thread,
repeat_last_n=repeat_last_n,
repeat_penalty=repeat_penalty,
temperature=temperature,
stop=stop,
tfs_z=tfs_z,
top_k=top_k,
top_p=top_p,
)
except Exception as e:
raise ValueError("Could not connect to Ollama.") from e

View file

@ -0,0 +1,147 @@
from langflow import CustomComponent
from langchain.llms import BaseLLM
from typing import Optional, Union, Callable, Dict
from langchain_community.llms.vertexai import VertexAI
class VertexAIComponent(CustomComponent):
    """Langflow component that builds a Google Vertex AI text model (`VertexAI`)."""

    display_name = "VertexAI"
    description = "Google Vertex AI large language models"

    def build_config(self):
        # UI field metadata for each build() parameter.
        return {
            "credentials": {
                "display_name": "Credentials",
                "field_type": "file",
                "file_types": [".json"],
                "required": False,
                "value": None,
            },
            "location": {
                "display_name": "Location",
                "type": "str",
                "advanced": True,
                "value": "us-central1",
                "required": False,
            },
            "max_output_tokens": {
                "display_name": "Max Output Tokens",
                "field_type": "int",
                "value": 128,
                "required": False,
                "advanced": True,
            },
            "max_retries": {
                "display_name": "Max Retries",
                "type": "int",
                "value": 6,
                "required": False,
                "advanced": True,
            },
            "metadata": {
                "display_name": "Metadata",
                "field_type": "dict",
                "required": False,
                "default": {},
            },
            "model_name": {
                "display_name": "Model Name",
                "type": "str",
                "value": "text-bison",
                "required": False,
            },
            "n": {
                "advanced": True,
                "display_name": "N",
                "field_type": "int",
                "value": 1,
                "required": False,
            },
            "project": {
                "display_name": "Project",
                "type": "str",
                "required": False,
                "default": None,
            },
            "request_parallelism": {
                "display_name": "Request Parallelism",
                "field_type": "int",
                "value": 5,
                "required": False,
                "advanced": True,
            },
            "streaming": {
                "display_name": "Streaming",
                "field_type": "bool",
                "value": False,
                "required": False,
                "advanced": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
                "value": 0.0,
                "required": False,
                "advanced": True,
            },
            "top_k": {"display_name": "Top K", "type": "int", "default": 40, "required": False, "advanced": True},
            "top_p": {
                "display_name": "Top P",
                "field_type": "float",
                "value": 0.95,
                "required": False,
                "advanced": True,
            },
            "tuned_model_name": {
                "display_name": "Tuned Model Name",
                "type": "str",
                "required": False,
                "value": None,
                "advanced": True,
            },
            "verbose": {
                "display_name": "Verbose",
                "field_type": "bool",
                "value": False,
                "required": False,
            },
            "name": {"display_name": "Name", "field_type": "str"},
        }

    def build(
        self,
        credentials: Optional[str] = None,
        location: str = "us-central1",
        max_output_tokens: int = 128,
        max_retries: int = 6,
        metadata: Optional[Dict] = None,
        model_name: str = "text-bison",
        n: int = 1,
        name: Optional[str] = None,
        project: Optional[str] = None,
        request_parallelism: int = 5,
        streaming: bool = False,
        temperature: float = 0.0,
        top_k: int = 40,
        top_p: float = 0.95,
        tuned_model_name: Optional[str] = None,
        verbose: bool = False,
    ) -> Union[BaseLLM, Callable]:
        """Instantiate a `VertexAI` model from the component's settings.

        Fix: `metadata` previously defaulted to a mutable `{}` shared across
        calls; it now defaults to None and is normalized to a fresh dict here.
        """
        return VertexAI(
            credentials=credentials,
            location=location,
            max_output_tokens=max_output_tokens,
            max_retries=max_retries,
            metadata=metadata if metadata is not None else {},
            model_name=model_name,
            n=n,
            name=name,
            project=project,
            request_parallelism=request_parallelism,
            streaming=streaming,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            tuned_model_name=tuned_model_name,
            verbose=verbose,
        )

View file

@ -0,0 +1,49 @@
from typing import Callable, Optional, Union
from langchain.retrievers import MultiQueryRetriever
from langflow import CustomComponent
from langflow.field_typing import BaseLLM, BaseRetriever, PromptTemplate
class MultiQueryRetrieverComponent(CustomComponent):
    """Build a `MultiQueryRetriever` from an LLM and a base retriever."""

    display_name = "MultiQueryRetriever"
    description = "Initialize from llm using default template."
    documentation = "https://python.langchain.com/docs/modules/data_connection/retrievers/how_to/MultiQueryRetriever"

    def build_config(self):
        # The default prompt asks the LLM for 3 alternative phrasings of the
        # user question, one per line.
        return {
            "llm": {"display_name": "LLM"},
            "prompt": {
                "display_name": "Prompt",
                "default": {
                    "input_variables": ["question"],
                    "input_types": {},
                    "output_parser": None,
                    "partial_variables": {},
                    "template": "You are an AI language model assistant. Your task is \n"
                    "to generate 3 different versions of the given user \n"
                    "question to retrieve relevant documents from a vector database. \n"
                    "By generating multiple perspectives on the user question, \n"
                    "your goal is to help the user overcome some of the limitations \n"
                    "of distance-based similarity search. Provide these alternative \n"
                    "questions separated by newlines. Original question: {question}",
                    "template_format": "f-string",
                    "validate_template": False,
                    "_type": "prompt",
                },
            },
            "retriever": {"display_name": "Retriever"},
            "parser_key": {"display_name": "Parser Key", "default": "lines"},
        }

    def build(
        self,
        llm: BaseLLM,
        retriever: BaseRetriever,
        prompt: Optional[PromptTemplate] = None,
        parser_key: str = "lines",
    ) -> Union[Callable, MultiQueryRetriever]:
        """Create the retriever; only forward `prompt` when one was supplied,
        so langchain's built-in default template is used otherwise."""
        kwargs = {"llm": llm, "retriever": retriever, "parser_key": parser_key}
        if prompt:
            kwargs["prompt"] = prompt
        return MultiQueryRetriever.from_llm(**kwargs)

View file

@ -15,29 +15,21 @@ class VectaraSelfQueryRetriverComponent(CustomComponent):
display_name: str = "Vectara Self Query Retriever for Vectara Vector Store"
description: str = "Implementation of Vectara Self Query Retriever"
documentation = (
"https://python.langchain.com/docs/integrations/retrievers/self_query/vectara_self_query"
)
documentation = "https://python.langchain.com/docs/integrations/retrievers/self_query/vectara_self_query"
beta = True
field_config = {
"code": {"show": True},
"vectorstore": {
"display_name": "Vector Store",
"info": "Input Vectara Vectore Store"
},
"llm": {
"display_name": "LLM",
"info": "For self query retriever"
},
"document_content_description":{
"display_name": "Document Content Description",
"vectorstore": {"display_name": "Vector Store", "info": "Input Vectara Vectore Store"},
"llm": {"display_name": "LLM", "info": "For self query retriever"},
"document_content_description": {
"display_name": "Document Content Description",
"info": "For self query retriever",
},
},
"metadata_field_info": {
"display_name": "Metadata Field Info",
"info": "Each metadata field info is a string in the form of key value pair dictionary containing additional search metadata.\nExample input: {\"name\":\"speech\",\"description\":\"what name of the speech\",\"type\":\"string or list[string]\"}.\nThe keys should remain constant(name, description, type)",
},
"display_name": "Metadata Field Info",
"info": 'Each metadata field info is a string in the form of key value pair dictionary containing additional search metadata.\nExample input: {"name":"speech","description":"what name of the speech","type":"string or list[string]"}.\nThe keys should remain constant(name, description, type)',
},
}
def build(
@ -47,24 +39,19 @@ class VectaraSelfQueryRetriverComponent(CustomComponent):
llm: BaseLanguageModel,
metadata_field_info: List[str],
) -> BaseRetriever:
metadata_field_obj = []
for meta in metadata_field_info:
meta_obj = json.loads(meta)
if 'name' not in meta_obj or 'description' not in meta_obj or 'type' not in meta_obj :
raise Exception('Incorrect metadata field info format.')
if "name" not in meta_obj or "description" not in meta_obj or "type" not in meta_obj:
raise Exception("Incorrect metadata field info format.")
attribute_info = AttributeInfo(
name = meta_obj['name'],
description = meta_obj['description'],
type = meta_obj['type'],
name=meta_obj["name"],
description=meta_obj["description"],
type=meta_obj["type"],
)
metadata_field_obj.append(attribute_info)
return SelfQueryRetriever.from_llm(
llm,
vectorstore,
document_content_description,
metadata_field_obj,
verbose=True
)
llm, vectorstore, document_content_description, metadata_field_obj, verbose=True
)

View file

@ -0,0 +1,30 @@
from langflow import CustomComponent
from langchain.text_splitter import CharacterTextSplitter
from langchain_core.documents.base import Document
from typing import List
class CharacterTextSplitterComponent(CustomComponent):
    """Split documents into overlapping chunks on a single separator string."""

    display_name = "CharacterTextSplitter"
    description = "Splitting text that looks at characters."

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "chunk_overlap": {"display_name": "Chunk Overlap", "default": 200},
            "chunk_size": {"display_name": "Chunk Size", "default": 1000},
            "separator": {"display_name": "Separator", "default": "\n"},
        }

    def build(
        self,
        documents: List[Document],
        chunk_overlap: int = 200,
        chunk_size: int = 1000,
        separator: str = "\n",
    ) -> List[Document]:
        """Return the input documents re-split into chunks of `chunk_size`
        characters with `chunk_overlap` characters of overlap."""
        splitter = CharacterTextSplitter(
            chunk_overlap=chunk_overlap,
            chunk_size=chunk_size,
            separator=separator,
        )
        return splitter.split_documents(documents)

View file

@ -1,7 +1,9 @@
from typing import Optional
from langflow import CustomComponent
from langchain.text_splitter import Language
from langchain.schema import Document
from langchain.text_splitter import Language
from langflow import CustomComponent
class LanguageRecursiveTextSplitterComponent(CustomComponent):
@ -48,7 +50,7 @@ class LanguageRecursiveTextSplitterComponent(CustomComponent):
documents: list[Document],
chunk_size: Optional[int] = 1000,
chunk_overlap: Optional[int] = 200,
separator_type: Optional[str] = "Python",
separator_type: str = "Python",
) -> list[Document]:
"""
Split text into chunks of a specified length.

View file

@ -0,0 +1,16 @@
from langflow import CustomComponent
from langchain_community.tools.json.tool import JsonSpec
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
class JsonToolkitComponent(CustomComponent):
    """Expose langchain's `JsonToolkit` built from a `JsonSpec`."""

    display_name = "JsonToolkit"
    description = "Toolkit for interacting with a JSON spec."

    def build_config(self):
        return {"spec": {"display_name": "Spec", "type": JsonSpec}}

    def build(self, spec: JsonSpec) -> JsonToolkit:
        """Wrap the given JSON spec in a toolkit."""
        toolkit = JsonToolkit(spec=spec)
        return toolkit

View file

@ -0,0 +1,23 @@
from langflow import CustomComponent
from langflow.field_typing import AgentExecutor
from typing import Callable
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit
class OpenAPIToolkitComponent(CustomComponent):
    """Combine a JSON agent and an HTTP requests wrapper into an OpenAPI toolkit."""

    display_name = "OpenAPIToolkit"
    description = "Toolkit for interacting with an OpenAPI API."

    def build_config(self):
        return {
            "json_agent": {"display_name": "JSON Agent"},
            "requests_wrapper": {"display_name": "Text Requests Wrapper"},
        }

    def build(
        self,
        json_agent: AgentExecutor,
        requests_wrapper: TextRequestsWrapper,
    ) -> Callable:
        """Bundle the agent and the requests wrapper into an `OpenAPIToolkit`."""
        toolkit = OpenAPIToolkit(json_agent=json_agent, requests_wrapper=requests_wrapper)
        return toolkit

View file

@ -0,0 +1,26 @@
from typing import Callable, Union
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo
from langchain_community.vectorstores import VectorStore
from langflow import CustomComponent
class VectorStoreInfoComponent(CustomComponent):
    """Attach a name and description to a vector store for agent toolkits."""

    display_name = "VectorStoreInfo"
    description = "Information about a VectorStore"

    def build_config(self):
        return {
            "vectorstore": {"display_name": "VectorStore"},
            "description": {"display_name": "Description", "multiline": True},
            "name": {"display_name": "Name"},
        }

    def build(
        self,
        vectorstore: VectorStore,
        description: str,
        name: str,
    ) -> Union[VectorStoreInfo, Callable]:
        """Create the `VectorStoreInfo` record for the given store."""
        info = VectorStoreInfo(vectorstore=vectorstore, description=description, name=name)
        return info

View file

@ -0,0 +1,23 @@
from langflow import CustomComponent
from typing import List, Union
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreRouterToolkit
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo
from langflow.field_typing import BaseLanguageModel, Tool
class VectorStoreRouterToolkitComponent(CustomComponent):
    """Build a `VectorStoreRouterToolkit` that routes queries between several
    vector stores."""

    display_name = "VectorStoreRouterToolkit"
    description = "Toolkit for routing between Vector Stores."

    def build_config(self):
        return {
            "vectorstores": {"display_name": "Vector Stores"},
            "llm": {"display_name": "LLM"},
        }

    def build(
        self, vectorstores: List[VectorStoreInfo], llm: BaseLanguageModel
    ) -> Union[Tool, VectorStoreRouterToolkit]:
        """Create the router toolkit from the store infos and the LLM.

        Fix: removed leftover debug `print()` calls that dumped the inputs to
        stdout on every build.
        """
        return VectorStoreRouterToolkit(vectorstores=vectorstores, llm=llm)

View file

@ -0,0 +1,28 @@
from langflow import CustomComponent
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreToolkit
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo
from langflow.field_typing import (
BaseLanguageModel,
)
from langflow.field_typing import (
Tool,
)
from typing import Union
class VectorStoreToolkitComponent(CustomComponent):
    """Build a `VectorStoreToolkit` for querying a single vector store."""

    display_name = "VectorStoreToolkit"
    description = "Toolkit for interacting with a Vector Store."

    def build_config(self):
        return {
            "vectorstore_info": {"display_name": "Vector Store Info"},
            "llm": {"display_name": "LLM"},
        }

    def build(
        self,
        vectorstore_info: VectorStoreInfo,
        llm: BaseLanguageModel,
    ) -> Union[Tool, VectorStoreToolkit]:
        """Create the toolkit from the store info and the LLM."""
        toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info, llm=llm)
        return toolkit

View file

@ -0,0 +1,31 @@
from langflow import CustomComponent
# Assuming `BingSearchAPIWrapper` is a class that exists in the context
# and has the appropriate methods and attributes.
# We need to make sure this class is importable from the context where this code will be running.
from langchain_community.utilities.bing_search import BingSearchAPIWrapper
class BingSearchAPIWrapperComponent(CustomComponent):
    """Expose langchain's Bing Search API wrapper."""

    display_name = "BingSearchAPIWrapper"
    description = "Wrapper for Bing Search API."

    def build_config(self):
        return {
            "bing_search_url": {"display_name": "Bing Search URL"},
            "bing_subscription_key": {
                "display_name": "Bing Subscription Key",
                "password": True,
            },
            # 'k' (result count) is configurable but marked advanced in the UI.
            "k": {"display_name": "Number of results", "advanced": True},
        }

    def build(
        self,
        bing_search_url: str,
        bing_subscription_key: str,
        k: int = 10,
    ) -> BingSearchAPIWrapper:
        """Create the wrapper with endpoint, subscription key and result count."""
        return BingSearchAPIWrapper(
            bing_search_url=bing_search_url,
            bing_subscription_key=bing_subscription_key,
            k=k,
        )

View file

@ -0,0 +1,21 @@
from langflow import CustomComponent
from typing import Union, Callable
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
class GoogleSearchAPIWrapperComponent(CustomComponent):
    """Expose langchain's Google Custom Search wrapper."""

    display_name = "GoogleSearchAPIWrapper"
    description = "Wrapper for Google Search API."

    def build_config(self):
        # Both credentials are secrets, so they render as password fields.
        return {
            "google_api_key": {"display_name": "Google API Key", "password": True},
            "google_cse_id": {"display_name": "Google CSE ID", "password": True},
        }

    def build(
        self,
        google_api_key: str,
        google_cse_id: str,
    ) -> Union[GoogleSearchAPIWrapper, Callable]:
        """Create the wrapper from the API key and Custom Search Engine id."""
        wrapper = GoogleSearchAPIWrapper(google_api_key=google_api_key, google_cse_id=google_cse_id)
        return wrapper

View file

@ -0,0 +1,47 @@
from langflow import CustomComponent
from typing import Dict, Optional
# Assuming the existence of GoogleSerperAPIWrapper class in the serper module
# If this class does not exist, you would need to create it or import the appropriate class from another module
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
class GoogleSerperAPIWrapperComponent(CustomComponent):
    """Expose langchain's wrapper around the Serper.dev Google Search API."""

    display_name = "GoogleSerperAPIWrapper"
    description = "Wrapper around the Serper.dev Google Search API."

    def build_config(self) -> Dict[str, Dict]:
        # result_key_for_type maps a search type to the response key holding
        # its results; serper_api_key is a secret.
        return {
            "result_key_for_type": {
                "display_name": "Result Key for Type",
                "show": True,
                "multiline": False,
                "password": False,
                "name": "result_key_for_type",
                "advanced": False,
                "dynamic": False,
                "info": "",
                "field_type": "dict",
                "list": False,
                "value": {"news": "news", "places": "places", "images": "images", "search": "organic"},
            },
            "serper_api_key": {
                "display_name": "Serper API Key",
                "show": True,
                "multiline": False,
                "password": True,
                "name": "serper_api_key",
                "advanced": False,
                "dynamic": False,
                "info": "",
                "type": "str",
                "list": False,
            },
        }

    def build(
        self,
        serper_api_key: str,
        result_key_for_type: Optional[Dict[str, str]] = None,
    ) -> GoogleSerperAPIWrapper:
        """Create the Serper.dev wrapper with the key and result-key mapping."""
        wrapper = GoogleSerperAPIWrapper(
            result_key_for_type=result_key_for_type,
            serper_api_key=serper_api_key,
        )
        return wrapper

View file

@ -0,0 +1,33 @@
from langflow import CustomComponent
from typing import Optional, Dict
from langchain_community.utilities.searx_search import SearxSearchWrapper
class SearxSearchWrapperComponent(CustomComponent):
    """Expose langchain's Searx metasearch wrapper."""

    display_name = "SearxSearchWrapper"
    description = "Wrapper for Searx API."

    def build_config(self):
        return {
            "headers": {
                "field_type": "dict",
                "display_name": "Headers",
                "multiline": True,
                "value": '{"Authorization": "Bearer <token>"}',
            },
            "k": {"display_name": "k", "advanced": True, "field_type": "int", "value": 10},
            "searx_host": {
                "display_name": "Searx Host",
                "field_type": "str",
                "value": "https://searx.example.com",
                "advanced": True,
            },
        }

    def build(
        self,
        k: int = 10,
        headers: Optional[Dict[str, str]] = None,
        searx_host: str = "https://searx.example.com",
    ) -> SearxSearchWrapper:
        """Create the wrapper pointed at `searx_host`, returning `k` results
        and sending any extra HTTP `headers` with each request."""
        wrapper = SearxSearchWrapper(headers=headers, k=k, searx_host=searx_host)
        return wrapper

View file

@ -0,0 +1,31 @@
from typing import Callable, Union
from langchain_community.utilities.serpapi import SerpAPIWrapper
from langflow import CustomComponent
class SerpAPIWrapperComponent(CustomComponent):
    """Expose langchain's SerpAPI search wrapper."""

    display_name = "SerpAPIWrapper"
    description = "Wrapper around SerpAPI"

    def build_config(self):
        # `params` carries the raw SerpAPI engine parameters as JSON.
        return {
            "serpapi_api_key": {"display_name": "SerpAPI API Key", "type": "str", "password": True},
            "params": {
                "display_name": "Parameters",
                "type": "dict",
                "advanced": True,
                "multiline": True,
                "value": '{"engine": "google","google_domain": "google.com","gl": "us","hl": "en"}',
            },
        }

    def build(
        self,
        serpapi_api_key: str,
        params: dict,
    ) -> Union[SerpAPIWrapper, Callable]:
        """Create the wrapper with the API key and engine parameters."""
        wrapper = SerpAPIWrapper(  # type: ignore
            serpapi_api_key=serpapi_api_key,
            params=params,
        )
        return wrapper

View file

@ -0,0 +1,30 @@
from typing import Callable, Union
from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
from langflow import CustomComponent
# Assuming WikipediaAPIWrapper is a class that needs to be imported.
# The import statement is not included as it is not provided in the JSON
# and the actual implementation details are unknown.
class WikipediaAPIWrapperComponent(CustomComponent):
    """Expose langchain's Wikipedia API wrapper with its stock defaults."""

    display_name = "WikipediaAPIWrapper"
    description = "Wrapper around WikipediaAPI."

    def build_config(self):
        # No UI-configurable fields; all knobs live on build()'s defaults.
        return {}

    def build(
        self,
        top_k_results: int = 3,
        lang: str = "en",
        load_all_available_meta: bool = False,
        doc_content_chars_max: int = 4000,
    ) -> Union[WikipediaAPIWrapper, Callable]:
        """Create the wrapper: `top_k_results` articles in language `lang`,
        each truncated to `doc_content_chars_max` characters."""
        wrapper = WikipediaAPIWrapper(  # type: ignore
            top_k_results=top_k_results,
            lang=lang,
            load_all_available_meta=load_all_available_meta,
            doc_content_chars_max=doc_content_chars_max,
        )
        return wrapper

View file

@ -0,0 +1,18 @@
from typing import Callable, Union
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langflow import CustomComponent
# Since all the fields in the JSON have show=False, we will only create a basic component
# without any configurable fields.
class WolframAlphaAPIWrapperComponent(CustomComponent):
    """Expose langchain's Wolfram Alpha wrapper, configured by app ID."""

    display_name = "WolframAlphaAPIWrapper"
    description = "Wrapper for Wolfram Alpha."

    def build_config(self):
        return {"appid": {"display_name": "App ID", "type": "str", "password": True}}

    def build(self, appid: str) -> Union[Callable, WolframAlphaAPIWrapper]:
        """Create the wrapper; `appid` maps to `wolfram_alpha_appid`."""
        return WolframAlphaAPIWrapper(wolfram_alpha_appid=appid)  # type: ignore

View file

@ -3,9 +3,8 @@ from typing import List, Optional, Union
import chromadb # type: ignore
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores import Chroma
from langchain.vectorstores.base import VectorStore
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.chroma import Chroma
from langflow import CustomComponent

View file

@ -0,0 +1,26 @@
from typing import List, Union
from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.faiss import FAISS
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings
class FAISSComponent(CustomComponent):
    """Build an in-memory FAISS vector store from documents and an embedding."""

    display_name = "FAISS"
    description = "Construct FAISS wrapper from raw documents."
    documentation = "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss"

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
        }

    def build(
        self,
        embedding: Embeddings,
        documents: List[Document],
    ) -> Union[VectorStore, FAISS, BaseRetriever]:
        """Embed `documents` and index them in a FAISS store."""
        store = FAISS.from_documents(documents=documents, embedding=embedding)
        return store

View file

@ -0,0 +1,47 @@
from typing import List, Optional
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langflow import CustomComponent
from langflow.field_typing import (
Document,
Embeddings,
NestedDict,
)
class MongoDBAtlasComponent(CustomComponent):
    """Build a MongoDB Atlas Vector Search store and index the given documents."""

    display_name = "MongoDB Atlas"
    description = "Construct a `MongoDB Atlas Vector Search` vector store from raw documents."

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "collection_name": {"display_name": "Collection Name"},
            "db_name": {"display_name": "Database Name"},
            "index_name": {"display_name": "Index Name"},
            "mongodb_atlas_cluster_uri": {"display_name": "MongoDB Atlas Cluster URI"},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
        }

    def build(
        self,
        documents: List[Document],
        embedding: Embeddings,
        collection_name: str = "",
        db_name: str = "",
        index_name: str = "",
        mongodb_atlas_cluster_uri: str = "",
        search_kwargs: Optional[NestedDict] = None,
    ) -> MongoDBAtlasVectorSearch:
        """Connect to the cluster, create the vector store and add `documents`.

        Fix: `MongoDBAtlasVectorSearch(...)` was called directly with keywords
        its constructor does not accept (`documents`, `db_name`,
        `mongodb_atlas_cluster_uri`, `search_kwargs`), which raised TypeError at
        runtime. We now go through `from_connection_string` and index the
        documents afterwards. `search_kwargs` is kept for interface
        compatibility; it applies when a retriever is created from the store,
        not at construction time.
        """
        if not mongodb_atlas_cluster_uri:
            raise ValueError("MongoDB Atlas Cluster URI is required.")
        vector_store = MongoDBAtlasVectorSearch.from_connection_string(
            connection_string=mongodb_atlas_cluster_uri,
            namespace=f"{db_name}.{collection_name}",
            embedding=embedding,
            index_name=index_name,
        )
        if documents:
            vector_store.add_documents(documents)
        return vector_store

View file

@ -0,0 +1,62 @@
import os
from typing import List, Optional, Union
import pinecone # type: ignore
from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.pinecone import Pinecone
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings
class PineconeComponent(CustomComponent):
    """Build a Pinecone vector store from documents, or attach to an existing
    index when no documents are provided."""

    display_name = "Pinecone"
    description = "Construct Pinecone wrapper from raw documents."

    def build_config(self):
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "index_name": {"display_name": "Index Name"},
            "namespace": {"display_name": "Namespace"},
            "pinecone_api_key": {"display_name": "Pinecone API Key", "default": "", "password": True, "required": True},
            "pinecone_env": {"display_name": "Pinecone Environment", "default": "", "required": True},
            "search_kwargs": {"display_name": "Search Kwargs", "default": "{}"},
            "pool_threads": {"display_name": "Pool Threads", "default": 1, "advanced": True},
        }

    def build(
        self,
        embedding: Embeddings,
        pinecone_env: str,
        documents: List[Document],
        index_name: Optional[str] = None,
        pinecone_api_key: Optional[str] = None,
        text_key: Optional[str] = "text",
        namespace: Optional[str] = "default",
        pool_threads: Optional[int] = None,
    ) -> Union[VectorStore, Pinecone, BaseRetriever]:
        """Initialize the Pinecone client and build or attach the store.

        Raises:
            ValueError: if no API key is available (argument or
                PINECONE_API_KEY environment variable) or the environment is
                missing.

        Fix: the previous second validation branch (checking the environment
        variable) was unreachable because the first branch already raised on a
        None key — the env-var fallback it implied never worked. The key may
        now genuinely come from the environment.
        """
        api_key = pinecone_api_key or os.getenv("PINECONE_API_KEY")
        if not api_key or not pinecone_env:
            raise ValueError("Pinecone API Key and Environment are required.")
        pinecone.init(api_key=api_key, environment=pinecone_env)  # type: ignore
        if documents:
            # Fresh index content: embed and upsert the documents.
            return Pinecone.from_documents(
                documents=documents,
                embedding=embedding,
                index_name=index_name,
                pool_threads=pool_threads,
                namespace=namespace,
                text_key=text_key,
            )
        # No documents: attach to an already-populated index.
        return Pinecone.from_existing_index(
            index_name=index_name,
            embedding=embedding,
            text_key=text_key,
            namespace=namespace,
            pool_threads=pool_threads,
        )

View file

@ -0,0 +1,76 @@
from typing import List, Optional, Union
from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.qdrant import Qdrant
from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings, NestedDict
class QdrantComponent(CustomComponent):
    """Build a Qdrant vector store from documents and an embedding model."""

    display_name = "Qdrant"
    description = "Construct Qdrant wrapper from a list of texts."

    def build_config(self):
        # Connection/storage knobs are all advanced; documents and embedding
        # are the primary inputs.
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "api_key": {"display_name": "API Key", "password": True},
            "collection_name": {"display_name": "Collection Name"},
            "content_payload_key": {"display_name": "Content Payload Key", "advanced": True},
            "distance_func": {"display_name": "Distance Function", "advanced": True},
            "grpc_port": {"display_name": "gRPC Port", "advanced": True},
            "host": {"display_name": "Host", "advanced": True},
            "https": {"display_name": "HTTPS", "advanced": True},
            "location": {"display_name": "Location", "advanced": True},
            "metadata_payload_key": {"display_name": "Metadata Payload Key", "advanced": True},
            "path": {"display_name": "Path", "advanced": True},
            "port": {"display_name": "Port", "advanced": True},
            "prefer_grpc": {"display_name": "Prefer gRPC", "advanced": True},
            "prefix": {"display_name": "Prefix", "advanced": True},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
            "timeout": {"display_name": "Timeout", "advanced": True},
            "url": {"display_name": "URL", "advanced": True},
        }

    def build(
        self,
        embedding: Embeddings,
        documents: List[Document],
        api_key: Optional[str] = None,
        collection_name: Optional[str] = None,
        content_payload_key: str = "page_content",
        distance_func: str = "Cosine",
        grpc_port: Optional[int] = 6334,
        host: Optional[str] = None,
        https: bool = False,
        location: str = ":memory:",
        metadata_payload_key: str = "metadata",
        path: Optional[str] = None,
        port: Optional[int] = 6333,
        prefer_grpc: bool = False,
        prefix: Optional[str] = None,
        search_kwargs: Optional[NestedDict] = None,
        timeout: Optional[float] = None,
        url: Optional[str] = None,
    ) -> Union[VectorStore, Qdrant, BaseRetriever]:
        """Embed `documents` and index them in Qdrant (in-memory by default)."""
        # Collect all connection/storage options and forward them unchanged.
        options = {
            "api_key": api_key,
            "collection_name": collection_name,
            "content_payload_key": content_payload_key,
            "distance_func": distance_func,
            "grpc_port": grpc_port,
            "host": host,
            "https": https,
            "location": location,
            "metadata_payload_key": metadata_payload_key,
            "path": path,
            "port": port,
            "prefer_grpc": prefer_grpc,
            "prefix": prefix,
            "search_kwargs": search_kwargs,
            "timeout": timeout,
            "url": url,
        }
        return Qdrant.from_documents(documents=documents, embedding=embedding, **options)

View file

@ -1,11 +1,13 @@
from typing import Optional, List, Union
from langflow import CustomComponent
from langchain.vectorstores.redis import Redis
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever
from langchain.schema import Document
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.redis import Redis
from langflow import CustomComponent
class RedisComponent(CustomComponent):
"""

View file

@ -0,0 +1,44 @@
from typing import List, Optional, Union

from langchain.schema import BaseRetriever
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.supabase import SupabaseVectorStore
from supabase.client import Client, create_client

from langflow import CustomComponent
from langflow.field_typing import Document, Embeddings, NestedDict
class SupabaseComponent(CustomComponent):
    """Langflow component that builds a Supabase-backed vector store.

    Creates a Supabase client from the given URL/service key and initializes a
    ``SupabaseVectorStore`` from the supplied documents and embedding model.
    """

    display_name = "Supabase"
    description = "Return VectorStore initialized from texts and embeddings."

    def build_config(self):
        """Describe the fields this component exposes in the UI."""
        return {
            "documents": {"display_name": "Documents"},
            "embedding": {"display_name": "Embedding"},
            "query_name": {"display_name": "Query Name"},
            "search_kwargs": {"display_name": "Search Kwargs", "advanced": True},
            "supabase_service_key": {"display_name": "Supabase Service Key"},
            "supabase_url": {"display_name": "Supabase URL"},
            "table_name": {"display_name": "Table Name", "advanced": True},
        }

    def build(
        self,
        embedding: Embeddings,
        documents: List[Document],
        query_name: str = "",
        # NOTE: None sentinel instead of a mutable `{}` default, which would be
        # shared across every call of this method (classic mutable-default bug).
        search_kwargs: Optional[NestedDict] = None,
        supabase_service_key: str = "",
        supabase_url: str = "",
        table_name: str = "",
    ) -> Union[VectorStore, SupabaseVectorStore, BaseRetriever]:
        """Create the vector store.

        Args:
            embedding: Embedding model used to vectorize the documents.
            documents: Documents to index.
            query_name: Name of the Supabase RPC used for similarity search.
            search_kwargs: Extra keyword arguments for searches; defaults to ``{}``.
            supabase_service_key: Service-role key for the Supabase project.
            supabase_url: Base URL of the Supabase project.
            table_name: Table that stores the vectors.

        Returns:
            A ``SupabaseVectorStore`` built from the documents.
        """
        supabase: Client = create_client(supabase_url, supabase_key=supabase_service_key)
        return SupabaseVectorStore.from_documents(
            documents=documents,
            embedding=embedding,
            query_name=query_name,
            search_kwargs=search_kwargs or {},
            client=supabase,
            table_name=table_name,
        )

View file

@ -1,14 +1,13 @@
from typing import Optional, Union, List
from langflow import CustomComponent
import tempfile
import urllib.request
import urllib
import urllib.request
from typing import List, Optional, Union
from langchain.vectorstores import Vectara
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.schema import BaseRetriever
from langchain.embeddings import FakeEmbeddings
from langchain.schema import BaseRetriever, Document
from langchain_community.vectorstores import Vectara, VectorStore
from langflow import CustomComponent
class VectaraComponent(CustomComponent):

View file

@ -1,12 +1,11 @@
import weaviate # type: ignore
from typing import Optional, Union
from langflow import CustomComponent
from langchain.vectorstores import Weaviate
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.schema import BaseRetriever
import weaviate # type: ignore
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain_community.vectorstores import VectorStore, Weaviate
from langflow import CustomComponent
class WeaviateVectorStore(CustomComponent):
@ -45,7 +44,7 @@ class WeaviateVectorStore(CustomComponent):
search_by_text: bool = False,
api_key: Optional[str] = None,
index_name: Optional[str] = None,
text_key: Optional[str] = "text",
text_key: str = "text",
embedding: Optional[Embeddings] = None,
documents: Optional[Document] = None,
attributes: Optional[list] = None,

View file

@ -1,10 +1,10 @@
from typing import Optional, List
from langflow import CustomComponent
from typing import List, Optional
from langchain.vectorstores.pgvector import PGVector
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain_community.vectorstores import VectorStore
from langchain_community.vectorstores.pgvector import PGVector
from langflow import CustomComponent
class PostgresqlVectorComponent(CustomComponent):

View file

@ -106,6 +106,8 @@ embeddings:
documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/google_vertex_ai_palm"
AmazonBedrockEmbeddings:
documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/bedrock"
OllamaEmbeddings:
documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/ollama"
llms:
OpenAI:
@ -274,6 +276,8 @@ vectorstores:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss"
Pinecone:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/pinecone"
ElasticsearchStore:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/elasticsearch"
SupabaseVectorStore:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/supabase"
MongoDBAtlasVectorSearch:

View file

@ -12,7 +12,7 @@ from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.memory import BaseMemory
from langchain.text_splitter import TextSplitter
from langchain.tools import Tool
from langchain.vectorstores.base import VectorStore
from langchain_community.vectorstores import VectorStore
# Type alias for more complex dicts
NestedDict = Dict[str, Union[str, Dict]]

View file

@ -12,7 +12,6 @@ from langflow.interface.retrievers.base import retriever_creator
from langflow.interface.text_splitters.base import textsplitter_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
from langflow.utils.lazy_load import LazyLoadDictBase
@ -46,7 +45,7 @@ class VertexTypesDict(LazyLoadDictBase):
**{t: types.LLMVertex for t in llm_creator.to_list()},
**{t: types.MemoryVertex for t in memory_creator.to_list()},
**{t: types.EmbeddingVertex for t in embedding_creator.to_list()},
**{t: types.VectorStoreVertex for t in vectorstore_creator.to_list()},
# **{t: types.VectorStoreVertex for t in vectorstore_creator.to_list()},
**{t: types.DocumentLoaderVertex for t in documentloader_creator.to_list()},
**{t: types.TextSplitterVertex for t in textsplitter_creator.to_list()},
**{t: types.OutputParserVertex for t in output_parser_creator.to_list()},

View file

@ -2,14 +2,10 @@ from typing import Any, Optional
from langchain.agents import AgentExecutor, ZeroShotAgent
from langchain.agents.agent_toolkits import (
SQLDatabaseToolkit,
VectorStoreInfo,
VectorStoreRouterToolkit,
VectorStoreToolkit,
)
from langchain.agents.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
from langchain.agents.agent_toolkits.sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain.agents.agent_toolkits.vectorstore.prompt import PREFIX as VECTORSTORE_PREFIX
from langchain.agents.agent_toolkits.vectorstore.prompt import ROUTER_PREFIX as VECTORSTORE_ROUTER_PREFIX
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
@ -17,9 +13,14 @@ from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.sql_database import SQLDatabase
from langchain.tools.sql_database.prompt import QUERY_CHECKER
from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_community.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
from langchain_community.agent_toolkits.sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain_experimental.agents.agent_toolkits.pandas.prompt import PREFIX as PANDAS_PREFIX
from langchain_experimental.agents.agent_toolkits.pandas.prompt import SUFFIX_WITH_DF as PANDAS_SUFFIX
from langchain_experimental.tools.python.tool import PythonAstREPLTool
from langflow.interface.base import CustomAgentExecutor

View file

@ -67,7 +67,9 @@ Human: {input}
class MidJourneyPromptChain(BaseCustomConversationChain):
"""MidJourneyPromptChain is a chain you can use to generate new MidJourney prompts."""
template: Optional[str] = """I want you to act as a prompt generator for Midjourney's artificial intelligence program.
template: Optional[
str
] = """I want you to act as a prompt generator for Midjourney's artificial intelligence program.
Your job is to provide detailed and creative descriptions that will inspire unique and interesting images from the AI.
Keep in mind that the AI is capable of understanding a wide range of language and can interpret abstract concepts, so feel free to be as imaginative and descriptive as possible.
For example, you could describe a scene from a futuristic city, or a surreal landscape filled with strange creatures.
@ -81,7 +83,9 @@ class MidJourneyPromptChain(BaseCustomConversationChain):
class TimeTravelGuideChain(BaseCustomConversationChain):
template: Optional[str] = """I want you to act as my time travel guide. You are helpful and creative. I will provide you with the historical period or future time I want to visit and you will suggest the best events, sights, or people to experience. Provide the suggestions and any necessary information.
template: Optional[
str
] = """I want you to act as my time travel guide. You are helpful and creative. I will provide you with the historical period or future time I want to visit and you will suggest the best events, sights, or people to experience. Provide the suggestions and any necessary information.
Current conversation:
{history}
Human: {input}

View file

@ -66,18 +66,18 @@ class DirectoryReader:
def filter_loaded_components(self, data: dict, with_errors: bool) -> dict:
from langflow.interface.custom.utils import build_component
items = [
{
"name": menu["name"],
"path": menu["path"],
"components": [
(*build_component(component), component)
for component in menu["components"]
if (component["error"] if with_errors else not component["error"])
],
}
for menu in data["menu"]
]
items = []
for menu in data["menu"]:
components = []
for component in menu["components"]:
try:
if component["error"] if with_errors else not component["error"]:
component_tuple = (*build_component(component), component)
components.append(component_tuple)
except Exception as e:
logger.error(f"Error while loading component: {e}")
continue
items.append({"name": menu["name"], "path": menu["path"], "components": components})
filtered = [menu for menu in items if menu["components"]]
logger.debug(f'Filtered components {"with errors" if with_errors else ""}: {len(filtered)}')
return {"menu": filtered}

View file

@ -1,9 +1,10 @@
import inspect
from typing import Any
from langchain import document_loaders, embeddings, llms, memory, requests, text_splitter
from langchain.agents import agent_toolkits
from langchain.chat_models import AzureChatOpenAI, ChatAnthropic, ChatOpenAI, ChatVertexAI
from langchain import llms, memory, requests, text_splitter
from langchain_community.chat_models import AzureChatOpenAI, ChatAnthropic, ChatOpenAI, ChatVertexAI
from langchain_community import agent_toolkits, document_loaders, embeddings
from langflow.interface.agents.custom import CUSTOM_AGENTS
from langflow.interface.chains.custom import CUSTOM_CHAINS
from langflow.interface.importing.utils import import_class
@ -24,14 +25,14 @@ llm_type_to_cls_dict["vertexai-chat"] = ChatVertexAI # type: ignore
# Toolkits
toolkit_type_to_loader_dict: dict[str, Any] = {
toolkit_name: import_class(f"langchain.agents.agent_toolkits.{toolkit_name}")
toolkit_name: import_class(f"langchain_community.agent_toolkits.{toolkit_name}")
# if toolkit_name is lower case it is a loader
for toolkit_name in agent_toolkits.__all__
if toolkit_name.islower()
}
toolkit_type_to_cls_dict: dict[str, Any] = {
toolkit_name: import_class(f"langchain.agents.agent_toolkits.{toolkit_name}")
toolkit_name: import_class(f"langchain_community.agent_toolkits.{toolkit_name}")
# if toolkit_name is not lower case it is a class
for toolkit_name in agent_toolkits.__all__
if not toolkit_name.islower()
@ -47,13 +48,14 @@ wrapper_type_to_cls_dict: dict[str, Any] = {wrapper.__name__: wrapper for wrappe
# Embeddings
embedding_type_to_cls_dict: dict[str, Any] = {
embedding_name: import_class(f"langchain.embeddings.{embedding_name}") for embedding_name in embeddings.__all__
embedding_name: import_class(f"langchain_community.embeddings.{embedding_name}")
for embedding_name in embeddings.__all__
}
# Document Loaders
documentloaders_type_to_cls_dict: dict[str, Any] = {
documentloader_name: import_class(f"langchain.document_loaders.{documentloader_name}")
documentloader_name: import_class(f"langchain_community.document_loaders.{documentloader_name}")
for documentloader_name in document_loaders.__all__
}

View file

@ -6,9 +6,9 @@ from typing import Any, Type
from langchain.agents import Agent
from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chat_models.base import BaseChatModel
from langchain.prompts import PromptTemplate
from langchain.tools import BaseTool
from langchain_core.language_models.chat_models import BaseChatModel
from langflow.interface.custom.custom_component import CustomComponent
from langflow.interface.wrappers.base import wrapper_creator
from langflow.utils import validate
@ -71,7 +71,7 @@ def import_output_parser(output_parser: str) -> Any:
def import_chat_llm(llm: str) -> BaseChatModel:
"""Import chat llm from llm name"""
return import_class(f"langchain.chat_models.{llm}")
return import_class(f"langchain_community.chat_models.{llm}")
def import_retriever(retriever: str) -> Any:
@ -148,17 +148,17 @@ def import_chain(chain: str) -> Type[Chain]:
def import_embedding(embedding: str) -> Any:
"""Import embedding from embedding name"""
return import_class(f"langchain.embeddings.{embedding}")
return import_class(f"langchain_community.embeddings.{embedding}")
def import_vectorstore(vectorstore: str) -> Any:
"""Import vectorstore from vectorstore name"""
return import_class(f"langchain.vectorstores.{vectorstore}")
return import_class(f"langchain_community.vectorstores.{vectorstore}")
def import_documentloader(documentloader: str) -> Any:
"""Import documentloader from documentloader name"""
return import_class(f"langchain.document_loaders.{documentloader}")
return import_class(f"langchain_community.document_loaders.{documentloader}")
def import_textsplitter(textsplitter: str) -> Any:
@ -169,8 +169,8 @@ def import_textsplitter(textsplitter: str) -> Any:
def import_utility(utility: str) -> Any:
"""Import utility from utility name"""
if utility == "SQLDatabase":
return import_class(f"langchain.sql_database.{utility}")
return import_class(f"langchain.utilities.{utility}")
return import_class(f"langchain_community.sql_database.{utility}")
return import_class(f"langchain_community.utilities.{utility}")
def get_function(code):

View file

@ -10,7 +10,7 @@ from langchain.agents.tools import BaseTool
from langchain.chains.base import Chain
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain_community.vectorstores import VectorStore
from loguru import logger
from pydantic import ValidationError

View file

@ -1,17 +1,18 @@
from typing import Any, Callable, Dict, Type
from langchain.vectorstores import (
Pinecone,
Qdrant,
Chroma,
FAISS,
Weaviate,
SupabaseVectorStore,
MongoDBAtlasVectorSearch,
)
from langchain.schema import Document
import os
from typing import Any, Callable, Dict, Type
import orjson
from langchain.schema import Document
from langchain_community.vectorstores import (
FAISS,
Chroma,
ElasticsearchStore,
MongoDBAtlasVectorSearch,
Pinecone,
Qdrant,
SupabaseVectorStore,
Weaviate,
)
def docs_in_params(params: dict) -> bool:
@ -26,8 +27,8 @@ def initialize_mongodb(class_object: Type[MongoDBAtlasVectorSearch], params: dic
MONGODB_ATLAS_CLUSTER_URI = params.pop("mongodb_atlas_cluster_uri")
if not MONGODB_ATLAS_CLUSTER_URI:
raise ValueError("Mongodb atlas cluster uri must be provided in the params")
from pymongo import MongoClient
import certifi
from pymongo import MongoClient
client: MongoClient = MongoClient(MONGODB_ATLAS_CLUSTER_URI, tlsCAFile=certifi.where())
db_name = params.pop("db_name", None)
@ -226,11 +227,34 @@ def initialize_qdrant(class_object: Type[Qdrant], params: dict):
return class_object.from_documents(**params)
def initialize_elasticsearch(class_object: Type[ElasticsearchStore], params: dict):
    """Initialize an ElasticsearchStore and return the instance.

    ``index_name`` and ``es_url`` are mandatory in ``params``. When no documents
    are present, this attaches to the already-existing index; otherwise it
    creates/populates the index from the documents.

    Raises:
        ValueError: if ``index_name`` or ``es_url`` is missing from ``params``.
    """
    if "index_name" not in params:
        raise ValueError("Elasticsearch Index must be provided in the params")
    if "es_url" not in params:
        raise ValueError("Elasticsearch URL must be provided in the params")

    if not docs_in_params(params):
        # No documents supplied: connect to the existing index. The presence
        # checks above guarantee both keys exist, so pop them unconditionally
        # (the previous conditional re-checks were dead code).
        return class_object.from_existing_index(
            embedding=params.pop("embedding"),
            index_name=params.pop("index_name"),
            es_url=params.pop("es_url"),
        )

    # Documents present: build a new index. Normalize the legacy "texts" key
    # to the "documents" keyword expected by from_documents.
    if "texts" in params:
        params["documents"] = params.pop("texts")
    return class_object.from_documents(**params)
vecstore_initializer: Dict[str, Callable[[Type[Any], dict], Any]] = {
"Pinecone": initialize_pinecone,
"Chroma": initialize_chroma,
"Qdrant": initialize_qdrant,
"Weaviate": initialize_weaviate,
"ElasticsearchStore": initialize_elasticsearch,
"FAISS": initialize_faiss,
"SupabaseVectorStore": initialize_supabase,
"MongoDBAtlasVectorSearch": initialize_mongodb,

View file

@ -1,14 +1,12 @@
from typing import Any, ClassVar, Dict, List, Optional, Type
from langchain import retrievers
from langchain_community import retrievers
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.importing.utils import import_class
from langflow.services.deps import get_settings_service
from langflow.template.frontend_node.retrievers import RetrieverFrontendNode
from langflow.utils.util import build_template_from_class, build_template_from_method
from loguru import logger
from langflow.utils.util import build_template_from_method, build_template_from_class
class RetrieverCreator(LangChainTypeCreator):
@ -27,7 +25,7 @@ class RetrieverCreator(LangChainTypeCreator):
def type_to_loader_dict(self) -> Dict:
if self.type_dict is None:
self.type_dict: dict[str, Any] = {
retriever_name: import_class(f"langchain.retrievers.{retriever_name}")
retriever_name: import_class(f"langchain_community.retrievers.{retriever_name}")
for retriever_name in retrievers.__all__
}
return self.type_dict

View file

@ -1,18 +1,9 @@
from langchain import tools
from langchain.agents import Tool
from langchain.agents.load_tools import (
_BASE_TOOLS,
_EXTRA_LLM_TOOLS,
_EXTRA_OPTIONAL_TOOLS,
_LLM_TOOLS,
)
from langchain.agents.load_tools import _BASE_TOOLS, _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS
from langchain.tools.json.tool import JsonSpec
from langflow.interface.importing.utils import import_class
from langflow.interface.tools.custom import (
PythonFunctionTool,
PythonFunction,
)
from langflow.interface.tools.custom import PythonFunction, PythonFunctionTool
FILE_TOOLS = {"JsonSpec": JsonSpec}
CUSTOM_TOOLS = {
@ -21,7 +12,7 @@ CUSTOM_TOOLS = {
"PythonFunction": PythonFunction,
}
OTHER_TOOLS = {tool: import_class(f"langchain.tools.{tool}") for tool in tools.__all__}
OTHER_TOOLS = {tool: import_class(f"langchain_community.tools.{tool}") for tool in tools.__all__}
ALL_TOOLS_NAMES = {
**_BASE_TOOLS,

View file

@ -1,4 +1,5 @@
from cachetools import LRUCache, cached
from langflow.interface.agents.base import agent_creator
from langflow.interface.chains.base import chain_creator
from langflow.interface.custom.directory_reader.utils import merge_nested_dicts_with_renaming
@ -14,7 +15,6 @@ from langflow.interface.text_splitters.base import textsplitter_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.utilities.base import utility_creator
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
@ -46,7 +46,7 @@ def build_langchain_types_dict(): # sourcery skip: dict-assign-update-to-union
toolkits_creator,
wrapper_creator,
embedding_creator,
vectorstore_creator,
# vectorstore_creator,
documentloader_creator,
textsplitter_creator,
utility_creator,

View file

@ -1,6 +1,6 @@
from typing import Dict, List, Optional, Type
from langchain import utilities
from langchain_community import utilities
from loguru import logger
from langflow.custom.customs import get_custom_nodes
@ -30,7 +30,7 @@ class UtilityCreator(LangChainTypeCreator):
self.type_dict = {}
for utility_name in utilities.__all__:
try:
imported = import_class(f"langchain.utilities.{utility_name}")
imported = import_class(f"langchain_community.utilities.{utility_name}")
self.type_dict[utility_name] = imported
except Exception:
pass

View file

@ -1,13 +1,12 @@
from typing import Any, Dict, List, Optional, Type
from langchain import vectorstores
from loguru import logger
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.importing.utils import import_class
from langflow.services.deps import get_settings_service
from langflow.template.frontend_node.vectorstores import VectorStoreFrontendNode
from loguru import logger
from langflow.utils.util import build_template_from_method
@ -22,7 +21,7 @@ class VectorstoreCreator(LangChainTypeCreator):
def type_to_loader_dict(self) -> Dict:
if self.type_dict is None:
self.type_dict: dict[str, Any] = {
vectorstore_name: import_class(f"langchain.vectorstores.{vectorstore_name}")
vectorstore_name: import_class(f"langchain_community.vectorstores.{vectorstore_name}")
for vectorstore_name in vectorstores.__all__
}
return self.type_dict

View file

@ -1,9 +1,9 @@
from typing import ClassVar, Dict, List, Optional
from langchain.utilities import requests, sql_database
from langchain_community.utilities import requests, sql_database
from loguru import logger
from langflow.interface.base import LangChainTypeCreator
from loguru import logger
from langflow.utils.util import build_template_from_class, build_template_from_method

View file

@ -7,6 +7,7 @@ from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from langflow.api import router
from langflow.interface.utils import setup_llm_caching
from langflow.services.plugins.langfuse_plugin import LangfuseInstance
@ -102,11 +103,12 @@ def setup_app(static_files_dir: Optional[Path] = None, backend_only: bool = Fals
if __name__ == "__main__":
import uvicorn
from langflow.__main__ import get_number_of_workers
configure()
uvicorn.run(
create_app,
"langflow.main:create_app",
host="127.0.0.1",
port=7860,
workers=get_number_of_workers(),

View file

@ -4,12 +4,14 @@ from typing import Any, Coroutine, Dict, List, Optional, Tuple, Union
from langchain.agents import AgentExecutor
from langchain.chains.base import Chain
from langchain.schema import AgentAction, Document
from langchain.vectorstores.base import VectorStore
from langchain_community.vectorstores import VectorStore
from langchain_core.messages import AIMessage
from langchain_core.runnables.base import Runnable
from langflow.graph.graph.base import Graph
from langflow.interface.custom.custom_component import CustomComponent
from langflow.interface.run import build_sorted_vertices, get_memory_key, update_memory_keys
from langflow.services.deps import get_session_service
from langflow.services.session.service import SessionService
from loguru import logger
from pydantic import BaseModel
@ -220,13 +222,29 @@ async def process_graph_cached(
graph, artifacts = session if session else (None, None)
if not graph:
raise ValueError("Graph not found in the session")
result = await build_graph_and_generate_result(
graph=graph, session_id=session_id, inputs=inputs, artifacts=artifacts, session_service=session_service
)
return result
async def build_graph_and_generate_result(
graph: "Graph",
session_id: str,
inputs: Optional[Union[dict, List[dict]]] = None,
artifacts: Optional[Dict[str, Any]] = None,
session_service: Optional[SessionService] = None,
):
"""Build the graph and generate the result"""
built_object = await graph.build()
processed_inputs = process_inputs(inputs, artifacts or {})
result = await generate_result(built_object, processed_inputs)
# langchain_object is now updated with the new memory
# we need to update the cache with the updated langchain_object
session_service.update_session(session_id, (graph, artifacts))
if session_id and session_service:
session_service.update_session(session_id, (graph, artifacts))
return Result(result=result, session_id=session_id)

View file

@ -7,6 +7,7 @@ from fastapi import Depends, HTTPException, Security, status
from fastapi.security import APIKeyHeader, APIKeyQuery, OAuth2PasswordBearer
from jose import JWTError, jwt
from sqlmodel import Session
from starlette.websockets import WebSocket
from langflow.services.database.models.api_key.model import ApiKey
from langflow.services.database.models.api_key.crud import check_key
@ -130,6 +131,21 @@ async def get_current_user_by_jwt(
return user
async def get_current_user_for_websocket(
    websocket: WebSocket,
    db: Session = Depends(get_session),
    query_param: str = Security(api_key_query),
) -> Optional[User]:
    """Resolve the authenticated user for a WebSocket connection.

    Checks the connection's query parameters: a JWT under ``token`` wins,
    falling back to an API key under ``x-api-key``. Returns ``None`` when
    neither credential is present.
    """
    query_params = websocket.query_params
    token = query_params.get("token")
    if token:
        return await get_current_user_by_jwt(token, db)
    api_key = query_params.get("x-api-key")
    if api_key:
        return await api_key_security(api_key, query_param, db)
    return None
def get_current_active_user(current_user: Annotated[User, Depends(get_current_user)]):
if not current_user.is_active:
raise HTTPException(status_code=400, detail="Inactive user")

Some files were not shown because too many files have changed in this diff Show more