Merge branch 'dev' into cz/bug/state/zustand
commit 06bc86142a
38 changed files with 7873 additions and 14988 deletions
.gitignore (vendored, 5 changes)
@@ -17,6 +17,9 @@ qdrant_storage
.chroma
.ruff_cache

# PyCharm
.idea/

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
@@ -256,4 +259,4 @@ langflow.db
/tmp/*
src/backend/langflow/frontend/
.docker
scratchpad*
scratchpad*
@@ -133,7 +133,7 @@ Follow our step-by-step guide to deploy Langflow on Google Cloud Platform (GCP)

Alternatively, click the **"Open in Cloud Shell"** button below to launch Google Cloud Shell, clone the Langflow repository, and start an **interactive tutorial** that will guide you through the process of setting up the necessary resources and deploying Langflow on your GCP project.

[](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/logspace-ai/langflow&working_dir=scripts&shellonly=true&tutorial=walkthroughtutorial_spot.md)
[](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/logspace-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)

## Deploy on Railway
@@ -1,11 +1,13 @@
import Admonition from '@theme/Admonition';
import Admonition from "@theme/Admonition";

# Embeddings

<Admonition type="caution" icon="🚧" title="ZONE UNDER CONSTRUCTION">
<p>
We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝
</p>
<p>
We appreciate your understanding as we polish our documentation – it may
contain some rough edges. Share your feedback or report issues to help us
improve! 🛠️📝
</p>
</Admonition>

Embeddings are vector representations of text that capture the semantic meaning of the text. They are created using text embedding models and allow us to think about the text in a vector space, enabling us to perform tasks like semantic search, where we look for pieces of text that are most similar in the vector space.

@@ -110,4 +112,12 @@ Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP).
- **top_k:** How the model selects tokens for output; the next token is selected from among the `top_k` most probable tokens – defaults to `40`.
- **top_p:** Tokens are selected from most probable to least until the sum of their probabilities reaches the `top_p` value – defaults to `0.95`.
- **tuned_model_name:** The name of a tuned model. If provided, model_name is ignored.
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output – defaults to `False`.

### OllamaEmbeddings

Used to load [Ollama’s](https://ollama.ai/) embedding models. Wrapper around LangChain's [Ollama API](https://python.langchain.com/docs/integrations/text_embedding/ollama).

- **model:** The name of the Ollama model to use – defaults to `llama2`.
- **base_url:** The base URL for the Ollama API – defaults to `http://localhost:11434`.
- **temperature:** Tunes the degree of randomness in text generations. Should be a non-negative value – defaults to `0`.
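To make the semantic-search idea described above concrete, here is a minimal sketch that embeds a few documents with the `OllamaEmbeddings` defaults documented in this hunk and ranks them by cosine similarity. It assumes a local Ollama server is running; the helper names are illustrative, not part of Langflow:

# Minimal semantic-search sketch using the documented OllamaEmbeddings defaults.
from langchain_community.embeddings import OllamaEmbeddings

embedder = OllamaEmbeddings(model="llama2", base_url="http://localhost:11434", temperature=0)

def cosine(a: list[float], b: list[float]) -> float:
    # Cosine similarity: dot product divided by the product of vector norms.
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = sum(x * x for x in a) ** 0.5
    norm_b = sum(y * y for y in b) ** 0.5
    return dot / (norm_a * norm_b)

docs = ["Langflow is a UI for LangChain.", "Embeddings map text to vectors."]
doc_vectors = embedder.embed_documents(docs)
query_vector = embedder.embed_query("What turns text into vectors?")
best = max(range(len(docs)), key=lambda i: cosine(query_vector, doc_vectors[i]))
print(docs[best])  # expected: the sentence about embeddings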
docs/package-lock.json (generated, 20173 changes)
File diff suppressed because it is too large.
@@ -1,5 +1,5 @@
{
  "name": "docusaurus",
  "name": "langflow-docs",
  "version": "0.0.0",
  "private": true,
  "scripts": {
@@ -36,8 +36,8 @@
  "path-browserify": "^1.0.1",
  "postcss": "^8.4.31",
  "prism-react-renderer": "^1.3.5",
  "react": "^17.0.2",
  "react-dom": "^17.0.2",
  "react": "^18.2.0",
  "react-dom": "^18.2.0",
  "react-images": "^0.6.7",
  "react-medium-image-zoom": "^5.1.6",
  "react-player": "^2.12.0",
poetry.lock (generated, 1858 changes)
File diff suppressed because it is too large.
@@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.6.4"
version = "0.6.5a8"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [
@@ -55,7 +55,7 @@ tiktoken = "~0.5.0"
wikipedia = "^1.4.0"
qdrant-client = "^1.7.0"
websockets = "^10.3"
weaviate-client = "^3.26.0"
weaviate-client = { version = "^4.4b6", allow-prereleases = true }
jina = "*"
sentence-transformers = { version = "^2.2.2", optional = true }
ctransformers = { version = "^0.2.10", optional = true }
@@ -104,7 +104,9 @@ qianfan = "0.2.0"
pgvector = "^0.2.3"
pyautogen = "^0.2.0"
langchain-google-genai = "^0.0.2"
elasticsearch = "^8.11.1"
pytube = "^15.0.0"
llama-index = "^0.9.24"

[tool.poetry.group.dev.dependencies]
pytest-asyncio = "^0.23.1"
@@ -1,15 +1,12 @@
import platform
import socket
import sys
import time
import webbrowser
from pathlib import Path
from typing import Optional

import httpx
import typer
from dotenv import load_dotenv
from multiprocess import Process, cpu_count  # type: ignore
from multiprocess import cpu_count  # type: ignore
from rich import box
from rich import print as rprint
from rich.console import Console
@@ -212,23 +209,12 @@ def run(
        run_on_windows(host, port, log_level, options, app)
    else:
        # Run using gunicorn on Linux
        run_on_mac_or_linux(host, port, log_level, options, app, open_browser)
        run_on_mac_or_linux(host, port, log_level, options, app)


def run_on_mac_or_linux(host, port, log_level, options, app, open_browser=True):
    webapp_process = Process(target=run_langflow, args=(host, port, log_level, options, app))
    webapp_process.start()
    status_code = 0
    while status_code != 200:
        try:
            status_code = httpx.get(f"http://{host}:{port}/health").status_code
        except Exception:
            time.sleep(1)

def run_on_mac_or_linux(host, port, log_level, options, app):
    print_banner(host, port)
    if open_browser:
        webbrowser.open(f"http://{host}:{port}")
    run_langflow(host, port, log_level, options, app)


def run_on_windows(host, port, log_level, options, app):
@@ -303,19 +289,26 @@ def run_langflow(host, port, log_level, options, app):
    Run Langflow server on localhost
    """
    try:
        if platform.system() in ["Windows"]:
        if platform.system() in ["Windows", "Darwin"]:
            # Run using uvicorn on MacOS and Windows
            # Windows doesn't support gunicorn
            # MacOS requires an env variable to be set to use gunicorn

            import uvicorn

            uvicorn.run(app, host=host, port=port, log_level=log_level)
            uvicorn.run(
                app,
                host=host,
                port=port,
                log_level=log_level,
            )
        else:
            from langflow.server import LangflowApplication

            LangflowApplication(app, options).run()
    except KeyboardInterrupt:
        pass
        logger.info("Shutting down server")
        sys.exit(0)
    except Exception as e:
        logger.exception(e)
        sys.exit(1)
@@ -27,7 +27,8 @@ def upgrade() -> None:

        with op.batch_alter_table('user', schema=None) as batch_op:
            batch_op.create_unique_constraint('uq_user_id', ['id'])
    except Exception:
    except Exception as e:
        print(e)
        pass

    # ### end Alembic commands ###

@@ -44,6 +45,7 @@ def downgrade() -> None:

        with op.batch_alter_table('apikey', schema=None) as batch_op:
            batch_op.drop_constraint('uq_apikey_id', type_='unique')
    except Exception:
    except Exception as e:
        print(e)
        pass
    # ### end Alembic commands ###
src/backend/langflow/alembic/versions/0b8757876a7c_.py (new file, 71 lines)
@@ -0,0 +1,71 @@
"""empty message

Revision ID: 0b8757876a7c
Revises: 006b3990db50
Create Date: 2024-01-17 10:32:56.686287

"""
from typing import Sequence, Union

from alembic import op

# revision identifiers, used by Alembic.
revision: str = '0b8757876a7c'
down_revision: Union[str, None] = '006b3990db50'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        with op.batch_alter_table('apikey', schema=None) as batch_op:
            batch_op.create_index(batch_op.f('ix_apikey_api_key'), ['api_key'], unique=True)
            batch_op.create_index(batch_op.f('ix_apikey_name'), ['name'], unique=False)
            batch_op.create_index(batch_op.f('ix_apikey_user_id'), ['user_id'], unique=False)
    except Exception as e:
        print(e)
        pass
    try:
        with op.batch_alter_table('flow', schema=None) as batch_op:
            batch_op.create_index(batch_op.f('ix_flow_description'), ['description'], unique=False)
            batch_op.create_index(batch_op.f('ix_flow_name'), ['name'], unique=False)
            batch_op.create_index(batch_op.f('ix_flow_user_id'), ['user_id'], unique=False)
    except Exception as e:
        print(e)
        pass

    try:
        with op.batch_alter_table('user', schema=None) as batch_op:
            batch_op.create_index(batch_op.f('ix_user_username'), ['username'], unique=True)
    except Exception as e:
        print(e)
        pass
    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        with op.batch_alter_table('user', schema=None) as batch_op:
            batch_op.drop_index(batch_op.f('ix_user_username'))
    except Exception as e:
        print(e)
        pass
    try:
        with op.batch_alter_table('flow', schema=None) as batch_op:
            batch_op.drop_index(batch_op.f('ix_flow_user_id'))
            batch_op.drop_index(batch_op.f('ix_flow_name'))
            batch_op.drop_index(batch_op.f('ix_flow_description'))
    except Exception as e:
        print(e)
        pass
    try:
        with op.batch_alter_table('apikey', schema=None) as batch_op:
            batch_op.drop_index(batch_op.f('ix_apikey_user_id'))
            batch_op.drop_index(batch_op.f('ix_apikey_name'))
            batch_op.drop_index(batch_op.f('ix_apikey_api_key'))
    except Exception as e:
        print(e)
        pass
    # ### end Alembic commands ###
@@ -60,8 +60,8 @@ def upgrade() -> None:
        sa.Column("create_at", sa.DateTime(), nullable=False),
        sa.Column("updated_at", sa.DateTime(), nullable=False),
        sa.Column("last_login_at", sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("id"),
        sa.PrimaryKeyConstraint("id", name="pk_user"),
        sa.UniqueConstraint("id", name="uq_user_id"),
    )
    with op.batch_alter_table("user", schema=None) as batch_op:
        batch_op.create_index(

@@ -83,8 +83,8 @@ def upgrade() -> None:
            ["user_id"],
            ["user.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("id"),
        sa.PrimaryKeyConstraint("id", name="pk_apikey"),
        sa.UniqueConstraint("id", name="uq_apikey_id"),
    )
    with op.batch_alter_table("apikey", schema=None) as batch_op:
        batch_op.create_index(

@@ -106,8 +106,8 @@ def upgrade() -> None:
            ["user_id"],
            ["user.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("id"),
        sa.PrimaryKeyConstraint("id", name="pk_flow"),
        sa.UniqueConstraint("id", name="uq_flow_id"),
    )
    # Conditionally create indices for 'flow' table
    # if _alembic_tmp_flow exists, then we need to drop it first

@@ -145,7 +145,7 @@ def upgrade() -> None:
def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###

    conn = op.get_bind()
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)  # type: ignore
    # List existing tables
    existing_tables = inspector.get_table_names()
@@ -29,9 +29,10 @@ def upgrade() -> None:
            sa.Column('id', sqlmodel.sql.sqltypes.GUID(), nullable=False),
            sa.Column('created_at', sa.DateTime(), nullable=False),
            sa.Column('updated_at', sa.DateTime(), nullable=True),
            sa.PrimaryKeyConstraint('id')
            sa.PrimaryKeyConstraint('id'),
        )
    except Exception:
    except Exception as e:
        print(e)
        pass
    # ### end Alembic commands ###


@@ -40,6 +41,7 @@ def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        op.drop_table('credential')
    except Exception:
    except Exception as e:
        print(e)
        pass
    # ### end Alembic commands ###
@@ -45,6 +45,7 @@ def downgrade() -> None:

        with op.batch_alter_table("flow", schema=None) as batch_op:
            batch_op.drop_column("is_component")
    except Exception:
    except Exception as e:
        print(e)
        pass
    # ### end Alembic commands ###
@@ -37,7 +37,6 @@ def upgrade() -> None:
    with op.batch_alter_table('flow', schema=None) as batch_op:
        batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True))
        batch_op.add_column(sa.Column('folder', sqlmodel.sql.sqltypes.AutoString(), nullable=True))

    # ### end Alembic commands ###
@@ -29,7 +29,8 @@ def upgrade() -> None:
    except exc.SQLAlchemyError:
        # connection.execute(text("ROLLBACK"))
        pass
    except Exception:
    except Exception as e:
        print(e)
        pass

    try:

@@ -37,7 +38,8 @@ def upgrade() -> None:
    except exc.SQLAlchemyError:
        # connection.execute(text("ROLLBACK"))
        pass
    except Exception:
    except Exception as e:
        print(e)
        pass
    # ### end Alembic commands ###

@@ -57,14 +59,15 @@ def downgrade() -> None:
            sa.Column("is_read_only", sa.BOOLEAN(), nullable=False),
            sa.Column("create_at", sa.DATETIME(), nullable=False),
            sa.Column("update_at", sa.DATETIME(), nullable=False),
            sa.PrimaryKeyConstraint("id"),
            sa.PrimaryKeyConstraint("id", name="pk_component"),
        )
        with op.batch_alter_table("component", schema=None) as batch_op:
            batch_op.create_index("ix_component_name", ["name"], unique=False)
            batch_op.create_index(
                "ix_component_frontend_node_id", ["frontend_node_id"], unique=False
            )
    except Exception:
    except Exception as e:
        print(e)
        pass

    try:

@@ -78,9 +81,10 @@ def downgrade() -> None:
            ["flow_id"],
            ["flow.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("id"),
        sa.PrimaryKeyConstraint("id", name="pk_flowstyle"),
        sa.UniqueConstraint("id", name="uq_flowstyle_id"),
        )
    except Exception:
    except Exception as e:
        print(e)
        pass
    # ### end Alembic commands ###
@@ -7,10 +7,8 @@ Create Date: 2023-10-18 23:12:27.297016
"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
import sqlmodel

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "f5ee9749d1a6"

@@ -26,7 +24,8 @@ def upgrade() -> None:
        batch_op.alter_column(
            "user_id", existing_type=sa.CHAR(length=32), nullable=True
        )
    except Exception:
    except Exception as e:
        print(e)
        pass

    # ### end Alembic commands ###

@@ -39,7 +38,8 @@ def downgrade() -> None:
        batch_op.alter_column(
            "user_id", existing_type=sa.CHAR(length=32), nullable=False
        )
    except Exception:
    except Exception as e:
        print(e)
        pass

    # ### end Alembic commands ###
@@ -21,7 +21,8 @@ def upgrade() -> None:
    try:
        with op.batch_alter_table('credential', schema=None) as batch_op:
            batch_op.create_foreign_key("fk_credential_user_id", 'user', ['user_id'], ['id'])
    except Exception:
    except Exception as e:
        print(e)
        pass

    # ### end Alembic commands ###

@@ -32,7 +33,8 @@ def downgrade() -> None:
    try:
        with op.batch_alter_table('credential', schema=None) as batch_op:
            batch_op.drop_constraint("fk_credential_user_id", type_='foreignkey')
    except Exception:
    except Exception as e:
        print(e)
        pass

    # ### end Alembic commands ###
@@ -3,12 +3,10 @@ from typing import Annotated, Any, List, Optional, Union

import sqlalchemy as sa
from fastapi import APIRouter, Body, Depends, HTTPException, UploadFile, status
from loguru import logger
from sqlmodel import select

from langflow.api.utils import update_frontend_node_with_template_values
from langflow.api.v1.schemas import (
    CustomComponentCode,
    PreloadResponse,
    ProcessResponse,
    TaskResponse,
    TaskStatusResponse,

@@ -17,12 +15,15 @@ from langflow.api.v1.schemas import (
from langflow.interface.custom.custom_component import CustomComponent
from langflow.interface.custom.directory_reader import DirectoryReader
from langflow.interface.custom.utils import build_custom_component_template
from langflow.processing.process import process_graph_cached, process_tweaks
from langflow.processing.process import build_graph_and_generate_result, process_graph_cached, process_tweaks
from langflow.services.auth.utils import api_key_security, get_current_active_user
from langflow.services.cache.utils import save_uploaded_file
from langflow.services.database.models.flow import Flow
from langflow.services.database.models.user.model import User
from langflow.services.deps import get_session, get_session_service, get_settings_service, get_task_service
from langflow.services.session.service import SessionService
from loguru import logger
from sqlmodel import select

try:
    from langflow.worker import process_graph_cached_task

@@ -32,9 +33,8 @@ except ImportError:
    raise NotImplementedError("Celery is not installed")


from sqlmodel import Session

from langflow.services.task.service import TaskService
from sqlmodel import Session

# build router
router = APIRouter(tags=["Base"])

@@ -148,6 +148,55 @@ async def process_json(
        raise HTTPException(status_code=500, detail=str(exc)) from exc


# Endpoint to preload a graph
@router.post("/process/preload/{flow_id}", response_model=PreloadResponse)
async def preload_flow(
    session: Annotated[Session, Depends(get_session)],
    flow_id: str,
    session_id: Optional[str] = None,
    session_service: SessionService = Depends(get_session_service),
    api_key_user: User = Depends(api_key_security),
    clear_session: Annotated[bool, Body(embed=True)] = False,  # noqa: F821
):
    try:
        # Get the flow that matches the flow_id and belongs to the user
        # flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
        if clear_session:
            session_service.clear_session(session_id)
            # Check if the session exists
            session_data = await session_service.load_session(session_id)
            # Session data is a tuple of (graph, artifacts)
            # or (None, None) if the session is empty
            if isinstance(session_data, tuple):
                graph, artifacts = session_data
                is_clear = graph is None and artifacts is None
            else:
                is_clear = session_data is None
            return PreloadResponse(session_id=session_id, is_clear=is_clear)
        else:
            if session_id is None:
                session_id = flow_id
            flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
            if flow is None:
                raise ValueError(f"Flow {flow_id} not found")

            if flow.data is None:
                raise ValueError(f"Flow {flow_id} has no data")
            graph_data = flow.data
            session_service.clear_session(session_id)
            # Load the graph using SessionService
            session_data = await session_service.load_session(session_id, graph_data)
            graph, artifacts = session_data if session_data else (None, None)
            if not graph:
                raise ValueError("Graph not found in the session")
            _ = await graph.build()
            session_service.update_session(session_id, (graph, artifacts))
            return PreloadResponse(session_id=session_id)
    except Exception as exc:
        logger.exception(exc)
        raise HTTPException(status_code=500, detail=str(exc)) from exc


@router.post(
    "/predict/{flow_id}",
    response_model=ProcessResponse,

@@ -167,36 +216,75 @@ async def process(
    task_service: "TaskService" = Depends(get_task_service),
    api_key_user: User = Depends(api_key_security),
    sync: Annotated[bool, Body(embed=True)] = True,  # noqa: F821
    session_service: SessionService = Depends(get_session_service),
):
    """
    Endpoint to process an input with a given flow_id.
    """

    try:
        if api_key_user is None:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Invalid API Key",
        if session_id:
            session_data = await session_service.load_session(session_id)
            graph, artifacts = session_data if session_data else (None, None)
            task_result: Any = None
            task_status = None
            task_id = None
            if not graph:
                raise ValueError("Graph not found in the session")
            result = await build_graph_and_generate_result(
                graph=graph,
                inputs=inputs,
                artifacts=artifacts,
                session_id=session_id,
                session_service=session_service,
            )
            task_id = str(id(result))
            if isinstance(result, dict) and "result" in result:
                task_result = result["result"]
                session_id = result["session_id"]
            elif hasattr(result, "result") and hasattr(result, "session_id"):
                task_result = result.result

                session_id = result.session_id
            else:
                task_result = result
            if task_id:
                task_response = TaskResponse(id=task_id, href=f"api/v1/task/{task_id}")
            else:
                task_response = None
            return ProcessResponse(
                result=task_result,
                status=task_status,
                task=task_response,
                session_id=session_id,
                backend=task_service.backend_name,
            )

        # Get the flow that matches the flow_id and belongs to the user
        # flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
        flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
        if flow is None:
            raise ValueError(f"Flow {flow_id} not found")
        else:
            if api_key_user is None:
                raise HTTPException(
                    status_code=status.HTTP_401_UNAUTHORIZED,
                    detail="Invalid API Key",
                )

        if flow.data is None:
            raise ValueError(f"Flow {flow_id} has no data")
        graph_data = flow.data
        return await process_graph_data(
            graph_data=graph_data,
            inputs=inputs,
            tweaks=tweaks,
            clear_cache=clear_cache,
            session_id=session_id,
            task_service=task_service,
            sync=sync,
        )
            # Get the flow that matches the flow_id and belongs to the user
            # flow = session.query(Flow).filter(Flow.id == flow_id).filter(Flow.user_id == api_key_user.id).first()
            flow = session.exec(select(Flow).where(Flow.id == flow_id).where(Flow.user_id == api_key_user.id)).first()
            if flow is None:
                raise ValueError(f"Flow {flow_id} not found")

            if flow.data is None:
                raise ValueError(f"Flow {flow_id} has no data")
            graph_data = flow.data
            return await process_graph_data(
                graph_data=graph_data,
                inputs=inputs,
                tweaks=tweaks,
                clear_cache=clear_cache,
                session_id=session_id,
                task_service=task_service,
                sync=sync,
            )
    except sa.exc.StatementError as exc:
        # StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
        if "badly formed hexadecimal UUID string" in str(exc):
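For reference, a hypothetical client call against the new preload endpoint. It assumes the router is mounted under /api/v1 (consistent with the `api/v1/task/{task_id}` href above) and that the API key travels in an "x-api-key" header; neither detail is confirmed by this diff, and the flow id and key are placeholders:

# Sketch of preloading a flow's graph before sending traffic to it.
import httpx

flow_id = "your-flow-id"  # placeholder
resp = httpx.post(
    f"http://127.0.0.1:7860/api/v1/process/preload/{flow_id}",
    headers={"x-api-key": "your-api-key"},  # assumed header name
    json={"clear_session": False},  # clear_session is an embedded body field
)
resp.raise_for_status()
print(resp.json())  # PreloadResponse, e.g. {"session_id": "...", "is_clear": null}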
@@ -64,6 +64,13 @@ class ProcessResponse(BaseModel):
    backend: Optional[str] = None


class PreloadResponse(BaseModel):
    """Preload response schema."""

    session_id: Optional[str] = None
    is_clear: Optional[bool] = None


# TaskStatusResponse(
#     status=task.status, result=task.result if task.ready() else None
# )
@@ -0,0 +1,64 @@
from langflow import CustomComponent
from langchain.embeddings.base import Embeddings
from langchain_community.embeddings import AzureOpenAIEmbeddings


class AzureOpenAIEmbeddingsComponent(CustomComponent):
    display_name: str = "AzureOpenAIEmbeddings"
    description: str = "Embeddings model from Azure OpenAI."
    documentation: str = "https://python.langchain.com/docs/integrations/text_embedding/azureopenai"
    beta = False

    API_VERSION_OPTIONS = [
        "2022-12-01",
        "2023-03-15-preview",
        "2023-05-15",
        "2023-06-01-preview",
        "2023-07-01-preview",
        "2023-08-01-preview",
    ]

    def build_config(self):
        return {
            "azure_endpoint": {
                "display_name": "Azure Endpoint",
                "required": True,
                "info": "Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/`",
            },
            "azure_deployment": {
                "display_name": "Deployment Name",
                "required": True,
            },
            "api_version": {
                "display_name": "API Version",
                "options": self.API_VERSION_OPTIONS,
                "value": self.API_VERSION_OPTIONS[-1],
                "advanced": True,
            },
            "api_key": {
                "display_name": "API Key",
                "required": True,
                "password": True,
            },
            "code": {
                "show": False
            },
        }

    def build(
        self,
        azure_endpoint: str,
        azure_deployment: str,
        api_version: str,
        api_key: str,
    ) -> Embeddings:
        try:
            embeddings = AzureOpenAIEmbeddings(
                azure_endpoint=azure_endpoint,
                deployment=azure_deployment,
                openai_api_version=api_version,
                openai_api_key=api_key,
            )
        except Exception as e:
            raise ValueError("Could not connect to AzureOpenAIEmbeddings API.") from e

        return embeddings
@@ -0,0 +1,41 @@
from typing import Optional

from langflow import CustomComponent
from langchain.embeddings.base import Embeddings
from langchain_community.embeddings import OllamaEmbeddings


class OllamaEmbeddingsComponent(CustomComponent):
    """
    A custom component for implementing an Embeddings Model using Ollama.
    """

    display_name: str = "Ollama Embeddings"
    description: str = "Embeddings model from Ollama."
    documentation = "https://python.langchain.com/docs/integrations/text_embedding/ollama"
    beta = True

    def build_config(self):
        return {
            "model": {
                "display_name": "Ollama Model",
            },
            "base_url": {"display_name": "Ollama Base URL"},
            "temperature": {"display_name": "Model Temperature"},
            "code": {"show": False},
        }

    def build(
        self,
        model: str = "llama2",
        base_url: str = "http://localhost:11434",
        temperature: Optional[float] = None,
    ) -> Embeddings:
        try:
            output = OllamaEmbeddings(
                model=model,
                base_url=base_url,
                temperature=temperature,
            )  # type: ignore
        except Exception as e:
            raise ValueError("Could not connect to Ollama API.") from e
        return output
@@ -1,7 +1,8 @@
from typing import Optional
from langflow import CustomComponent
from langchain.llms.bedrock import Bedrock

from langchain.llms.base import BaseLLM
from langchain.llms.bedrock import Bedrock
from langflow import CustomComponent


class AmazonBedrockComponent(CustomComponent):

@@ -27,18 +28,32 @@ class AmazonBedrockComponent(CustomComponent):
            },
            "credentials_profile_name": {"display_name": "Credentials Profile Name"},
            "streaming": {"display_name": "Streaming", "field_type": "bool"},
            "code": {"show": False},
            "endpoint_url": {"display_name": "Endpoint URL"},
            "region_name": {"display_name": "Region Name"},
            "model_kwargs": {"display_name": "Model Kwargs"},
            "cache": {"display_name": "Cache"},
            "code": {"advanced": True},
        }

    def build(
        self,
        model_id: str = "anthropic.claude-instant-v1",
        credentials_profile_name: Optional[str] = None,
        region_name: Optional[str] = None,
        model_kwargs: Optional[dict] = None,
        endpoint_url: Optional[str] = None,
        streaming: bool = False,
        cache: Optional[bool] = None,
    ) -> BaseLLM:
        try:
            output = Bedrock(
                credentials_profile_name=credentials_profile_name,
                model_id=model_id,
                region_name=region_name,
                model_kwargs=model_kwargs,
                endpoint_url=endpoint_url,
                streaming=streaming,
                cache=cache,
            )  # type: ignore
        except Exception as e:
            raise ValueError("Could not connect to AmazonBedrock API.") from e
@@ -8,6 +8,7 @@ class AzureChatOpenAIComponent(CustomComponent):
    display_name: str = "AzureChatOpenAI"
    description: str = "LLM model from Azure OpenAI."
    documentation: str = "https://python.langchain.com/docs/integrations/llms/azure_openai"
    beta = False

    AZURE_OPENAI_MODELS = [
        "gpt-35-turbo",

@@ -18,11 +19,20 @@ class AzureChatOpenAIComponent(CustomComponent):
        "gpt-4-vision",
    ]

    AZURE_OPENAI_API_VERSIONS = [
        "2023-03-15-preview",
        "2023-05-15",
        "2023-06-01-preview",
        "2023-07-01-preview",
        "2023-08-01-preview",
        "2023-12-01-preview",
    ]

    def build_config(self):
        return {
            "model": {
                "display_name": "Model Name",
                "value": "gpt-35-turbo",
                "value": self.AZURE_OPENAI_MODELS[0],
                "options": self.AZURE_OPENAI_MODELS,
                "required": True,
            },

@@ -37,11 +47,16 @@ class AzureChatOpenAIComponent(CustomComponent):
            },
            "api_version": {
                "display_name": "API Version",
                "value": "2023-05-15",
                "options": self.AZURE_OPENAI_API_VERSIONS,
                "value": self.AZURE_OPENAI_API_VERSIONS[-1],
                "required": True,
                "advanced": True,
            },
            "api_key": {"display_name": "API Key", "required": True, "password": True},
            "api_key": {
                "display_name": "API Key",
                "required": True,
                "password": True,
            },
            "temperature": {
                "display_name": "Temperature",
                "value": 0.7,

@@ -54,26 +69,32 @@ class AzureChatOpenAIComponent(CustomComponent):
                "required": False,
                "field_type": "int",
                "advanced": True,
                "info": "Maximum number of tokens to generate.",
            },
            "code": {
                "show": False
            },
            "code": {"show": False},
        }

    def build(
        self,
        model: str,
        azure_endpoint: str,
        azure_deployment: str,
        api_key: str,
        api_version: str = "2023-05-15",
        api_version: str,
        temperature: float = 0.7,
        max_tokens: Optional[int] = 1000,
    ) -> BaseLanguageModel:
        return AzureChatOpenAI(
            model=model,
            azure_endpoint=azure_endpoint,
            azure_deployment=azure_deployment,
            api_version=api_version,
            api_key=api_key,
            temperature=temperature,
            max_tokens=max_tokens,
        )
        try:
            llm = AzureChatOpenAI(
                model=model,
                azure_endpoint=azure_endpoint,
                azure_deployment=azure_deployment,
                api_version=api_version,
                api_key=api_key,
                temperature=temperature,
                max_tokens=max_tokens,
            )
        except Exception as e:
            raise ValueError("Could not connect to AzureOpenAI API.") from e
        return llm
@@ -106,6 +106,8 @@ embeddings:
    documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/google_vertex_ai_palm"
  AmazonBedrockEmbeddings:
    documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/bedrock"
  OllamaEmbeddings:
    documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/ollama"

llms:
  OpenAI:

@@ -274,6 +276,8 @@ vectorstores:
    documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss"
  Pinecone:
    documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/pinecone"
  ElasticsearchStore:
    documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/elasticsearch"
  SupabaseVectorStore:
    documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/supabase"
  MongoDBAtlasVectorSearch:
@@ -1,6 +1,7 @@
from typing import Any, Callable, Dict, Type
from langchain.vectorstores import (
    Pinecone,
    ElasticsearchStore,
    Qdrant,
    Chroma,
    FAISS,

@@ -226,11 +227,34 @@ def initialize_qdrant(class_object: Type[Qdrant], params: dict):
    return class_object.from_documents(**params)


def initialize_elasticsearch(class_object: Type[ElasticsearchStore], params: dict):
    """Initialize elastic and return the class object"""
    if "index_name" not in params:
        raise ValueError("Elasticsearch Index must be provided in the params")
    if "es_url" not in params:
        raise ValueError("Elasticsearch URL must be provided in the params")
    if not docs_in_params(params):
        existing_index_params = {
            "embedding": params.pop("embedding"),
        }
        if "index_name" in params:
            existing_index_params["index_name"] = params.pop("index_name")
        if "es_url" in params:
            existing_index_params["es_url"] = params.pop("es_url")

        return class_object.from_existing_index(**existing_index_params)
    # If there are docs in the params, create a new index
    if "texts" in params:
        params["documents"] = params.pop("texts")
    return class_object.from_documents(**params)


vecstore_initializer: Dict[str, Callable[[Type[Any], dict], Any]] = {
    "Pinecone": initialize_pinecone,
    "Chroma": initialize_chroma,
    "Qdrant": initialize_qdrant,
    "Weaviate": initialize_weaviate,
    "ElasticsearchStore": initialize_elasticsearch,
    "FAISS": initialize_faiss,
    "SupabaseVectorStore": initialize_supabase,
    "MongoDBAtlasVectorSearch": initialize_mongodb,
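A small usage sketch for the initializer above, assuming a running Elasticsearch at the documented default URL and an Embeddings instance (`my_embeddings` is a placeholder). With no documents in the params, the function takes the `from_existing_index` path; if documents were present it would call `from_documents` instead:

from langchain.vectorstores import ElasticsearchStore

params = {
    "embedding": my_embeddings,  # placeholder Embeddings instance
    "es_url": "http://localhost:9200",
    "index_name": "test-index",
}
# No docs/texts in params, so this connects to the existing index.
store = initialize_elasticsearch(ElasticsearchStore, params)
results = store.similarity_search("query text", k=3)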
@@ -7,6 +7,7 @@ from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles

from langflow.api import router
from langflow.interface.utils import setup_llm_caching
from langflow.services.plugins.langfuse_plugin import LangfuseInstance

@@ -102,11 +103,12 @@ def setup_app(static_files_dir: Optional[Path] = None, backend_only: bool = Fals

if __name__ == "__main__":
    import uvicorn

    from langflow.__main__ import get_number_of_workers

    configure()
    uvicorn.run(
        create_app,
        "langflow.main:create_app",
        host="127.0.0.1",
        port=7860,
        workers=get_number_of_workers(),
@@ -7,9 +7,11 @@ from langchain.schema import AgentAction, Document
from langchain.vectorstores.base import VectorStore
from langchain_core.messages import AIMessage
from langchain_core.runnables.base import Runnable
from langflow.graph.graph.base import Graph
from langflow.interface.custom.custom_component import CustomComponent
from langflow.interface.run import build_sorted_vertices, get_memory_key, update_memory_keys
from langflow.services.deps import get_session_service
from langflow.services.session.service import SessionService
from loguru import logger
from pydantic import BaseModel


@@ -220,13 +222,29 @@ async def process_graph_cached(
    graph, artifacts = session if session else (None, None)
    if not graph:
        raise ValueError("Graph not found in the session")

    result = await build_graph_and_generate_result(
        graph=graph, session_id=session_id, inputs=inputs, artifacts=artifacts, session_service=session_service
    )

    return result


async def build_graph_and_generate_result(
    graph: "Graph",
    session_id: str,
    inputs: Optional[Union[dict, List[dict]]] = None,
    artifacts: Optional[Dict[str, Any]] = None,
    session_service: Optional[SessionService] = None,
):
    """Build the graph and generate the result"""
    built_object = await graph.build()
    processed_inputs = process_inputs(inputs, artifacts or {})
    result = await generate_result(built_object, processed_inputs)
    # langchain_object is now updated with the new memory
    # we need to update the cache with the updated langchain_object
    session_service.update_session(session_id, (graph, artifacts))

    if session_id and session_service:
        session_service.update_session(session_id, (graph, artifacts))
    return Result(result=result, session_id=session_id)
@@ -5,14 +5,15 @@ from typing import Any, Dict, List

import orjson
from fastapi import WebSocket, status
from loguru import logger
from starlette.websockets import WebSocketState

from langflow.api.v1.schemas import ChatMessage, ChatResponse, FileResponse
from langflow.interface.utils import pil_to_base64
from langflow.services import ServiceType, service_manager
from langflow.services.base import Service
from langflow.services.chat.cache import Subject
from langflow.services.chat.utils import process_graph
from loguru import logger
from starlette.websockets import WebSocketState

from .cache import cache_service


@@ -117,7 +118,7 @@ class ChatService(Service):
        if "after sending" in str(exc):
            logger.error(f"Error closing connection: {exc}")

    async def process_message(self, client_id: str, payload: Dict, langchain_object: Any):
    async def process_message(self, client_id: str, payload: Dict, build_result: Any):
        # Process the graph data and chat message
        chat_inputs = payload.pop("inputs", {})
        chatkey = payload.pop("chatKey", None)

@@ -134,12 +135,12 @@ class ChatService(Service):
            logger.debug("Generating result and thought")

            result, intermediate_steps, raw_output = await process_graph(
                langchain_object=langchain_object,
                build_result=build_result,
                chat_inputs=chat_inputs,
                client_id=client_id,
                session_id=self.connection_ids[client_id],
            )
            self.set_cache(client_id, langchain_object)
            self.set_cache(client_id, build_result)
        except Exception as e:
            # Log stack trace
            logger.exception(e)

@@ -205,8 +206,8 @@ class ChatService(Service):
                continue

            with self.chat_cache.set_client_id(client_id):
                if langchain_object := self.cache_service.get(client_id).get("result"):
                    await self.process_message(client_id, payload, langchain_object)
                if build_result := self.cache_service.get(client_id).get("result"):
                    await self.process_message(client_id, payload, build_result)

                else:
                    raise RuntimeError(f"Could not find a build result for client_id {client_id}")
@@ -1,20 +1,28 @@
from typing import Any

from langchain.agents import AgentExecutor
from langchain.chains.base import Chain
from langchain_core.runnables import Runnable
from loguru import logger

from langflow.api.v1.schemas import ChatMessage
from langflow.interface.utils import try_setting_streaming_options
from langflow.processing.base import get_result_and_steps
from langflow.utils.chat import ChatDefinition

LANGCHAIN_RUNNABLES = (Chain, Runnable, AgentExecutor)


async def process_graph(
    langchain_object,
    build_result,
    chat_inputs: ChatMessage,
    client_id: str,
    session_id: str,
):
    langchain_object = try_setting_streaming_options(langchain_object)
    build_result = try_setting_streaming_options(build_result)
    logger.debug("Loaded langchain object")

    if langchain_object is None:
    if build_result is None:
        # Raise user facing error
        raise ValueError("There was an error loading the langchain_object. Please, check all the nodes and try again.")


@@ -25,15 +33,36 @@ async def process_graph(
        chat_inputs.message = {}

    logger.debug("Generating result and thought")
    result, intermediate_steps, raw_output = await get_result_and_steps(
        langchain_object,
        chat_inputs.message,
        client_id=client_id,
        session_id=session_id,
    )
    if isinstance(build_result, LANGCHAIN_RUNNABLES):
        result, intermediate_steps, raw_output = await get_result_and_steps(
            build_result,
            chat_inputs.message,
            client_id=client_id,
            session_id=session_id,
        )
    elif isinstance(build_result, ChatDefinition):
        raw_output = await run_build_result(
            build_result,
            chat_inputs,
            client_id=client_id,
            session_id=session_id,
        )
        if isinstance(raw_output, dict):
            if not build_result.output_key:
                raise ValueError("No output key provided to ChatDefinition when returning a dict.")
            result = raw_output[build_result.output_key]
        else:
            result = raw_output
        intermediate_steps = []
    else:
        raise TypeError(f"Unknown type {type(build_result)}")
    logger.debug("Generated result and intermediate_steps")
    return result, intermediate_steps, raw_output
    except Exception as e:
        # Log stack trace
        logger.exception(e)
        raise e


async def run_build_result(build_result: Any, chat_inputs: ChatMessage, client_id: str, session_id: str):
    return build_result(inputs=chat_inputs.message)
@@ -5,17 +5,16 @@ from typing import TYPE_CHECKING
import sqlalchemy as sa
from alembic import command, util
from alembic.config import Config
from loguru import logger
from sqlalchemy import inspect
from sqlalchemy.exc import OperationalError
from sqlmodel import Session, SQLModel, create_engine, select, text

from langflow.services.base import Service
from langflow.services.database import models  # noqa
from langflow.services.database.models.user.crud import get_user_by_username
from langflow.services.database.utils import Result, TableResults
from langflow.services.deps import get_settings_service
from langflow.services.utils import teardown_superuser
from loguru import logger
from sqlalchemy import inspect
from sqlalchemy.exc import OperationalError
from sqlmodel import Session, SQLModel, create_engine, select, text

if TYPE_CHECKING:
    from sqlalchemy.engine import Engine

@@ -40,7 +39,7 @@ class DatabaseService(Service):
            connect_args = {"check_same_thread": False}
        else:
            connect_args = {}
        return create_engine(self.database_url, connect_args=connect_args)
        return create_engine(self.database_url, connect_args=connect_args, max_overflow=-1)

    def __enter__(self):
        self._session = Session(self.engine)
@@ -1,4 +1,4 @@
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Optional

from langflow.interface.run import build_sorted_vertices
from langflow.services.base import Service

@@ -14,14 +14,15 @@ class SessionService(Service):
    def __init__(self, cache_service):
        self.cache_service: "BaseCacheService" = cache_service

    async def load_session(self, key, data_graph):
    async def load_session(self, key, data_graph: Optional[dict] = None):
        # Check if the data is cached
        if key in self.cache_service:
            return self.cache_service.get(key)

        if key is None:
            key = self.generate_key(session_id=None, data_graph=data_graph)

        if data_graph is None:
            return (None, None)
        # If not cached, build the graph and cache it
        graph, artifacts = await build_sorted_vertices(data_graph)
@@ -11,6 +11,7 @@ BASIC_FIELDS = [
    "persist_directory",
    "persist",
    "weaviate_url",
    "es_url",
    "index_name",
    "namespace",
    "folder_path",

@@ -170,6 +171,33 @@ class VectorStoreFrontendNode(FrontendNode):
                value="",
            )
            extra_fields.extend((extra_field, extra_field2))

        elif self.template.type_name == "ElasticsearchStore":
            # add elastic and elastic credentials
            extra_field = TemplateField(
                name="es_url",
                field_type="str",
                required=True,
                placeholder="http://localhost:9200",
                show=True,
                advanced=False,
                multiline=False,
                value="http://localhost:9200",
                display_name="Elasticsearch URL",
            )
            extra_field2 = TemplateField(
                name="index_name",
                field_type="str",
                required=True,
                placeholder="test-index",
                show=True,
                advanced=False,
                multiline=False,
                value="test-index",
                display_name="Index Name",
            )
            extra_fields.extend((extra_field, extra_field2))

        elif self.template.type_name == "FAISS":
            extra_field = TemplateField(
                name="folder_path",
src/backend/langflow/utils/chat.py (new file, 34 lines)
@@ -0,0 +1,34 @@
from typing import Any, Callable, Optional, Union

from langchain_core.prompts import PromptTemplate as LCPromptTemplate
from langflow.utils.prompt import GenericPromptTemplate
from llama_index.prompts import PromptTemplate as LIPromptTemplate

PromptTemplate = Union[LCPromptTemplate, LIPromptTemplate]


class ChatDefinition:
    def __init__(
        self,
        func: Callable,
        inputs: list[str],
        output_key: Optional[str] = None,
        prompt_template: Optional[PromptTemplate] = None,
    ):
        self.func = func
        self.input_keys = inputs
        self.output_key = output_key
        self.prompt_template = prompt_template

    @classmethod
    def from_prompt_template(cls, prompt_template: PromptTemplate, func: Callable, output_key: Optional[str] = None):
        prompt = GenericPromptTemplate(prompt_template)
        return cls(
            func=func,
            inputs=prompt.input_keys,
            output_key=output_key,
            prompt_template=prompt_template,
        )

    def __call__(self, inputs: dict, callbacks: Optional[Any] = None) -> dict:
        return self.func(inputs, callbacks)
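A usage sketch for the new ChatDefinition class, with `my_func` as a placeholder callable matching the (inputs, callbacks) signature the class invokes:

from langchain_core.prompts import PromptTemplate as LCPromptTemplate

def my_func(inputs: dict, callbacks=None) -> dict:
    # Placeholder: echo a truncated "summary" back.
    return {"summary": inputs["text"][:100]}

prompt = LCPromptTemplate.from_template("Summarize: {text}")
chat_def = ChatDefinition.from_prompt_template(prompt, func=my_func, output_key="summary")
print(chat_def.input_keys)  # ['text'], derived via GenericPromptTemplate
print(chat_def({"text": "Langflow adds a ChatDefinition wrapper for plain callables."}))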
src/backend/langflow/utils/prompt.py (new file, 58 lines)
@@ -0,0 +1,58 @@
from typing import Any, Union

from langchain_core.prompts import PromptTemplate as LCPromptTemplate
from llama_index.prompts import PromptTemplate as LIPromptTemplate

PromptTemplateTypes = Union[LCPromptTemplate, LIPromptTemplate]


class GenericPromptTemplate:
    def __init__(self, prompt_template: PromptTemplateTypes):
        object.__setattr__(self, "prompt_template", prompt_template)

    @property
    def input_keys(self):
        prompt_template = object.__getattribute__(self, "prompt_template")
        if isinstance(prompt_template, LCPromptTemplate):
            return prompt_template.input_variables
        elif isinstance(prompt_template, LIPromptTemplate):
            return prompt_template.template_vars
        else:
            raise TypeError(f"Unknown prompt template type {type(prompt_template)}")

    def to_lc_prompt(self):
        prompt_template = object.__getattribute__(self, "prompt_template")
        if isinstance(prompt_template, LCPromptTemplate):
            return prompt_template
        elif isinstance(prompt_template, LIPromptTemplate):
            return LCPromptTemplate.from_template(prompt_template.get_template())
        else:
            raise TypeError(f"Unknown prompt template type {type(prompt_template)}")

    def to_li_prompt(self):
        prompt_template = object.__getattribute__(self, "prompt_template")
        if isinstance(prompt_template, LIPromptTemplate):
            return prompt_template
        elif isinstance(prompt_template, LCPromptTemplate):
            return LIPromptTemplate(template=prompt_template.template)
        else:
            raise TypeError(f"Unknown prompt template type {type(prompt_template)}")

    def __or__(self, other):
        prompt_template = object.__getattribute__(self, "prompt_template")
        if isinstance(prompt_template, LIPromptTemplate):
            return self.to_lc_prompt() | other
        else:
            raise TypeError(f"Unknown prompt template type {type(other)}")

    def __getattribute__(self, name: str) -> Any:
        if name in {
            "input_keys",
            "to_lc_prompt",
            "to_li_prompt",
            "__or__",
            "prompt_template",
        }:
            return object.__getattribute__(self, name)
        prompt_template = object.__getattribute__(self, "prompt_template")
        return getattr(prompt_template, name)
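A brief sketch of the wrapper above in use: both template flavors expose the same `input_keys`, and the conversion helpers bridge the two libraries. This assumes llama_index is installed; the template text is illustrative:

from langchain_core.prompts import PromptTemplate as LCPromptTemplate

lc_prompt = LCPromptTemplate.from_template("Tell me about {topic}")
generic = GenericPromptTemplate(lc_prompt)
print(generic.input_keys)  # ['topic'], read from input_variables
li_prompt = generic.to_li_prompt()  # same template as a llama_index PromptTemplate
print(li_prompt.get_template())  # "Tell me about {topic}"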
@@ -0,0 +1,19 @@
const SvgElasticsearchLogo = (props) => (
  <svg
    xmlns="http://www.w3.org/2000/svg"
    width="24"
    height="24"
    fill="none"
    stroke="currentColor"
    strokeLinecap="round"
    strokeLinejoin="round"
    strokeWidth="2"
    className="icon icon-tabler icon-tabler-brand-elastic"
    viewBox="0 0 24 24"
  >
    <path stroke="none" d="M0 0h24v24H0z"></path>
    <path d="M14 2a5 5 0 015 5c0 .712-.232 1.387-.5 2 1.894.042 3.5 1.595 3.5 3.5 0 1.869-1.656 3.4-3.5 3.5.333.625.5 1.125.5 1.5a2.5 2.5 0 01-2.5 2.5c-.787 0-1.542-.432-2-1-.786 1.73-2.476 3-4.5 3a5 5 0 01-4.583-7 3.5 3.5 0 01-.11-6.992h.195a2.5 2.5 0 012-4c.787 0 1.542.432 2 1 .786-1.73 2.476-3 4.5-3zM8.5 9l-3-1"></path>
    <path d="M9.5 5l-1 4 1 2 5 2 4-4M18.499 16l-3-.5-1-2.5M14.5 19l1-3.5M5.417 15L9.5 11"></path>
  </svg>
);
export default SvgElasticsearchLogo;
@@ -0,0 +1,9 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-brand-elastic" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
  <path stroke="none" d="M0 0h24v24H0z" fill="none" />
  <path d="M14 2a5 5 0 0 1 5 5c0 .712 -.232 1.387 -.5 2c1.894 .042 3.5 1.595 3.5 3.5c0 1.869 -1.656 3.4 -3.5 3.5c.333 .625 .5 1.125 .5 1.5a2.5 2.5 0 0 1 -2.5 2.5c-.787 0 -1.542 -.432 -2 -1c-.786 1.73 -2.476 3 -4.5 3a5 5 0 0 1 -4.583 -7a3.5 3.5 0 0 1 -.11 -6.992l.195 0a2.5 2.5 0 0 1 2 -4c.787 0 1.542 .432 2 1c.786 -1.73 2.476 -3 4.5 -3z" />
  <path d="M8.5 9l-3 -1" />
  <path d="M9.5 5l-1 4l1 2l5 2l4 -4" />
  <path d="M18.499 16l-3 -.5l-1 -2.5" />
  <path d="M14.5 19l1 -3.5" />
  <path d="M5.417 15l4.083 -4" />
</svg>
(new SVG image, 810 B)
src/frontend/src/icons/ElasticsearchStore/index.tsx (new file, 9 lines)
@@ -0,0 +1,9 @@
import React, { forwardRef } from "react";
import SvgElasticsearchLogo from "./ElasticsearchLogo";

export const ElasticsearchIcon = forwardRef<
  SVGSVGElement,
  React.PropsWithChildren<{}>
>((props, ref) => {
  return <SvgElasticsearchLogo ref={ref} {...props} />;
});
@@ -111,6 +111,7 @@ import { AnthropicIcon } from "../icons/Anthropic";
import { BingIcon } from "../icons/Bing";
import { ChromaIcon } from "../icons/ChromaIcon";
import { CohereIcon } from "../icons/Cohere";
import { ElasticsearchIcon } from "../icons/ElasticsearchStore";
import { EvernoteIcon } from "../icons/Evernote";
import { FBIcon } from "../icons/FacebookMessenger";
import { GitBookIcon } from "../icons/GitBook";

@@ -256,6 +257,7 @@ export const nodeIconsLucide: iconsType = {
  OpenAIEmbeddings: OpenAiIcon,
  Pinecone: PineconeIcon,
  Qdrant: QDrantIcon,
  ElasticsearchStore: ElasticsearchIcon,
  Weaviate: WeaviateIcon,
  Searx: SearxIcon,
  SlackDirectoryLoader: SvgSlackIcon,