Merge branch 'release' into feat-tweaks

This commit is contained in:
Gabriel Luiz Freitas Almeida 2023-06-29 19:22:12 -03:00 committed by GitHub
commit 15e38be62e
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
63 changed files with 1938 additions and 803 deletions

66
.github/workflows/codeql.yml vendored Normal file
View file

@ -0,0 +1,66 @@
# CodeQL static-analysis workflow: scans the repository's Python and
# JavaScript sources on pushes/PRs to the listed branches and on a
# weekly cron schedule.
name: "CodeQL"

on:
  push:
    branches: [ 'dev', 'main' ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ 'dev' ]
  schedule:
    - cron: '17 2 * * 1'

jobs:
  analyze:
    name: Analyze
    # Swift analysis requires macOS runners; all other languages run on Ubuntu.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }}
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'python', 'javascript' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Use only 'java' to analyze code written in Java, Kotlin or both
        # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both
        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v2

      # Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun

      # If the Autobuild step above fails, remove it and uncomment the lines
      # below, then modify them (or add more) to build your code.
      # - run: |
      #     echo "Run, Build Application using script"
      #     ./location_of_script_within_repo/buildscript.sh

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2
        with:
          category: "/language:${{matrix.language}}"

827
poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.2.2"
version = "0.2.6"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [
@ -25,22 +25,21 @@ langflow = "langflow.__main__:main"
python = ">=3.9,<3.12"
fastapi = "^0.98.0"
uvicorn = "^0.22.0"
beautifulsoup4 = "^4.11.2"
beautifulsoup4 = "^4.12.2"
google-search-results = "^2.4.1"
google-api-python-client = "^2.79.0"
typer = "^0.9.0"
gunicorn = "^20.1.0"
langchain = "^0.0.215"
langchain = "^0.0.219"
openai = "^0.27.8"
types-pyyaml = "^6.0.12.8"
pandas = "^1.5.3"
pandas = "^2.0.0"
chromadb = "^0.3.21"
huggingface-hub = "^0.13.3"
huggingface-hub = "^0.15.0"
rich = "^13.4.2"
llama-cpp-python = "~0.1.0"
networkx = "^3.1"
unstructured = "^0.5.11"
pypdf = "^3.7.1"
unstructured = "^0.7.0"
pypdf = "^3.11.0"
lxml = "^4.9.2"
pysrt = "^1.1.2"
fake-useragent = "^1.1.3"
@ -49,18 +48,18 @@ psycopg2-binary = "^2.9.6"
pyarrow = "^12.0.0"
tiktoken = "~0.4.0"
wikipedia = "^1.4.0"
langchain-serve = { version = ">0.0.39", optional = true }
qdrant-client = "^1.2.0"
langchain-serve = { version = ">0.0.47", optional = true }
qdrant-client = "^1.3.0"
websockets = "^10.3"
weaviate-client = "^3.21.0"
jina = "3.15.2"
sentence-transformers = "^2.2.2"
ctransformers = "^0.2.2"
cohere = "^4.6.0"
ctransformers = "^0.2.10"
cohere = "^4.11.0"
python-multipart = "^0.0.6"
sqlmodel = "^0.0.8"
faiss-cpu = "^1.7.4"
anthropic = "^0.2.10"
anthropic = "^0.3.0"
orjson = "^3.9.1"
multiprocess = "^0.70.14"
cachetools = "^5.3.1"
@ -70,7 +69,8 @@ pinecone-client = "^2.2.2"
supabase = "^1.0.3"
pymongo = "^4.4.0"
certifi = "^2023.5.7"
psycopg = "^3.1.9"
psycopg-binary = "^3.1.9"
[tool.poetry.dev-dependencies]
black = "^23.1.0"
@ -85,6 +85,7 @@ pytest-cov = "^4.0.0"
pandas-stubs = "^2.0.0.230412"
types-pillow = "^9.5.0.2"
types-appdirs = "^1.4.3.5"
types-pyyaml = "^6.0.12.8"
[tool.poetry.extras]

View file

@ -1,6 +1,5 @@
import sys
import time
from fastapi import FastAPI
import httpx
from multiprocess import Process, cpu_count # type: ignore
import platform
@ -11,9 +10,7 @@ from rich.panel import Panel
from rich import box
from rich import print as rprint
import typer
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from langflow.main import create_app
from langflow.main import setup_app
from langflow.settings import settings
from langflow.utils.logger import configure, logger
import webbrowser
@ -144,15 +141,9 @@ def serve(
remove_api_keys=remove_api_keys,
cache=cache,
)
# get the directory of the current file
if not path:
frontend_path = Path(__file__).parent
static_files_dir = frontend_path / "frontend"
else:
static_files_dir = Path(path)
app = create_app()
setup_static_files(app, static_files_dir)
# create path object if path is provided
static_files_dir: Optional[Path] = Path(path) if path else None
app = setup_app(static_files_dir=static_files_dir)
# check if port is being used
if is_port_in_use(port, host):
port = get_free_port(port)
@ -200,29 +191,6 @@ def run_on_windows(host, port, log_level, options, app):
run_langflow(host, port, log_level, options, app)
def setup_static_files(app: FastAPI, static_files_dir: Path):
"""
Setup the static files directory.
Args:
app (FastAPI): FastAPI app.
path (str): Path to the static files directory.
"""
app.mount(
"/",
StaticFiles(directory=static_files_dir, html=True),
name="static",
)
@app.exception_handler(404)
async def custom_404_handler(request, __):
path = static_files_dir / "index.html"
if not path.exists():
raise RuntimeError(f"File at path {path} does not exist.")
return FileResponse(path)
def is_port_in_use(port, host="localhost"):
"""
Check if a port is in use.

View file

@ -1,12 +1,6 @@
from fastapi import (
APIRouter,
HTTPException,
WebSocket,
WebSocketException,
status,
)
from fastapi import APIRouter, HTTPException, WebSocket, WebSocketException, status
from fastapi.responses import StreamingResponse
from langflow.api.v1.schemas import BuiltResponse, InitResponse, StreamData
from langflow.api.v1.schemas import BuildStatus, BuiltResponse, InitResponse, StreamData
from langflow.chat.manager import ChatManager
from langflow.graph.graph.base import Graph
@ -32,15 +26,29 @@ async def chat(client_id: str, websocket: WebSocket):
await websocket.close(code=status.WS_1011_INTERNAL_ERROR, reason=str(exc))
@router.post("/build/init", response_model=InitResponse, status_code=201)
async def init_build(graph_data: dict):
@router.post("/build/init/{flow_id}", response_model=InitResponse, status_code=201)
async def init_build(graph_data: dict, flow_id: str):
"""Initialize the build by storing graph data and returning a unique session ID."""
try:
flow_id = graph_data.get("id")
if flow_id is None:
raise ValueError("No ID provided")
flow_data_store[flow_id] = graph_data
# Check if already building
if (
flow_id in flow_data_store
and flow_data_store[flow_id]["status"] == BuildStatus.IN_PROGRESS
):
return InitResponse(flowId=flow_id)
# Delete from cache if already exists
if flow_id in chat_manager.in_memory_cache:
with chat_manager.in_memory_cache._lock:
chat_manager.in_memory_cache.delete(flow_id)
logger.debug(f"Deleted flow {flow_id} from cache")
flow_data_store[flow_id] = {
"graph_data": graph_data,
"status": BuildStatus.STARTED,
}
return InitResponse(flowId=flow_id)
except Exception as exc:
@ -52,8 +60,9 @@ async def init_build(graph_data: dict):
async def build_status(flow_id: str):
"""Check the flow_id is in the flow_data_store."""
try:
built = flow_id in flow_data_store and not isinstance(
flow_data_store[flow_id], dict
built = (
flow_id in flow_data_store
and flow_data_store[flow_id]["status"] == BuildStatus.SUCCESS
)
return BuiltResponse(
@ -77,7 +86,12 @@ async def stream_build(flow_id: str):
yield str(StreamData(event="error", data={"error": error_message}))
return
graph_data = flow_data_store[flow_id].get("data")
if flow_data_store[flow_id].get("status") == BuildStatus.IN_PROGRESS:
error_message = "Already building"
yield str(StreamData(event="error", data={"error": error_message}))
return
graph_data = flow_data_store[flow_id].get("graph_data")
if not graph_data:
error_message = "No data provided"
@ -95,6 +109,7 @@ async def stream_build(flow_id: str):
return
number_of_nodes = len(graph.nodes)
flow_data_store[flow_id]["status"] = BuildStatus.IN_PROGRESS
for i, vertex in enumerate(graph.generator_build(), 1):
try:
log_dict = {
@ -110,6 +125,7 @@ async def stream_build(flow_id: str):
except Exception as exc:
params = str(exc)
valid = False
flow_data_store[flow_id]["status"] = BuildStatus.FAILURE
response = {
"valid": valid,
@ -121,8 +137,10 @@ async def stream_build(flow_id: str):
yield str(StreamData(event="message", data=response))
chat_manager.set_cache(flow_id, graph.build())
flow_data_store[flow_id]["status"] = BuildStatus.SUCCESS
except Exception as exc:
logger.error("Error while building the flow: %s", exc)
flow_data_store[flow_id]["status"] = BuildStatus.FAILURE
yield str(StreamData(event="error", data={"error": str(exc)}))
finally:
yield str(StreamData(event="message", data=final_response))

View file

@ -11,7 +11,7 @@ from langflow.api.v1.schemas import (
UploadFileResponse,
)
from langflow.interface.types import build_langchain_types_dict
from langflow.interface.types import langchain_types_dict
from langflow.database.base import get_session
from sqlmodel import Session
@ -21,7 +21,7 @@ router = APIRouter(tags=["Base"])
@router.get("/all")
def get_all():
return build_langchain_types_dict()
return langchain_types_dict
# For backwards compatibility we will keep the old endpoint

View file

@ -1,3 +1,4 @@
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from langflow.database.models.flow import FlowCreate, FlowRead
@ -5,6 +6,15 @@ from pydantic import BaseModel, Field, validator
import json
class BuildStatus(Enum):
    """Status of the build."""

    # Build completed and the built graph was cached for the flow.
    SUCCESS = "success"
    # Build raised an exception (either per-vertex or for the whole flow).
    FAILURE = "failure"
    # Build was initialized: graph data stored, streaming not yet begun.
    STARTED = "started"
    # Build is currently streaming vertex-by-vertex results.
    IN_PROGRESS = "in_progress"
class GraphData(BaseModel):
"""Data inside the exported flow."""

View file

@ -0,0 +1,2 @@
class ChatConfig:
    """Configuration flags for the chat subsystem."""

    # Whether chat responses are streamed; enabled by default.
    streaming: bool = True

View file

@ -1,141 +1,264 @@
---
agents:
- ZeroShotAgent
- JsonAgent
- CSVAgent
- AgentInitializer
- VectorStoreAgent
- VectorStoreRouterAgent
- SQLAgent
ZeroShotAgent:
documentation: "https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent"
JsonAgent:
documentation: "https://python.langchain.com/docs/modules/agents/toolkits/openapi"
CSVAgent:
documentation: "https://python.langchain.com/docs/modules/agents/toolkits/csv"
AgentInitializer:
documentation: "https://python.langchain.com/docs/modules/agents/agent_types/"
VectorStoreAgent:
documentation: ""
VectorStoreRouterAgent:
documentation: ""
SQLAgent:
documentation: ""
chains:
- LLMChain
- LLMMathChain
- LLMCheckerChain
- ConversationChain
- SeriesCharacterChain
- MidJourneyPromptChain
- TimeTravelGuideChain
- SQLDatabaseChain
- RetrievalQA
- RetrievalQAWithSourcesChain
- ConversationalRetrievalChain
- CombineDocsChain
LLMChain:
documentation: "https://python.langchain.com/docs/modules/chains/foundational/llm_chain"
LLMMathChain:
documentation: "https://python.langchain.com/docs/modules/chains/additional/llm_math"
LLMCheckerChain:
documentation: "https://python.langchain.com/docs/modules/chains/additional/llm_checker"
ConversationChain:
documentation: ""
SeriesCharacterChain:
documentation: ""
MidJourneyPromptChain:
documentation: ""
TimeTravelGuideChain:
documentation: ""
SQLDatabaseChain:
documentation: ""
RetrievalQA:
documentation: "https://python.langchain.com/docs/modules/chains/popular/vector_db_qa"
RetrievalQAWithSourcesChain:
documentation: ""
ConversationalRetrievalChain:
documentation: "https://python.langchain.com/docs/modules/chains/popular/chat_vector_db"
CombineDocsChain:
documentation: ""
documentloaders:
- AirbyteJSONLoader
- CoNLLULoader
- CSVLoader
- UnstructuredEmailLoader
- EverNoteLoader
- FacebookChatLoader
- GutenbergLoader
- BSHTMLLoader
- UnstructuredHTMLLoader
# - UnstructuredImageLoader # Issue with Python 3.11 (https://github.com/Unstructured-IO/unstructured-inference/issues/83)
- UnstructuredMarkdownLoader
- PyPDFLoader
- UnstructuredPowerPointLoader
- SRTLoader
- TelegramChatLoader
- TextLoader
- UnstructuredWordDocumentLoader
- WebBaseLoader
- AZLyricsLoader
- CollegeConfidentialLoader
- HNLoader
- IFixitLoader
- IMSDbLoader
- GitbookLoader
- ReadTheDocsLoader
- SlackDirectoryLoader
- NotionDirectoryLoader
- DirectoryLoader
- GitLoader
AirbyteJSONLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/airbyte_json"
CoNLLULoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/conll-u"
CSVLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/csv"
UnstructuredEmailLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/email"
EverNoteLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/evernote"
FacebookChatLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/facebook_chat"
GutenbergLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/gutenberg"
BSHTMLLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/how_to/html"
UnstructuredHTMLLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/how_to/html"
UnstructuredMarkdownLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/how_to/markdown"
PyPDFLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/how_to/pdf"
UnstructuredPowerPointLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/microsoft_powerpoint"
SRTLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/subtitle"
TelegramChatLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/telegram"
TextLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/"
UnstructuredWordDocumentLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/microsoft_word"
WebBaseLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/web_base"
AZLyricsLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/azlyrics"
CollegeConfidentialLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/college_confidential"
HNLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/hacker_news"
IFixitLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/ifixit"
IMSDbLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/imsdb"
GitbookLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/gitbook"
ReadTheDocsLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/readthedocs_documentation"
SlackDirectoryLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/slack"
NotionDirectoryLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/notion"
DirectoryLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/how_to/file_directory"
GitLoader:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/git"
embeddings:
- OpenAIEmbeddings
- HuggingFaceEmbeddings
- CohereEmbeddings
OpenAIEmbeddings:
documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/openai"
HuggingFaceEmbeddings:
documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/sentence_transformers"
CohereEmbeddings:
documentation: "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/cohere"
llms:
- OpenAI
# - AzureOpenAI
# - AzureChatOpenAI
- ChatOpenAI
- LlamaCpp
- CTransformers
- Cohere
- Anthropic
- ChatAnthropic
- HuggingFaceHub
OpenAI:
documentation: "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/openai"
ChatOpenAI:
documentation: "https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai"
LlamaCpp:
documentation: "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp"
CTransformers:
documentation: "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/ctransformers"
Cohere:
documentation: "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"
Anthropic:
documentation: ""
ChatAnthropic:
documentation: "https://python.langchain.com/docs/modules/model_io/models/chat/integrations/anthropic"
HuggingFaceHub:
documentation: "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/huggingface_hub"
memories:
- ConversationBufferMemory
- ConversationSummaryMemory
- ConversationKGMemory
# https://github.com/supabase-community/supabase-py/issues/482
# ZepChatMessageHistory:
# documentation: "https://python.langchain.com/docs/modules/memory/integrations/zep_memory"
ConversationEntityMemory:
documentation: "https://python.langchain.com/docs/modules/memory/integrations/entity_memory_with_sqlite"
# https://github.com/hwchase17/langchain/issues/6091
# SQLiteEntityStore:
# documentation: "https://python.langchain.com/docs/modules/memory/integrations/entity_memory_with_sqlite"
PostgresChatMessageHistory:
documentation: "https://python.langchain.com/docs/modules/memory/integrations/postgres_chat_message_history"
ConversationBufferMemory:
documentation: "https://python.langchain.com/docs/modules/memory/how_to/buffer"
ConversationSummaryMemory:
documentation: "https://python.langchain.com/docs/modules/memory/how_to/summary"
ConversationKGMemory:
documentation: "https://python.langchain.com/docs/modules/memory/how_to/kg"
ConversationBufferWindowMemory:
documentation: "https://python.langchain.com/docs/modules/memory/how_to/buffer_window"
VectorStoreRetrieverMemory:
documentation: "https://python.langchain.com/docs/modules/memory/how_to/vectorstore_retriever_memory"
prompts:
- PromptTemplate
- FewShotPromptTemplate
- ZeroShotPrompt
PromptTemplate:
documentation: "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/"
ZeroShotPrompt:
documentation: "https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent"
textsplitters:
- CharacterTextSplitter
- RecursiveCharacterTextSplitter
# - LatexTextSplitter
# - PythonCodeTextSplitter
CharacterTextSplitter:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/character_text_splitter"
RecursiveCharacterTextSplitter:
documentation: "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/recursive_text_splitter"
toolkits:
- OpenAPIToolkit
- JsonToolkit
- VectorStoreInfo
- VectorStoreRouterToolkit
- VectorStoreToolkit
OpenAPIToolkit:
documentation: ""
JsonToolkit:
documentation: ""
VectorStoreInfo:
documentation: ""
VectorStoreRouterToolkit:
documentation: ""
VectorStoreToolkit:
documentation: ""
tools:
- Search
- PAL-MATH
- Calculator
- Serper Search
- Tool
- PythonFunctionTool
- PythonFunction
- JsonSpec
- News API
- TMDB API
- Podcast API
- QuerySQLDataBaseTool
- InfoSQLDatabaseTool
- ListSQLDatabaseTool
# - QueryCheckerTool
- BingSearchRun
- GoogleSearchRun
- GoogleSearchResults
- GoogleSerperRun
- JsonListKeysTool
- JsonGetValueTool
- PythonREPLTool
- PythonAstREPLTool
- RequestsGetTool
- RequestsPostTool
- RequestsPatchTool
- RequestsPutTool
- RequestsDeleteTool
- WikipediaQueryRun
- WolframAlphaQueryRun
Search:
documentation: ""
PAL-MATH:
documentation: ""
Calculator:
documentation: ""
Serper Search:
documentation: ""
Tool:
documentation: ""
PythonFunctionTool:
documentation: ""
PythonFunction:
documentation: ""
JsonSpec:
documentation: ""
News API:
documentation: ""
TMDB API:
documentation: ""
Podcast API:
documentation: ""
QuerySQLDataBaseTool:
documentation: ""
InfoSQLDatabaseTool:
documentation: ""
ListSQLDatabaseTool:
documentation: ""
BingSearchRun:
documentation: ""
GoogleSearchRun:
documentation: ""
GoogleSearchResults:
documentation: ""
GoogleSerperRun:
documentation: ""
JsonListKeysTool:
documentation: ""
JsonGetValueTool:
documentation: ""
PythonREPLTool:
documentation: ""
PythonAstREPLTool:
documentation: ""
RequestsGetTool:
documentation: ""
RequestsPostTool:
documentation: ""
RequestsPatchTool:
documentation: ""
RequestsPutTool:
documentation: ""
RequestsDeleteTool:
documentation: ""
WikipediaQueryRun:
documentation: ""
WolframAlphaQueryRun:
documentation: ""
utilities:
- BingSearchAPIWrapper
- GoogleSearchAPIWrapper
- GoogleSerperAPIWrapper
- SearxResults
- SearxSearchWrapper
- SerpAPIWrapper
- WikipediaAPIWrapper
- WolframAlphaAPIWrapper
# - ZapierNLAWrapper
- SQLDatabase
BingSearchAPIWrapper:
documentation: ""
GoogleSearchAPIWrapper:
documentation: ""
GoogleSerperAPIWrapper:
documentation: ""
SearxResults:
documentation: ""
SearxSearchWrapper:
documentation: ""
SerpAPIWrapper:
documentation: ""
WikipediaAPIWrapper:
documentation: ""
WolframAlphaAPIWrapper:
documentation: ""
retrievers:
MultiQueryRetriever:
documentation: "https://python.langchain.com/docs/modules/data_connection/retrievers/how_to/MultiQueryRetriever"
# https://github.com/supabase-community/supabase-py/issues/482
# ZepRetriever:
# documentation: "https://python.langchain.com/docs/modules/data_connection/retrievers/integrations/zep_memorystore"
vectorstores:
- Chroma
- Qdrant
- Weaviate
- FAISS
- Pinecone
- SupabaseVectorStore
- MongoDBAtlasVectorSearch
Chroma:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/chroma"
Qdrant:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/qdrant"
Weaviate:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/weaviate"
FAISS:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss"
Pinecone:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/pinecone"
SupabaseVectorStore:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/supabase"
MongoDBAtlasVectorSearch:
documentation: "https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/mongodb_atlas_vector_search"
wrappers:
- RequestsWrapper
# - ChatPromptTemplate
# - SystemMessagePromptTemplate
# - HumanMessagePromptTemplate
RequestsWrapper:
documentation: ""

View file

@ -21,6 +21,9 @@ CUSTOM_NODES = {
"utilities": {
"SQLDatabase": frontend_node.agents.SQLDatabaseNode(),
},
"memories": {
"PostgresChatMessageHistory": frontend_node.memories.PostgresChatMessageHistoryFrontendNode(),
},
"chains": {
"SeriesCharacterChain": frontend_node.chains.SeriesCharacterChainNode(),
"TimeTravelGuideChain": frontend_node.chains.TimeTravelGuideChainNode(),

View file

@ -14,6 +14,7 @@ from langflow.graph.vertex.types import (
ToolkitVertex,
VectorStoreVertex,
WrapperVertex,
RetrieverVertex,
)
__all__ = [
@ -32,4 +33,5 @@ __all__ = [
"ToolkitVertex",
"VectorStoreVertex",
"WrapperVertex",
"RetrieverVertex",
]

View file

@ -1,18 +1,5 @@
from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.types import (
AgentVertex,
ChainVertex,
DocumentLoaderVertex,
EmbeddingVertex,
LLMVertex,
MemoryVertex,
PromptVertex,
TextSplitterVertex,
ToolVertex,
ToolkitVertex,
VectorStoreVertex,
WrapperVertex,
)
from langflow.graph.vertex import types
from langflow.interface.agents.base import agent_creator
from langflow.interface.chains.base import chain_creator
from langflow.interface.document_loaders.base import documentloader_creator
@ -25,22 +12,23 @@ from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
from langflow.interface.retrievers.base import retriever_creator
from typing import Dict, Type
VERTEX_TYPE_MAP: Dict[str, Type[Vertex]] = {
**{t: PromptVertex for t in prompt_creator.to_list()},
**{t: AgentVertex for t in agent_creator.to_list()},
**{t: ChainVertex for t in chain_creator.to_list()},
**{t: ToolVertex for t in tool_creator.to_list()},
**{t: ToolkitVertex for t in toolkits_creator.to_list()},
**{t: WrapperVertex for t in wrapper_creator.to_list()},
**{t: LLMVertex for t in llm_creator.to_list()},
**{t: MemoryVertex for t in memory_creator.to_list()},
**{t: EmbeddingVertex for t in embedding_creator.to_list()},
**{t: VectorStoreVertex for t in vectorstore_creator.to_list()},
**{t: DocumentLoaderVertex for t in documentloader_creator.to_list()},
**{t: TextSplitterVertex for t in textsplitter_creator.to_list()},
**{t: types.PromptVertex for t in prompt_creator.to_list()},
**{t: types.AgentVertex for t in agent_creator.to_list()},
**{t: types.ChainVertex for t in chain_creator.to_list()},
**{t: types.ToolVertex for t in tool_creator.to_list()},
**{t: types.ToolkitVertex for t in toolkits_creator.to_list()},
**{t: types.WrapperVertex for t in wrapper_creator.to_list()},
**{t: types.LLMVertex for t in llm_creator.to_list()},
**{t: types.MemoryVertex for t in memory_creator.to_list()},
**{t: types.EmbeddingVertex for t in embedding_creator.to_list()},
**{t: types.VectorStoreVertex for t in vectorstore_creator.to_list()},
**{t: types.DocumentLoaderVertex for t in documentloader_creator.to_list()},
**{t: types.TextSplitterVertex for t in textsplitter_creator.to_list()},
**{t: types.RetrieverVertex for t in retriever_creator.to_list()},
}

View file

@ -112,6 +112,11 @@ class MemoryVertex(Vertex):
super().__init__(data, base_type="memory")
class RetrieverVertex(Vertex):
    """Graph vertex for retriever components (base_type ``retrievers``)."""

    def __init__(self, data: Dict):
        super().__init__(data, base_type="retrievers")
class TextSplitterVertex(Vertex):
def __init__(self, data: Dict):
super().__init__(data, base_type="textsplitters")

View file

@ -8,6 +8,7 @@ from langflow.template.field.base import TemplateField
from langflow.template.frontend_node.base import FrontendNode
from langflow.template.template.base import Template
from langflow.utils.logger import logger
from langflow.settings import settings
# Assuming necessary imports for Field, Template, and FrontendNode classes
@ -15,12 +16,29 @@ from langflow.utils.logger import logger
class LangChainTypeCreator(BaseModel, ABC):
type_name: str
type_dict: Optional[Dict] = None
name_docs_dict: Optional[Dict[str, str]] = None
@property
def frontend_node_class(self) -> Type[FrontendNode]:
"""The class type of the FrontendNode created in frontend_node."""
return FrontendNode
@property
def docs_map(self) -> Dict[str, str]:
"""A dict with the name of the component as key and the documentation link as value."""
if self.name_docs_dict is None:
try:
type_settings = getattr(settings, self.type_name)
self.name_docs_dict = {
name: value_dict["documentation"]
for name, value_dict in type_settings.items()
}
except AttributeError as exc:
logger.error(exc)
self.name_docs_dict = {}
return self.name_docs_dict
@property
@abstractmethod
def type_to_loader_dict(self) -> Dict:
@ -83,7 +101,7 @@ class LangChainTypeCreator(BaseModel, ABC):
signature.add_extra_fields()
signature.add_extra_base_classes()
signature.set_documentation(self.docs_map.get(name, ""))
return signature

View file

@ -44,6 +44,7 @@ def import_by_type(_type: str, name: str) -> Any:
"documentloaders": import_documentloader,
"textsplitters": import_textsplitter,
"utilities": import_utility,
"retrievers": import_retriever,
}
if _type == "llms":
key = "chat" if "chat" in name.lower() else "llm"
@ -59,6 +60,11 @@ def import_chat_llm(llm: str) -> BaseChatModel:
return import_class(f"langchain.chat_models.{llm}")
def import_retriever(retriever: str) -> Any:
"""Import retriever from retriever name"""
return import_module(f"from langchain.retrievers import {retriever}")
def import_memory(memory: str) -> Any:
"""Import memory from memory name"""
return import_module(f"from langchain.memory import {memory}")

View file

@ -1,11 +1,12 @@
import json
from typing import Any, Callable, Dict, Sequence
from typing import Any, Callable, Dict, Sequence, Type
from langchain.agents import ZeroShotAgent
from langchain.agents import agent as agent_module
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.agents.tools import BaseTool
from langflow.interface.initialize.vector_store import vecstore_initializer
from pydantic import ValidationError
@ -14,8 +15,13 @@ from langflow.interface.custom_lists import CUSTOM_NODES
from langflow.interface.importing.utils import get_function, import_by_type
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.chains.base import chain_creator
from langflow.interface.retrievers.base import retriever_creator
from langflow.interface.utils import load_file_into_dict
from langflow.utils import validate
from langchain.chains.base import Chain
from langchain.vectorstores.base import VectorStore
from langchain.document_loaders.base import BaseLoader
from langchain.prompts.base import BasePromptTemplate
def instantiate_class(node_type: str, base_type: str, params: Dict) -> Any:
@ -43,8 +49,8 @@ def convert_params_to_sets(params):
def convert_kwargs(params):
# if *kwargs are passed as a string, convert to dict
# first find any key that has kwargs in it
kwargs_keys = [key for key in params.keys() if "kwargs" in key]
# first find any key that has kwargs or config in it
kwargs_keys = [key for key in params.keys() if "kwargs" in key or "config" in key]
for key in kwargs_keys:
if isinstance(params[key], str):
params[key] = json.loads(params[key])
@ -72,11 +78,47 @@ def instantiate_based_on_type(class_object, base_type, node_type, params):
return instantiate_utility(node_type, class_object, params)
elif base_type == "chains":
return instantiate_chains(node_type, class_object, params)
elif base_type == "llms":
return instantiate_llm(node_type, class_object, params)
elif base_type == "retrievers":
return instantiate_retriever(node_type, class_object, params)
elif base_type == "memory":
return instantiate_memory(node_type, class_object, params)
else:
return class_object(**params)
def instantiate_chains(node_type, class_object, params):
def instantiate_memory(node_type, class_object, params):
    """Instantiate a memory node.

    Builds the memory class with the given params. Database-backed memory
    classes surface broken connection strings as errors mentioning a missing
    ``cursor`` attribute or ``conn`` field; those are converted into a
    clearer AttributeError for the user.

    Args:
        node_type: Name of the node being built (unused; kept for a uniform
            instantiate_* signature).
        class_object: The memory class to instantiate.
        params: Keyword arguments passed to the class constructor.

    Returns:
        An instance of ``class_object``.

    Raises:
        AttributeError: If the underlying error looks like a failed
            database connection.
    """
    try:
        return class_object(**params)
    except Exception as exc:
        # Detect connection failures surfaced as attribute/field errors by
        # database-backed memory classes.
        if "object has no attribute 'cursor'" in str(exc) or (
            'object has no field "conn"' in str(exc)
        ):
            raise AttributeError(
                "Failed to build connection to database."
                f" Please check your connection string and try again. Error: {exc}"
            ) from exc
        # Bare `raise` (instead of `raise exc`) preserves the original
        # traceback for unrelated errors.
        raise
def instantiate_retriever(node_type, class_object, params):
    """Build a retriever instance.

    A ``retriever`` param that is actually a vector store (anything exposing
    ``as_retriever``) is converted first. Node types registered in
    ``retriever_creator.from_method_nodes`` are built through that factory
    method; everything else uses the plain constructor.
    """
    if "retriever" in params:
        candidate = params["retriever"]
        if hasattr(candidate, "as_retriever"):
            params["retriever"] = candidate.as_retriever()

    factory_name = retriever_creator.from_method_nodes.get(node_type)
    if factory_name is None:
        return class_object(**params)

    factory = getattr(class_object, factory_name, None)
    if not factory:
        raise ValueError(f"Method {factory_name} not found in {class_object}")
    return factory(**params)
def instantiate_llm(node_type, class_object, params: Dict):
    """Instantiate an LLM node.

    LLMs need no special handling here: the class is simply constructed with
    the given params. ``node_type`` is unused but kept for a uniform
    instantiate_* signature.
    """
    llm = class_object(**params)
    return llm
def instantiate_chains(node_type, class_object: Type[Chain], params: Dict):
if "retriever" in params and hasattr(params["retriever"], "as_retriever"):
params["retriever"] = params["retriever"].as_retriever()
if node_type in chain_creator.from_method_nodes:
@ -88,11 +130,11 @@ def instantiate_chains(node_type, class_object, params):
return class_object(**params)
def instantiate_agent(class_object, params):
def instantiate_agent(class_object: Type[agent_module.Agent], params: Dict):
    """Build an AgentExecutor for the given agent class.

    Delegates to ``load_agent_executor``, which wires the agent up with the
    ``llm_chain`` and ``allowed_tools`` found in ``params``.
    """
    executor = load_agent_executor(class_object, params)
    return executor
def instantiate_prompt(node_type, class_object, params):
def instantiate_prompt(node_type, class_object: Type[BasePromptTemplate], params: Dict):
if node_type == "ZeroShotPrompt":
if "tools" not in params:
params["tools"] = []
@ -100,7 +142,7 @@ def instantiate_prompt(node_type, class_object, params):
return class_object(**params)
def instantiate_tool(node_type, class_object, params):
def instantiate_tool(node_type, class_object: Type[BaseTool], params: Dict):
if node_type == "JsonSpec":
params["dict_"] = load_file_into_dict(params.pop("path"))
return class_object(**params)
@ -118,7 +160,7 @@ def instantiate_tool(node_type, class_object, params):
return class_object(**params)
def instantiate_toolkit(node_type, class_object, params):
def instantiate_toolkit(node_type, class_object: Type[BaseToolkit], params: Dict):
loaded_toolkit = class_object(**params)
# Commenting this out for now to use toolkits as normal tools
# if toolkits_creator.has_create_function(node_type):
@ -128,7 +170,7 @@ def instantiate_toolkit(node_type, class_object, params):
return loaded_toolkit
def instantiate_embedding(class_object, params):
def instantiate_embedding(class_object, params: Dict):
params.pop("model", None)
params.pop("headers", None)
try:
@ -142,7 +184,7 @@ def instantiate_embedding(class_object, params):
return class_object(**params)
def instantiate_vectorstore(class_object, params):
def instantiate_vectorstore(class_object: Type[VectorStore], params: Dict):
search_kwargs = params.pop("search_kwargs", {})
if initializer := vecstore_initializer.get(class_object.__name__):
vecstore = initializer(class_object, params)
@ -158,7 +200,7 @@ def instantiate_vectorstore(class_object, params):
return vecstore
def instantiate_documentloader(class_object, params):
def instantiate_documentloader(class_object: Type[BaseLoader], params: Dict):
if "file_filter" in params:
# file_filter will be a string but we need a function
# that will be used to filter the files using file_filter
@ -171,35 +213,55 @@ def instantiate_documentloader(class_object, params):
extension.strip() in x for extension in extensions
)
metadata = params.pop("metadata", None)
if metadata and isinstance(metadata, str):
try:
metadata = json.loads(metadata)
except json.JSONDecodeError as exc:
raise ValueError(
"The metadata you provided is not a valid JSON string."
) from exc
docs = class_object(**params).load()
# Now if metadata is an empty dict, we will not add it to the documents
if metadata:
if isinstance(metadata, str):
try:
metadata = json.loads(metadata)
except json.JSONDecodeError as exc:
raise ValueError(
"The metadata you provided is not a valid JSON string."
) from exc
for doc in docs:
doc.metadata = metadata
# If the document already has metadata, we will not overwrite it
if not doc.metadata:
doc.metadata = metadata
else:
doc.metadata.update(metadata)
return docs
def instantiate_textsplitter(class_object, params):
def instantiate_textsplitter(
class_object,
params: Dict,
):
try:
documents = params.pop("documents")
except KeyError as e:
except KeyError as exc:
raise ValueError(
"The source you provided did not load correctly or was empty."
"Try changing the chunk_size of the Text Splitter."
) from e
text_splitter = class_object(**params)
) from exc
if (
"separator_type" in params and params["separator_type"] == "Text"
) or "separator_type" not in params:
params.pop("separator_type", None)
text_splitter = class_object(**params)
else:
from langchain.text_splitter import Language
language = params.pop("separator_type", None)
params["language"] = Language(language)
params.pop("separators", None)
text_splitter = class_object.from_language(**params)
return text_splitter.split_documents(documents)
def instantiate_utility(node_type, class_object, params):
def instantiate_utility(node_type, class_object, params: Dict):
    """Instantiate a utility node.

    SQLDatabase is special-cased: it is created from its connection URI via
    the ``from_uri`` classmethod. Every other utility uses its constructor.
    """
    if node_type != "SQLDatabase":
        return class_object(**params)
    uri = params.pop("uri")
    return class_object.from_uri(uri)
@ -225,6 +287,8 @@ def load_agent_executor(agent_class: type[agent_module.Agent], params, **kwargs)
"""Load agent executor from agent class, tools and chain"""
allowed_tools: Sequence[BaseTool] = params.get("allowed_tools", [])
llm_chain = params["llm_chain"]
# agent has hidden args for memory. might need to be support
# memory = params["memory"]
# if allowed_tools is not a list or set, make it a list
if not isinstance(allowed_tools, (list, set)) and isinstance(
allowed_tools, BaseTool
@ -237,6 +301,7 @@ def load_agent_executor(agent_class: type[agent_module.Agent], params, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=allowed_tools,
# memory=memory,
**kwargs,
)

View file

@ -11,6 +11,7 @@ from langflow.interface.tools.base import tool_creator
from langflow.interface.utilities.base import utility_creator
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
from langflow.interface.retrievers.base import retriever_creator
def get_type_dict():
@ -28,6 +29,7 @@ def get_type_dict():
"embeddings": embedding_creator.to_list(),
"textSplitters": textsplitter_creator.to_list(),
"utilities": utility_creator.to_list(),
"retrievers": retriever_creator.to_list(),
}

View file

@ -6,12 +6,18 @@ from langflow.settings import settings
from langflow.template.frontend_node.base import FrontendNode
from langflow.template.frontend_node.memories import MemoryFrontendNode
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_class
from langflow.utils.util import build_template_from_class, build_template_from_method
from langflow.custom.customs import get_custom_nodes
class MemoryCreator(LangChainTypeCreator):
type_name: str = "memories"
from_method_nodes = {
"ZepChatMessageHistory": "__init__",
"SQLiteEntityStore": "__init__",
}
@property
def frontend_node_class(self) -> Type[FrontendNode]:
"""The class type of the FrontendNode created in frontend_node."""
@ -26,6 +32,14 @@ class MemoryCreator(LangChainTypeCreator):
def get_signature(self, name: str) -> Optional[Dict]:
"""Get the signature of a memory."""
try:
if name in get_custom_nodes(self.type_name).keys():
return get_custom_nodes(self.type_name)[name]
elif name in self.from_method_nodes:
return build_template_from_method(
name,
type_to_cls_dict=memory_type_to_cls_dict,
method_name=self.from_method_nodes[name],
)
return build_template_from_class(name, memory_type_to_cls_dict)
except ValueError as exc:
raise ValueError("Memory not found") from exc

View file

@ -0,0 +1,58 @@
from typing import Any, Dict, List, Optional, Type
from langchain import retrievers
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.importing.utils import import_class
from langflow.settings import settings
from langflow.template.frontend_node.retrievers import RetrieverFrontendNode
from langflow.utils.logger import logger
from langflow.utils.util import build_template_from_method, build_template_from_class
class RetrieverCreator(LangChainTypeCreator):
    """Type creator that exposes LangChain retrievers to the frontend."""

    type_name: str = "retrievers"

    # Retrievers that must be built through a factory method (or an explicit
    # __init__) instead of the generic class-based template.
    from_method_nodes = {"MultiQueryRetriever": "from_llm", "ZepRetriever": "__init__"}

    @property
    def frontend_node_class(self) -> Type[RetrieverFrontendNode]:
        """The FrontendNode subclass used to render retriever nodes."""
        return RetrieverFrontendNode

    @property
    def type_to_loader_dict(self) -> Dict:
        # Lazily import every retriever exported by langchain.retrievers,
        # caching the name -> class mapping on first access.
        if self.type_dict is None:
            self.type_dict: dict[str, Any] = {
                retriever_name: import_class(f"langchain.retrievers.{retriever_name}")
                for retriever_name in retrievers.__all__
            }
        return self.type_dict

    def get_signature(self, name: str) -> Optional[Dict]:
        """Get the signature of a retriever."""
        try:
            if name in self.from_method_nodes:
                return build_template_from_method(
                    name,
                    type_to_cls_dict=self.type_to_loader_dict,
                    method_name=self.from_method_nodes[name],
                )
            else:
                return build_template_from_class(
                    name, type_to_cls_dict=self.type_to_loader_dict
                )
        except ValueError as exc:
            raise ValueError(f"Retriever {name} not found") from exc
        except AttributeError as exc:
            # A retriever may fail to load (e.g. optional dependency
            # missing); log and omit it instead of failing the whole listing.
            logger.error(f"Retriever {name} not loaded: {exc}")
            return None

    def to_list(self) -> List[str]:
        # Only expose retrievers enabled in settings, unless in dev mode.
        return [
            retriever
            for retriever in self.type_to_loader_dict.keys()
            if retriever in settings.retrievers or settings.dev
        ]
retriever_creator = RetrieverCreator()

View file

@ -11,6 +11,7 @@ from langflow.interface.tools.base import tool_creator
from langflow.interface.utilities.base import utility_creator
from langflow.interface.vector_store.base import vectorstore_creator
from langflow.interface.wrappers.base import wrapper_creator
from langflow.interface.retrievers.base import retriever_creator
def get_type_list():
@ -44,6 +45,7 @@ def build_langchain_types_dict(): # sourcery skip: dict-assign-update-to-union
documentloader_creator,
textsplitter_creator,
utility_creator,
retriever_creator,
]
all_types = {}
@ -52,3 +54,6 @@ def build_langchain_types_dict(): # sourcery skip: dict-assign-update-to-union
if created_types[creator.type_name].values():
all_types.update(created_types)
return all_types
langchain_types_dict = build_langchain_types_dict()

View file

@ -4,10 +4,12 @@ import os
from io import BytesIO
import re
import yaml
from langchain.base_language import BaseLanguageModel
from PIL.Image import Image
from langflow.utils.logger import logger
from langflow.chat.config import ChatConfig
def load_file_into_dict(file_path: str) -> dict:
@ -49,9 +51,9 @@ def try_setting_streaming_options(langchain_object, websocket):
if isinstance(llm, BaseLanguageModel):
if hasattr(llm, "streaming") and isinstance(llm.streaming, bool):
llm.streaming = True
llm.streaming = ChatConfig.streaming
elif hasattr(llm, "stream") and isinstance(llm.stream, bool):
llm.stream = True
llm.stream = ChatConfig.streaming
return langchain_object

View file

@ -1,5 +1,9 @@
from pathlib import Path
from typing import Optional
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from langflow.api import router
from langflow.database.base import create_db_and_tables
@ -33,6 +37,42 @@ def create_app():
return app
def setup_static_files(app: FastAPI, static_files_dir: Path):
    """
    Setup the static files directory.

    Args:
        app (FastAPI): FastAPI app.
        static_files_dir (Path): Path to the static files directory.
    """
    # Serve the built frontend at the root path; html=True makes directory
    # requests return index.html.
    app.mount(
        "/",
        StaticFiles(directory=static_files_dir, html=True),
        name="static",
    )

    @app.exception_handler(404)
    async def custom_404_handler(request, __):
        # SPA fallback: unknown routes serve index.html so client-side
        # routing can resolve them.
        path = static_files_dir / "index.html"

        if not path.exists():
            raise RuntimeError(f"File at path {path} does not exist.")
        return FileResponse(path)
# app = create_app()
# setup_static_files(app, static_files_dir)
def setup_app(static_files_dir: Optional[Path]) -> FastAPI:
    """Create the FastAPI app and mount the static frontend.

    When no directory is given, the bundled ``frontend`` folder next to this
    file is used.
    """
    if not static_files_dir:
        # Fall back to the frontend bundle shipped alongside this module.
        static_files_dir = Path(__file__).parent / "frontend"
    application = create_app()
    setup_static_files(application, static_files_dir)
    return application
app = create_app()

View file

@ -107,6 +107,10 @@ def process_graph_cached(data_graph: Dict[str, Any], inputs: Optional[dict] = No
elif isinstance(langchain_object, VectorStore):
class_name = langchain_object.__class__.__name__
result = {"message": f"Processed {class_name} successfully"}
else:
raise ValueError(
f"Unknown langchain_object type: {type(langchain_object).__name__}"
)
return result

View file

@ -1,24 +1,24 @@
import os
from typing import List
import yaml
from pydantic import BaseSettings, root_validator
class Settings(BaseSettings):
chains: List[str] = []
agents: List[str] = []
prompts: List[str] = []
llms: List[str] = []
tools: List[str] = []
memories: List[str] = []
embeddings: List[str] = []
vectorstores: List[str] = []
documentloaders: List[str] = []
wrappers: List[str] = []
toolkits: List[str] = []
textsplitters: List[str] = []
utilities: List[str] = []
chains: dict = {}
agents: dict = {}
prompts: dict = {}
llms: dict = {}
tools: dict = {}
memories: dict = {}
embeddings: dict = {}
vectorstores: dict = {}
documentloaders: dict = {}
wrappers: dict = {}
retrievers: dict = {}
toolkits: dict = {}
textsplitters: dict = {}
utilities: dict = {}
dev: bool = False
database_url: str = "sqlite:///./langflow.db"
cache: str = "InMemoryCache"
@ -38,16 +38,21 @@ class Settings(BaseSettings):
def update_from_yaml(self, file_path: str, dev: bool = False):
new_settings = load_settings_from_yaml(file_path)
self.chains = new_settings.chains or []
self.agents = new_settings.agents or []
self.prompts = new_settings.prompts or []
self.llms = new_settings.llms or []
self.tools = new_settings.tools or []
self.memories = new_settings.memories or []
self.wrappers = new_settings.wrappers or []
self.toolkits = new_settings.toolkits or []
self.textsplitters = new_settings.textsplitters or []
self.utilities = new_settings.utilities or []
self.chains = new_settings.chains or {}
self.agents = new_settings.agents or {}
self.prompts = new_settings.prompts or {}
self.llms = new_settings.llms or {}
self.tools = new_settings.tools or {}
self.memories = new_settings.memories or {}
self.wrappers = new_settings.wrappers or {}
self.toolkits = new_settings.toolkits or {}
self.textsplitters = new_settings.textsplitters or {}
self.utilities = new_settings.utilities or {}
self.embeddings = new_settings.embeddings or {}
self.vectorstores = new_settings.vectorstores or {}
self.documentloaders = new_settings.documentloaders or {}
self.retrievers = new_settings.retrievers or {}
self.dev = dev
def update_settings(self, **kwargs):

View file

@ -21,6 +21,7 @@ class TemplateFieldCreator(BaseModel, ABC):
name: str = ""
display_name: Optional[str] = None
advanced: bool = False
info: Optional[str] = ""
def to_dict(self):
result = self.dict()

View file

@ -1,12 +1,43 @@
import re
from typing import List, Optional
from pydantic import BaseModel
from pydantic import BaseModel, Field
from langflow.template.frontend_node.constants import FORCE_SHOW_FIELDS
from langflow.template.field.base import TemplateField
from langflow.template.template.base import Template
from langflow.utils import constants
from langflow.template.frontend_node.formatter import field_formatters
CLASSES_TO_REMOVE = ["Serializable", "BaseModel"]
class FieldFormatters(BaseModel):
    """Pipeline of formatters applied to every TemplateField.

    ``base_formatters`` run on every field, in declaration order;
    ``formatters`` run only on the field whose name matches the key
    (e.g. ``openai_api_key``).
    """

    # Name-specific formatters: applied only when the key equals field.name.
    formatters = {
        "openai_api_key": field_formatters.OpenAIAPIKeyFormatter(),
    }

    # Generic formatters applied, in order, to every field.
    base_formatters = {
        "kwargs": field_formatters.KwargsFormatter(),
        "optional": field_formatters.RemoveOptionalFormatter(),
        "list": field_formatters.ListTypeFormatter(),
        "dict": field_formatters.DictTypeFormatter(),
        "union": field_formatters.UnionTypeFormatter(),
        "multiline": field_formatters.MultilineFieldFormatter(),
        "show": field_formatters.ShowFieldFormatter(),
        "password": field_formatters.PasswordFieldFormatter(),
        "default": field_formatters.DefaultValueFormatter(),
        "headers": field_formatters.HeadersDefaultValueFormatter(),
        "dict_code_file": field_formatters.DictCodeFileFormatter(),
        "model_fields": field_formatters.ModelSpecificFieldFormatter(),
    }

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        """Apply all base formatters, then any name-specific formatter."""
        for key, formatter in self.base_formatters.items():
            formatter.format(field, name)
        for key, formatter in self.formatters.items():
            # Name-specific formatters fire only for the matching field name.
            if key == field.name:
                formatter.format(field, name)
class FrontendNode(BaseModel):
@ -15,14 +46,37 @@ class FrontendNode(BaseModel):
base_classes: List[str]
name: str = ""
display_name: str = ""
documentation: str = ""
field_formatters: FieldFormatters = Field(default_factory=FieldFormatters)
def process_base_classes(self) -> None:
"""Removes unwanted base classes from the list of base classes."""
self.base_classes = [
base_class
for base_class in self.base_classes
if base_class not in CLASSES_TO_REMOVE
]
# `field_formatters` is declared as a field but never read by instance
# methods, so static callers obtain a fresh FieldFormatters via this helper.
@staticmethod
def get_field_formatters() -> FieldFormatters:
return FieldFormatters()
def set_documentation(self, documentation: str) -> None:
"""Sets the documentation of the frontend node."""
self.documentation = documentation
def to_dict(self) -> dict:
"""Returns a dict representation of the frontend node."""
self.process_base_classes()
return {
self.name: {
"template": self.template.to_dict(self.format_field),
"description": self.description,
"base_classes": self.base_classes,
"display_name": self.display_name or self.name,
"documentation": self.documentation,
},
}
@ -35,33 +89,8 @@ class FrontendNode(BaseModel):
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
"""Formats a given field based on its attributes and value."""
SPECIAL_FIELD_HANDLERS = {
"allowed_tools": lambda field: "Tool",
"max_value_length": lambda field: "int",
}
key = field.name
value = field.to_dict()
_type = value["type"]
_type = FrontendNode.remove_optional(_type)
_type, is_list = FrontendNode.check_for_list_type(_type)
field.is_list = is_list or field.is_list
_type = FrontendNode.replace_mapping_with_dict(_type)
_type = FrontendNode.handle_union_type(_type)
field.field_type = FrontendNode.handle_special_field(
field, key, _type, SPECIAL_FIELD_HANDLERS
)
field.field_type = FrontendNode.handle_dict_type(field, _type)
field.show = FrontendNode.should_show_field(key, field.required)
field.password = FrontendNode.should_be_password(key, field.show)
field.multiline = FrontendNode.should_be_multiline(key)
FrontendNode.replace_default_value(field, value)
FrontendNode.handle_specific_field_values(field, key, name)
FrontendNode.handle_kwargs_field(field)
FrontendNode.handle_api_key_field(field, key)
FrontendNode.get_field_formatters().format(field, name)
@staticmethod
def remove_optional(_type: str) -> str:

View file

@ -49,6 +49,10 @@ class ChainFrontendNode(FrontendNode):
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
    """Format chain-node fields after the generic formatting pass.

    For RetrievalQA nodes, the ``memory`` field is hidden and made
    optional; ``*key*`` fields on chains are not treated as passwords.
    """
    FrontendNode.format_field(field, name)

    # BUG FIX: the original compared the literal string "name" to
    # "RetrievalQA" (always False), so the memory field was never hidden.
    # Compare the node name parameter instead.
    if name == "RetrievalQA" and field.name == "memory":
        field.show = False
        field.required = False
        field.advanced = False
    if "key" in field.name:
        field.password = False

View file

@ -32,3 +32,29 @@ You are a good listener and you can talk about anything.
HUMAN_PROMPT = "{input}"
QA_CHAIN_TYPES = ["stuff", "map_reduce", "map_rerank", "refine"]
CTRANSFORMERS_DEFAULT_CONFIG = {
"top_k": 40,
"top_p": 0.95,
"temperature": 0.8,
"repetition_penalty": 1.1,
"last_n_tokens": 64,
"seed": -1,
"max_new_tokens": 256,
"stop": None,
"stream": False,
"reset": True,
"batch_size": 8,
"threads": -1,
"context_length": -1,
"gpu_layers": 0,
}
# Field help text shown to the user: the OpenAI API base URL can be
# pointed at compatible APIs such as Prem and LocalAI.
OPENAI_API_BASE_INFO = """
The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.
You can change this to use other APIs like JinaChat, LocalAI and Prem.
"""

View file

@ -0,0 +1,9 @@
from abc import ABC, abstractmethod
from langflow.template.field.base import TemplateField
class FieldFormatter(ABC):
    """Base class for TemplateField formatters.

    Concrete formatters mutate the given field in place (show/password/
    multiline flags, display names, default values, type strings, ...).
    """

    @abstractmethod
    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        """Format ``field`` in place.

        The abstract signature now includes the optional ``name`` parameter
        that every concrete formatter already implements and that callers
        pass (the name of the owning node/class, for formatters whose
        behavior depends on the node type).
        """

View file

@ -0,0 +1,162 @@
from typing import Optional
from langflow.template.field.base import TemplateField
from langflow.template.frontend_node.constants import FORCE_SHOW_FIELDS
from langflow.template.frontend_node.formatter.base import FieldFormatter
import re
from langflow.utils.constants import (
ANTHROPIC_MODELS,
CHAT_OPENAI_MODELS,
OPENAI_MODELS,
)
class OpenAIAPIKeyFormatter(FieldFormatter):
    """Relabels OpenAI api-key fields and makes them optional."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        is_openai_node = "OpenAI" in str(name)
        if is_openai_node and "api_key" in field.name:
            field.display_name = "OpenAI API Key"
            field.required = False
            # Normalize a missing value to an empty string for the UI.
            if field.value is None:
                field.value = ""
class ModelSpecificFieldFormatter(FieldFormatter):
    """Turns ``model_name`` into a dropdown for known LLM providers."""

    # Provider node name -> list of valid model names.
    MODEL_DICT = {
        "OpenAI": OPENAI_MODELS,
        "ChatOpenAI": CHAT_OPENAI_MODELS,
        "Anthropic": ANTHROPIC_MODELS,
        "ChatAnthropic": ANTHROPIC_MODELS,
    }

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        if field.name != "model_name":
            return
        models = self.MODEL_DICT.get(name)
        if models is not None:
            field.options = models
            field.is_list = True
class KwargsFormatter(FieldFormatter):
    """Hides ``*kwargs*`` fields and marks them advanced/optional."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        if "kwargs" not in field.name.lower():
            return
        field.advanced = True
        field.required = False
        field.show = False
class APIKeyFormatter(FieldFormatter):
    """Makes api-key fields optional and prettifies their display name."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        lowered = field.name.lower()
        if "api" in lowered and "key" in lowered:
            field.required = False
            field.advanced = False
            # e.g. "openai_api_key" -> "Openai API Key"
            pretty = field.name.replace("_", " ").title()
            field.display_name = pretty.replace("Api", "API")
class RemoveOptionalFormatter(FieldFormatter):
    """Strips an ``Optional[...]`` wrapper from the field type string."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        field.field_type = re.sub(r"Optional\[(.*)\]", r"\1", field.field_type)
class ListTypeFormatter(FieldFormatter):
    """Unwraps ``List[...]``/``Sequence[...]`` types and flags the field as a list."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        current_type = field.field_type
        if "List" in current_type or "Sequence" in current_type:
            field.is_list = True
            # Keep only the element type, e.g. "List[str]" -> "str".
            current_type = re.sub(r"(List|Sequence)\[(.*)\]", r"\2", current_type)
        field.field_type = current_type
class DictTypeFormatter(FieldFormatter):
    """Rewrites ``Mapping`` in the type string to ``dict``."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        field.field_type = field.field_type.replace("Mapping", "dict")
class UnionTypeFormatter(FieldFormatter):
    """Collapses a ``Union[...]`` type string to its first member."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        _type = field.field_type
        if "Union" in _type:
            # Drop the "Union[" prefix and the trailing "]".
            _type = _type.replace("Union[", "")[:-1]
            # Keep only the first alternative of the union.
            _type = _type.split(",")[0]
            # Strip any leftover brackets from nested generics.
            _type = _type.replace("]", "").replace("[", "")
        field.field_type = _type
class SpecialFieldFormatter(FieldFormatter):
    """Overrides the type of a few specific fields with hand-picked values."""

    # field name -> callable returning the forced type string.
    SPECIAL_FIELD_HANDLERS = {
        "allowed_tools": lambda field: "Tool",
        "max_value_length": lambda field: "int",
    }

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        handler = self.SPECIAL_FIELD_HANDLERS.get(field.name)
        if handler is not None:
            field.field_type = handler(field)
class ShowFieldFormatter(FieldFormatter):
    """Decides whether a field is shown in the UI by default."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        key = field.name
        # Required fields are shown, except input_variables.
        required_and_visible = field.required and key not in ["input_variables"]
        # "key"-named fields are shown unless they are input/output keys.
        key_like = "key" in key and "input" not in key and "output" not in key
        field.show = (
            required_and_visible
            or key in FORCE_SHOW_FIELDS
            or "api" in key
            or key_like
        )
class PasswordFieldFormatter(FieldFormatter):
    """Masks secret-looking fields (password/token/api/key) that are shown."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        if not field.show:
            return
        lowered = field.name.lower()
        if any(marker in lowered for marker in {"password", "token", "api", "key"}):
            field.password = True
class MultilineFieldFormatter(FieldFormatter):
    """Enables multiline editing for long-text fields."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        long_text_fields = {
            "suffix",
            "prefix",
            "template",
            "examples",
            "code",
            "headers",
            "description",
        }
        if field.name in long_text_fields:
            field.multiline = True
class DefaultValueFormatter(FieldFormatter):
    """Copies a declared default into the field's value."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        serialized = field.to_dict()
        if "default" in serialized:
            field.value = serialized["default"]
class HeadersDefaultValueFormatter(FieldFormatter):
    """Pre-fills ``headers`` fields with an example Authorization header."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        if field.name != "headers":
            return
        # Example placeholder shown to the user, not a real credential.
        field.value = """{'Authorization': 'Bearer <token>'}"""
class DictCodeFileFormatter(FieldFormatter):
    """Renders dict-typed fields as a file picker (``dict_``) or a code editor."""

    def format(self, field: TemplateField, name: Optional[str] = None) -> None:
        serialized = field.to_dict()
        if "dict" not in serialized["type"].lower():
            return
        if field.name == "dict_":
            # JsonSpec-style fields load their dict from a file on disk.
            field.field_type = "file"
            field.suffixes = [".json", ".yaml", ".yml"]
            field.file_types = ["json", "yaml", "yml"]
        else:
            field.field_type = "code"

View file

@ -1,7 +1,10 @@
import json
from typing import Optional
from langflow.template.field.base import TemplateField
from langflow.template.frontend_node.base import FrontendNode
from langflow.template.frontend_node.constants import CTRANSFORMERS_DEFAULT_CONFIG
from langflow.template.frontend_node.constants import OPENAI_API_BASE_INFO
class LLMFrontendNode(FrontendNode):
@ -15,6 +18,13 @@ class LLMFrontendNode(FrontendNode):
if "key" not in field.name.lower() and "token" not in field.name.lower():
field.password = False
if field.name == "openai_api_base":
field.info = OPENAI_API_BASE_INFO
def add_extra_base_classes(self) -> None:
if "BaseLLM" not in self.base_classes:
self.base_classes.append("BaseLLM")
@staticmethod
def format_azure_field(field: TemplateField):
if field.name == "model_name":
@ -31,6 +41,13 @@ class LLMFrontendNode(FrontendNode):
field.show = True
field.advanced = not field.required
@staticmethod
def format_ctransformers_field(field: TemplateField):
if field.name == "config":
field.show = True
field.advanced = True
field.value = json.dumps(CTRANSFORMERS_DEFAULT_CONFIG, indent=2)
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
display_names_dict = {
@ -38,6 +55,7 @@ class LLMFrontendNode(FrontendNode):
}
FrontendNode.format_field(field, name)
LLMFrontendNode.format_openai_field(field)
LLMFrontendNode.format_ctransformers_field(field)
if name and "azure" in name.lower():
LLMFrontendNode.format_azure_field(field)
if name and "llama" in name.lower():

View file

@ -2,11 +2,19 @@ from typing import Optional
from langflow.template.field.base import TemplateField
from langflow.template.frontend_node.base import FrontendNode
from langflow.template.template.base import Template
from langchain.memory.chat_message_histories.postgres import DEFAULT_CONNECTION_STRING
class MemoryFrontendNode(FrontendNode):
#! Needs testing
def add_extra_fields(self) -> None:
# chat history should have another way to add common field?
# prevent adding an incorrect field in ChatMessageHistory
base_message_classes = ["BaseEntityStore", "BaseChatMessageHistory"]
if any(base_class in self.base_classes for base_class in base_message_classes):
return
# add return_messages field
self.template.add_field(
TemplateField(
@ -64,3 +72,50 @@ class MemoryFrontendNode(FrontendNode):
field.value = ""
if field.name == "memory_key":
field.value = "chat_history"
if field.name == "chat_memory":
field.show = True
field.advanced = False
field.required = False
if field.name == "url":
field.show = True
if field.name == "entity_store":
field.show = True
if name == "SQLiteEntityStore":
field.show = True
class PostgresChatMessageHistoryFrontendNode(MemoryFrontendNode):
    """Hand-built frontend node for PostgresChatMessageHistory.

    The template is declared explicitly (rather than introspected) so the
    session id, connection string and table name appear as plain string
    inputs with sensible defaults.
    """

    name: str = "PostgresChatMessageHistory"
    template: Template = Template(
        type_name="PostgresChatMessageHistory",
        fields=[
            TemplateField(
                field_type="str",
                required=True,
                placeholder="",
                is_list=False,
                show=True,
                multiline=False,
                name="session_id",
            ),
            TemplateField(
                field_type="str",
                required=True,
                show=True,
                name="connection_string",
                # NOTE(review): defaults to langchain's
                # DEFAULT_CONNECTION_STRING — presumably a localhost example
                # with placeholder credentials; users should override it.
                value=DEFAULT_CONNECTION_STRING,
            ),
            TemplateField(
                field_type="str",
                required=True,
                placeholder="",
                is_list=False,
                show=True,
                multiline=False,
                value="message_store",
                name="table_name",
            ),
        ],
    )
    description: str = "Memory store with Postgres"
    base_classes: list[str] = ["PostgresChatMessageHistory", "BaseChatMessageHistory"]

View file

@ -0,0 +1,15 @@
from typing import Optional
from langflow.template.field.base import TemplateField
from langflow.template.frontend_node.base import FrontendNode
class RetrieverFrontendNode(FrontendNode):
    """Frontend node for retriever types."""

    @staticmethod
    def format_field(field: TemplateField, name: Optional[str] = None) -> None:
        """Run generic formatting, then retriever-specific tweaks."""
        FrontendNode.format_field(field, name)
        # Define common field attributes
        # Retriever fields are always visible.
        field.show = True
        if field.name == "parser_key":
            field.display_name = "Parser Key"
        # NOTE(review): source rendering lost indentation — assuming the
        # password reset applies to all retriever fields, not just
        # parser_key; confirm against the original file.
        field.password = False

View file

@ -1,5 +1,6 @@
from langflow.template.field.base import TemplateField
from langflow.template.frontend_node.base import FrontendNode
from langchain.text_splitter import Language
class TextSplittersFrontendNode(FrontendNode):
@ -17,6 +18,24 @@ class TextSplittersFrontendNode(FrontendNode):
name = "separator"
elif self.template.type_name == "RecursiveCharacterTextSplitter":
name = "separators"
# Add a field for type of separator
# which will have Text or any value from the
# Language enum
options = [x.value for x in Language] + ["Text"]
options.sort()
self.template.add_field(
TemplateField(
field_type="str",
required=True,
show=True,
name="separator_type",
advanced=False,
is_list=True,
options=options,
value="Text",
display_name="Separator Type",
)
)
self.template.add_field(
TemplateField(
field_type="str",

View file

@ -200,7 +200,7 @@ class VectorStoreFrontendNode(FrontendNode):
self.template.add_field(field)
def add_extra_base_classes(self) -> None:
self.base_classes.append("BaseRetriever")
self.base_classes.extend(("BaseRetriever", "VectorStoreRetriever"))
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:

View file

@ -165,6 +165,7 @@ def build_template_from_method(
"required": param.default == param.empty,
}
for name, param in params.items()
if name not in ["self", "kwargs", "args"]
},
}
@ -233,6 +234,9 @@ def format_dict(d, name: Optional[str] = None):
_type = value["type"]
if not isinstance(_type, str):
_type = _type.__name__
# Remove 'Optional' wrapper
if "Optional" in _type:
_type = _type.replace("Optional[", "")[:-1]

View file

@ -37,6 +37,7 @@
"base64-js": "^1.5.1",
"class-variance-authority": "^0.6.0",
"clsx": "^1.2.1",
"dompurify": "^3.0.3",
"esbuild": "^0.17.18",
"lodash": "^4.17.21",
"lucide-react": "^0.233.0",
@ -1229,15 +1230,15 @@
"integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ=="
},
"node_modules/@mui/system": {
"version": "5.13.5",
"resolved": "https://registry.npmjs.org/@mui/system/-/system-5.13.5.tgz",
"integrity": "sha512-n0gzUxoZ2ZHZgnExkh2Htvo9uW2oakofgPRQrDoa/GQOWyRD0NH9MDszBwOb6AAoXZb+OV5TE7I4LeZ/dzgHYA==",
"version": "5.13.6",
"resolved": "https://registry.npmjs.org/@mui/system/-/system-5.13.6.tgz",
"integrity": "sha512-G3Xr28uLqU3DyF6r2LQkHGw/ku4P0AHzlKVe7FGXOPl7X1u+hoe2xxj8Vdiq/69II/mh9OP21i38yBWgWb7WgQ==",
"dependencies": {
"@babel/runtime": "^7.21.0",
"@babel/runtime": "^7.22.5",
"@mui/private-theming": "^5.13.1",
"@mui/styled-engine": "^5.13.2",
"@mui/types": "^7.2.4",
"@mui/utils": "^5.13.1",
"@mui/utils": "^5.13.6",
"clsx": "^1.2.1",
"csstype": "^3.1.2",
"prop-types": "^15.8.1"
@ -1297,11 +1298,11 @@
}
},
"node_modules/@mui/utils": {
"version": "5.13.1",
"resolved": "https://registry.npmjs.org/@mui/utils/-/utils-5.13.1.tgz",
"integrity": "sha512-6lXdWwmlUbEU2jUI8blw38Kt+3ly7xkmV9ljzY4Q20WhsJMWiNry9CX8M+TaP/HbtuyR8XKsdMgQW7h7MM3n3A==",
"version": "5.13.6",
"resolved": "https://registry.npmjs.org/@mui/utils/-/utils-5.13.6.tgz",
"integrity": "sha512-ggNlxl5NPSbp+kNcQLmSig6WVB0Id+4gOxhx644987v4fsji+CSXc+MFYLocFB/x4oHtzCUlSzbVHlJfP/fXoQ==",
"dependencies": {
"@babel/runtime": "^7.21.0",
"@babel/runtime": "^7.22.5",
"@types/prop-types": "^15.7.5",
"@types/react-is": "^18.2.0",
"prop-types": "^15.8.1",
@ -3838,9 +3839,9 @@
}
},
"node_modules/aria-query": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.2.1.tgz",
"integrity": "sha512-7uFg4b+lETFgdaJyETnILsXgnnzVnkHcgRbwbPwevm5x/LmUlt3MjczMRe1zg824iBgXZNRPTBftNYyRSKLp2g==",
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz",
"integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==",
"dev": true,
"dependencies": {
"dequal": "^2.0.3"
@ -5138,10 +5139,15 @@
"node": ">=12"
}
},
"node_modules/dompurify": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.3.tgz",
"integrity": "sha512-axQ9zieHLnAnHh0sfAamKYiqXMJAVwu+LM/alQ7WDagoWessyWvMSFyW65CqF3owufNu8HBcE4cM2Vflu7YWcQ=="
},
"node_modules/electron-to-chromium": {
"version": "1.4.438",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.438.tgz",
"integrity": "sha512-x94U0FhphEsHsOloCvlsujHCvoir0ZQ73ZAs/QN4PLx98uNvyEU79F75rq1db75Bx/atvuh7KPeuxelh+xfYJw=="
"version": "1.4.440",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.440.tgz",
"integrity": "sha512-r6dCgNpRhPwiWlxbHzZQ/d9swfPaEJGi8ekqRBwQYaR3WmA5VkqQfBWSDDjuJU1ntO+W9tHx8OHV/96Q8e0dVw=="
},
"node_modules/emoji-regex": {
"version": "8.0.0",
@ -6874,9 +6880,9 @@
}
},
"node_modules/katex": {
"version": "0.16.7",
"resolved": "https://registry.npmjs.org/katex/-/katex-0.16.7.tgz",
"integrity": "sha512-Xk9C6oGKRwJTfqfIbtr0Kes9OSv6IFsuhFGc7tW4urlpMJtuh+7YhzU6YEG9n8gmWKcMAFzkp7nr+r69kV0zrA==",
"version": "0.16.8",
"resolved": "https://registry.npmjs.org/katex/-/katex-0.16.8.tgz",
"integrity": "sha512-ftuDnJbcbOckGY11OO+zg3OofESlbR5DRl2cmN8HeWeeFIV7wTXvAOx8kEjZjobhA+9wh2fbKeO6cdcA9Mnovg==",
"funding": [
"https://opencollective.com/katex",
"https://github.com/sponsors/katex"

View file

@ -32,6 +32,7 @@
"base64-js": "^1.5.1",
"class-variance-authority": "^0.6.0",
"clsx": "^1.2.1",
"dompurify": "^3.0.3",
"esbuild": "^0.17.18",
"lodash": "^4.17.21",
"lucide-react": "^0.233.0",

View file

@ -25,6 +25,7 @@ import { nodeColors } from "../../../../utils";
import ShadTooltip from "../../../../components/ShadTooltipComponent";
import { PopUpContext } from "../../../../contexts/popUpContext";
import ToggleShadComponent from "../../../../components/toggleShadComponent";
import { Info } from "lucide-react";
export default function ParameterComponent({
left,
@ -36,9 +37,11 @@ export default function ParameterComponent({
type,
name = "",
required = false,
info = "",
}: ParameterComponentType) {
const ref = useRef(null);
const refHtml = useRef(null);
const infoHtml = useRef(null);
const updateNodeInternals = useUpdateNodeInternals();
const [position, setPosition] = useState(0);
const { closePopUp } = useContext(PopUpContext);
@ -79,6 +82,18 @@ export default function ParameterComponent({
});
};
useEffect(() => {
infoHtml.current = (
<div className="h-full w-full break-words">
{info.split("\n").map((line, i) => (
<p key={i} className="block">
{line}
</p>
))}
</div>
);
}, [info]);
useEffect(() => {
const groupedObj = groupByFamily(myData, tooltipTitle);
@ -126,9 +141,22 @@ export default function ParameterComponent({
className="w-full flex flex-wrap justify-between items-center bg-muted dark:bg-gray-800 dark:text-white mt-1 px-5 py-2"
>
<>
<div className={"text-sm truncate w-full " + (left ? "" : "text-end")}>
<div
className={
"text-sm truncate w-full" +
(left ? "" : " text-end") +
(info !== "" ? " flex items-center" : "")
}
>
{title}
<span className="text-red-600">{required ? " *" : ""}</span>
<div className="">
{info !== "" && (
<ShadTooltip content={infoHtml.current}>
<Info className="ml-2 relative bottom-0.5 w-3 h-3" />
</ShadTooltip>
)}
</div>
</div>
{left &&
(type === "str" ||

View file

@ -6,16 +6,7 @@ import {
} from "../../utils";
import ParameterComponent from "./components/parameterComponent";
import { typesContext } from "../../contexts/typesContext";
import {
useContext,
useState,
useEffect,
useRef,
ForwardRefExoticComponent,
ComponentType,
SVGProps,
ReactNode,
} from "react";
import { useContext, useState, useEffect, useRef } from "react";
import { NodeDataType } from "../../types/flow";
import { alertContext } from "../../contexts/alertContext";
import { PopUpContext } from "../../contexts/popUpContext";
@ -23,10 +14,8 @@ import NodeModal from "../../modals/NodeModal";
import Tooltip from "../../components/TooltipComponent";
import { NodeToolbar } from "reactflow";
import NodeToolbarComponent from "../../pages/FlowPage/components/nodeToolbarComponent";
import ShadTooltip from "../../components/ShadTooltipComponent";
import { useSSE } from "../../contexts/SSEContext";
import { ReactElement } from "react-markdown/lib/react-markdown";
export default function GenericNode({
data,
@ -46,6 +35,7 @@ export default function GenericNode({
const [validationStatus, setValidationStatus] = useState(null);
// State for outline color
const { sseData, isBuilding } = useSSE();
const refHtml = useRef(null);
// useEffect(() => {
// if (reactFlowInstance) {
@ -103,11 +93,8 @@ export default function GenericNode({
color: nodeColors[types[data.type]] ?? nodeColors.unknown,
}}
/>
<div className="ml-2 truncate">
<ShadTooltip
delayDuration={1500}
content={data.node.display_name}
>
<div className="ml-2 truncate flex">
<ShadTooltip content={data.node.display_name}>
<div className="ml-2 truncate text-gray-800">
{data.node.display_name}
</div>
@ -214,6 +201,7 @@ export default function GenericNode({
? toTitleCase(data.node.template[t].name)
: toTitleCase(t)
}
info={data.node.template[t].info}
name={t}
tooltipTitle={data.node.template[t].type}
required={data.node.template[t].required}

View file

@ -1,3 +1,4 @@
import { ShadTooltipProps } from "../../types/components";
import {
Tooltip,
TooltipContent,
@ -5,18 +6,19 @@ import {
TooltipTrigger,
} from "../ui/tooltip";
const ShadTooltip = (props) => {
const ShadTooltip = ({
delayDuration = 500,
side,
content,
children,
}: ShadTooltipProps) => {
return (
<TooltipProvider>
<Tooltip delayDuration={props.delayDuration}>
<TooltipTrigger asChild>{props.children}</TooltipTrigger>
<Tooltip delayDuration={delayDuration}>
<TooltipTrigger asChild>{children}</TooltipTrigger>
<TooltipContent
side={props.side}
avoidCollisions={false}
sticky="always"
>
{props.content}
<TooltipContent side={side} avoidCollisions={false} sticky="always">
{content}
</TooltipContent>
</Tooltip>
</TooltipProvider>

View file

@ -2,9 +2,9 @@ import { useState } from "react";
import { ChatMessageType } from "../../../types/chat";
import { nodeColors } from "../../../utils";
import Convert from "ansi-to-html";
const convert = new Convert({ newline: true });
import { MessageCircle } from "lucide-react";
import DOMPurify from "dompurify";
const convert = new Convert({ newline: true });
export default function ChatMessage({ chat }: { chat: ChatMessageType }) {
const [hidden, setHidden] = useState(true);
return (
@ -23,13 +23,14 @@ export default function ChatMessage({ chat }: { chat: ChatMessageType }) {
<MessageCircle className="w-5 h-5 animate-bounce" />
</div>
)}
{chat.thought && chat.thought !== "" && !hidden && (
<div
onClick={() => setHidden((prev) => !prev)}
style={{ backgroundColor: nodeColors["thought"] }}
className=" text-start inline-block w-full pb-3 pt-3 px-5 cursor-pointer"
className="text-start inline-block w-full pb-3 pt-3 px-5 cursor-pointer"
dangerouslySetInnerHTML={{
__html: convert.toHtml(chat.thought),
__html: DOMPurify.sanitize(convert.toHtml(chat.thought)),
}}
></div>
)}

View file

@ -1,9 +1,11 @@
import { Listbox, Transition } from "@headlessui/react";
import { Fragment, useEffect, useState } from "react";
import { Fragment, useContext, useEffect, useState } from "react";
import { DropDownComponentType } from "../../types/components";
import { classNames } from "../../utils";
import { INPUT_STYLE } from "../../constants";
import { ChevronsUpDown, Check } from "lucide-react";
import { PopUpContext } from "../../contexts/popUpContext";
import { TabsContext } from "../../contexts/tabsContext";
export default function Dropdown({
value,
@ -13,12 +15,15 @@ export default function Dropdown({
numberOfOptions = 0,
apiModal = false
}: DropDownComponentType) {
const { closePopUp } = useContext(PopUpContext);
let [internalValue, setInternalValue] = useState(
value === "" || !value ? "Choose an option" : value
);
useEffect(() => {
setInternalValue(value === "" || !value ? "Choose an option" : value);
}, [value]);
}, [closePopUp]);
return (
<>

View file

@ -2,6 +2,7 @@ import { useContext, useEffect, useState } from "react";
import { FloatComponentType } from "../../types/components";
import { TabsContext } from "../../contexts/tabsContext";
import { INPUT_STYLE } from "../../constants";
import { PopUpContext } from "../../contexts/popUpContext";
export default function FloatComponent({
value,
@ -12,6 +13,7 @@ export default function FloatComponent({
}: FloatComponentType) {
const [myValue, setMyValue] = useState(value ?? "");
const { setDisableCopyPaste } = useContext(TabsContext);
const { closePopUp } = useContext(PopUpContext);
const step = 0.1;
const min = 0;
@ -26,7 +28,7 @@ export default function FloatComponent({
useEffect(() => {
setMyValue(value);
}, [value]);
}, [closePopUp]);
return (
<div

View file

@ -5,6 +5,7 @@ import { TabsContext } from "../../contexts/tabsContext";
import _ from "lodash";
import { INPUT_STYLE } from "../../constants";
import { X, Plus } from "lucide-react";
import { PopUpContext } from "../../contexts/popUpContext";
export default function InputListComponent({
value,
@ -14,12 +15,19 @@ export default function InputListComponent({
onAddInput
}: InputListComponentType) {
const [inputList, setInputList] = useState(value ?? [""]);
const { closePopUp } = useContext(PopUpContext);
useEffect(() => {
if (disabled) {
setInputList([""]);
onChange([""]);
}
}, [disabled, onChange]);
useEffect(() => {
setInputList(value);
}, [closePopUp]);
return (
<div
className={
@ -45,9 +53,9 @@ export default function InputListComponent({
setInputList((old) => {
let newInputList = _.cloneDeep(old);
newInputList[idx] = e.target.value;
onChange(newInputList);
return newInputList;
});
onChange(inputList);
}}
/>
{idx === inputList.length - 1 ? (

View file

@ -3,6 +3,7 @@ import { FloatComponentType } from "../../types/components";
import { TabsContext } from "../../contexts/tabsContext";
import { classNames } from "../../utils";
import { INPUT_STYLE } from "../../constants";
import { PopUpContext } from "../../contexts/popUpContext";
export default function IntComponent({
value,
@ -14,6 +15,7 @@ export default function IntComponent({
const [myValue, setMyValue] = useState(value ?? "");
const { setDisableCopyPaste } = useContext(TabsContext);
const min = 0;
const { closePopUp } = useContext(PopUpContext);
useEffect(() => {
if (disabled) {
@ -24,7 +26,7 @@ export default function IntComponent({
useEffect(() => {
setMyValue(value);
}, [value]);
}, [closePopUp]);
return (
<div

View file

@ -195,39 +195,49 @@ export function TabsProvider({ children }: { children: ReactNode }) {
}
function processFlowEdges(flow) {
if(!flow.data || !flow.data.edges) return;
if (!flow.data || !flow.data.edges) return;
flow.data.edges.forEach((edge) => {
edge.className = "";
edge.style = { stroke: "#555555" };
});
}
function updateDisplay_name(node:NodeType,template:APIClassType) {
node.data.node.display_name = template["display_name"]?template["display_name"]:node.data.type;
function updateDisplay_name(node: NodeType, template: APIClassType) {
node.data.node.display_name = template["display_name"] || node.data.type;
}
function updateNodeDocumentation(node: NodeType, template: APIClassType) {
node.data.node.documentation = template["documentation"];
}
function processFlowNodes(flow) {
if(!flow.data || !flow.data.nodes) return;
flow.data.nodes.forEach((node:NodeType) => {
if (!flow.data || !flow.data.nodes) return;
flow.data.nodes.forEach((node: NodeType) => {
const template = templates[node.data.type];
if (!template) {
setErrorData({ title: `Unknown node type: ${node.data.type}` });
return;
}
if (Object.keys(template["template"]).length > 0) {
updateDisplay_name(node,template);
updateDisplay_name(node, template);
updateNodeBaseClasses(node, template);
updateNodeEdges(flow, node, template);
updateNodeDescription(node, template);
updateNodeTemplate(node, template);
updateNodeDocumentation(node, template);
}
});
}
function updateNodeBaseClasses(node:NodeType,template:APIClassType) {
function updateNodeBaseClasses(node: NodeType, template: APIClassType) {
node.data.node.base_classes = template["base_classes"];
}
function updateNodeEdges(flow:FlowType, node:NodeType,template:APIClassType) {
function updateNodeEdges(
flow: FlowType,
node: NodeType,
template: APIClassType
) {
flow.data.edges.forEach((edge) => {
if (edge.source === node.id) {
edge.sourceHandle = edge.sourceHandle
@ -239,11 +249,11 @@ export function TabsProvider({ children }: { children: ReactNode }) {
});
}
function updateNodeDescription(node:NodeType,template:APIClassType) {
function updateNodeDescription(node: NodeType, template: APIClassType) {
node.data.node.description = template["description"];
}
function updateNodeTemplate(node:NodeType,template:APIClassType) {
function updateNodeTemplate(node: NodeType, template: APIClassType) {
node.data.node.template = updateTemplate(
template["template"] as unknown as APITemplateType,
node.data.node.template as APITemplateType
@ -407,7 +417,7 @@ export function TabsProvider({ children }: { children: ReactNode }) {
y: insidePosition.y + n.position.y - minimumY,
},
data: {
...n.data,
..._.cloneDeep(n.data),
id: newId,
},
};

View file

@ -311,7 +311,7 @@ export async function getBuildStatus(
export async function postBuildInit(
flow: FlowType
): Promise<AxiosResponse<InitTypeAPI>> {
return await axios.post(`/api/v1/build/init`, flow);
return await axios.post(`/api/v1/build/init/${flow.id}`, flow);
}
// fetch(`/upload/${id}`, {

View file

@ -79,7 +79,7 @@ export default function EditNodeModal({ data }: { data: NodeDataType }) {
}
return (
<Dialog open={true} onOpenChange={setModalOpen} >
<Dialog open={true} onOpenChange={setModalOpen}>
<DialogTrigger asChild></DialogTrigger>
<DialogContent className="lg:max-w-[700px] ">
<DialogHeader>

View file

@ -6,8 +6,7 @@ import {
classNames,
limitScrollFieldsModal,
nodeColors,
nodeIcons,
toNormalCase,
nodeIconsLucide,
toTitleCase,
} from "../../utils";
import { typesContext } from "../../contexts/typesContext";
@ -28,7 +27,7 @@ export default function NodeModal({ data }: { data: NodeDataType }) {
}
}
// any to avoid type conflict
const Icon: any = nodeIcons[types[data.type]];
const Icon: any = nodeIconsLucide[types[data.type]];
return (
<Transition.Root show={open} appear={true} as={Fragment}>
<Dialog

View file

@ -11,7 +11,7 @@ import remarkMath from "remark-math";
import { CodeBlock } from "./codeBlock";
import Convert from "ansi-to-html";
import { User2, MessageCircle } from "lucide-react";
import DOMPurify from "dompurify";
export default function ChatMessage({
chat,
lockChat,
@ -78,10 +78,9 @@ export default function ChatMessage({
{chat.thought && chat.thought !== "" && !hidden && (
<div
onClick={() => setHidden((prev) => !prev)}
className=" text-start inline-block rounded-md text-gray-600 dark:text-gray-200 h-full border border-gray-300 dark:border-gray-500
bg-muted dark:bg-gray-800 w-[95%] pb-3 pt-3 px-2 ml-3 cursor-pointer scrollbar-hide overflow-scroll"
className="text-start inline-block rounded-md text-gray-600 dark:text-gray-200 h-full border border-gray-300 dark:border-gray-500 bg-muted dark:bg-gray-800 w-[95%] pb-3 pt-3 px-2 ml-3 cursor-pointer scrollbar-hide overflow-scroll"
dangerouslySetInnerHTML={{
__html: convert.toHtml(chat.thought),
__html: DOMPurify.sanitize(convert.toHtml(chat.thought)),
}}
></div>
)}
@ -152,12 +151,12 @@ export default function ChatMessage({
) : (
<div className="w-full flex items-center">
<div className="text-start inline-block px-3 text-gray-600 dark:text-white">
<span
className="text-gray-600 dark:text-gray-200"
dangerouslySetInnerHTML={{
__html: message.replace(/\n/g, "<br>"),
}}
></span>
{message.split("\n").map((line, index) => (
<span key={index}>
{line}
<br />
</span>
))}
</div>
</div>
)}

View file

@ -62,7 +62,7 @@ export default function ExtraSidebar() {
return (
<div className="w-52 flex flex-col overflow-hidden scrollbar-hide h-full border-r">
<div className="mt-2 mb-2 w-full flex gap-2 justify-between px-2 items-center">
<ShadTooltip delayDuration={1000} content="Import" side="top">
<ShadTooltip content="Import" side="top">
<button
className="hover:dark:hover:bg-[#242f47] text-gray-700 w-full justify-center shadow-sm transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-gray-300 relative inline-flex items-center rounded-md bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-gray-50"
onClick={() => {
@ -74,7 +74,7 @@ export default function ExtraSidebar() {
</button>
</ShadTooltip>
<ShadTooltip delayDuration={1000} content="Export" side="top">
<ShadTooltip content="Export" side="top">
<button
className={classNames(
"hover:dark:hover:bg-[#242f47] text-gray-700 w-full justify-center shadow-sm transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-gray-300 relative inline-flex items-center bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-gray-50 rounded-md"
@ -86,7 +86,7 @@ export default function ExtraSidebar() {
<FileDown className="w-5 h-5 dark:text-gray-300"></FileDown>
</button>
</ShadTooltip>
<ShadTooltip delayDuration={1000} content="Code" side="top">
<ShadTooltip content="Code" side="top">
<button
className={classNames(
"hover:dark:hover:bg-[#242f47] text-gray-700 w-full justify-center shadow-sm transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-gray-300 relative inline-flex items-center bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-gray-50 rounded-md"
@ -99,7 +99,7 @@ export default function ExtraSidebar() {
</button>
</ShadTooltip>
<ShadTooltip delayDuration={1000} content="Save" side="top">
<ShadTooltip content="Save" side="top">
<button
className="hover:dark:hover:bg-[#242f47] text-gray-700 w-full justify-center transition-all shadow-sm duration-500 ease-in-out dark:bg-gray-800 dark:text-gray-300 relative inline-flex items-center bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-gray-50 rounded-md"
onClick={(event) => {
@ -156,7 +156,6 @@ export default function ExtraSidebar() {
.map((t: string, k) => (
<ShadTooltip
content={data[d][t].display_name}
delayDuration={1500}
side="right"
key={data[d][t].display_name}
>

View file

@ -1,5 +1,5 @@
import { useContext, useState } from "react";
import { Settings2, Copy, Trash2 } from "lucide-react";
import { Settings2, Copy, Trash2, FileText } from "lucide-react";
import { classNames } from "../../../../utils";
import { TabsContext } from "../../../../contexts/tabsContext";
import { useReactFlow } from "reactflow";
@ -29,23 +29,21 @@ const NodeToolbarComponent = (props) => {
<>
<div className="h-10 w-26">
<span className="isolate inline-flex rounded-md shadow-sm">
<ShadTooltip delayDuration={1000} content="Delete" side="top">
<ShadTooltip content="Delete" side="top">
<button
className="hover:dark:hover:bg-[#242f47] text-gray-700 transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-gray-300 shadow-md relative inline-flex items-center rounded-l-md bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-muted focus:z-10"
className="hover:dark:hover:bg-[#242f47] text-foreground transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-muted-foreground shadow-md relative inline-flex items-center rounded-l-md bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-muted focus:z-10"
onClick={() => {
props.deleteNode(props.data.id);
}}
>
<Trash2 className="w-4 h-4 dark:text-gray-300"></Trash2>
<Trash2 className="w-4 h-4 dark:text-muted-foreground"></Trash2>
</button>
</ShadTooltip>
<ShadTooltip delayDuration={1000} content="Duplicate" side="top">
<ShadTooltip content="Duplicate" side="top">
<button
className={classNames(
nodeLength > 0
? "hover:dark:hover:bg-[#242f47] text-gray-700 transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-gray-300 shadow-md relative -ml-px inline-flex items-center bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-muted focus:z-10"
: "hover:dark:hover:bg-[#242f47] text-gray-700 transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-gray-300 shadow-md relative -ml-px inline-flex items-center bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-muted focus:z-10 rounded-r-md"
"hover:dark:hover:bg-[#242f47] text-foreground transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-muted-foreground shadow-md relative -ml-px inline-flex items-center bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-muted focus:z-10"
)}
onClick={(event) => {
event.preventDefault();
@ -64,31 +62,66 @@ const NodeToolbarComponent = (props) => {
);
}}
>
<Copy className="w-4 h-4 dark:text-gray-300"></Copy>
<Copy className="w-4 h-4 dark:text-muted-foreground"></Copy>
</button>
</ShadTooltip>
{nodeLength > 0 && (
<ShadTooltip delayDuration={1000} content="Edit" side="top">
<button
className="hover:dark:hover:bg-[#242f47] text-gray-700 transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-gray-300 shadow-md relative -ml-px inline-flex items-center bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-muted focus:z-10 rounded-r-md"
onClick={(event) => {
<ShadTooltip
content={
props.data.node.documentation === ""
? "Coming Soon"
: "Documentation"
}
side="top"
>
<a
className={classNames(
"hover:dark:hover:bg-[#242f47] transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-muted-foreground shadow-md relative -ml-px inline-flex items-center bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-muted focus:z-10" +
(props.data.node.documentation === ""
? " text-muted-foreground"
: " text-foreground")
)}
target="_blank"
rel="noopener noreferrer"
href={props.data.node.documentation}
// deactivate link if no documentation is provided
onClick={(event) => {
if (props.data.node.documentation === "") {
event.preventDefault();
props.openPopUp(<EditNodeModal data={props.data} />);
}}
>
<Settings2 className="w-4 h-4 dark:text-gray-300"></Settings2>
</button>
</ShadTooltip>
)}
}
}}
>
<FileText className="w-4 h-4 dark:text-muted-foreground"></FileText>
</a>
</ShadTooltip>
<ShadTooltip content="Edit" side="top">
<button
className={classNames(
"hover:dark:hover:bg-[#242f47] transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-muted-foreground shadow-md relative -ml-px inline-flex items-center bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-muted focus:z-10 rounded-r-md" +
(nodeLength == 0
? " text-muted-foreground"
: " text-foreground")
)}
onClick={(event) => {
if (nodeLength == 0) {
event.preventDefault();
}
event.preventDefault();
props.openPopUp(<EditNodeModal data={props.data} />);
}}
>
<Settings2 className="w-4 h-4 dark:text-muted-foreground"></Settings2>
</button>
</ShadTooltip>
{/*
<Menu as="div" className="relative inline-block text-left z-100">
<button className="hover:dark:hover:bg-[#242f47] text-gray-700 transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-gray-300 shadow-md relative -ml-px inline-flex items-center bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-muted focus:z-10 rounded-r-md">
<button className="hover:dark:hover:bg-[#242f47] text-foreground transition-all duration-500 ease-in-out dark:bg-gray-800 dark:text-muted-foreground shadow-md relative -ml-px inline-flex items-center bg-white px-2 py-2 ring-1 ring-inset ring-gray-300 hover:bg-muted focus:z-10 rounded-r-md">
<div>
<Menu.Button className="flex items-center">
<EllipsisVerticalIcon
className="w-5 h-5 dark:text-gray-300"
className="w-5 h-5 dark:text-muted-foreground"
aria-hidden="true"
/>
</Menu.Button>
@ -117,7 +150,7 @@ const NodeToolbarComponent = (props) => {
className={classNames(
active
? "bg-muted text-gray-900"
: "text-gray-700",
: "text-foreground",
"w-full group flex items-center px-4 py-2 text-sm"
)}
>
@ -157,7 +190,7 @@ const NodeToolbarComponent = (props) => {
className={classNames(
active
? "bg-muted text-gray-900"
: "text-gray-700",
: "text-foreground",
"w-full group flex items-center px-4 py-2 text-sm"
)}
>

View file

@ -12,6 +12,7 @@ export type APIClassType = {
description: string;
template: APITemplateType;
display_name: string;
documentation: string;
[key: string]: Array<string> | string | APITemplateType;
};
export type TemplateVariableType = {

View file

@ -1,10 +1,4 @@
import {
ComponentType,
ForwardRefExoticComponent,
ReactElement,
ReactNode,
SVGProps,
} from "react";
import { ReactElement, ReactNode } from "react";
import { NodeDataType } from "../flow/index";
import { typesContextType } from "../typesContext";
export type InputComponentType = {
@ -42,6 +36,7 @@ export type ParameterComponentType = {
name?: string;
tooltipTitle: string;
dataContext?: typesContextType;
info?: string;
};
export type InputListComponentType = {
value: string[];
@ -123,4 +118,12 @@ export type AccordionComponentType = {
children?: ReactElement;
open?: string[];
trigger?: string;
};
};
export type Side = "top" | "right" | "bottom" | "left";
export type ShadTooltipProps = {
delayDuration?: number;
side?: Side;
content: ReactNode;
children: ReactNode;
};

View file

@ -1,21 +1,3 @@
import {
RocketLaunchIcon,
LinkIcon,
CpuChipIcon,
LightBulbIcon,
CommandLineIcon,
WrenchScrewdriverIcon,
WrenchIcon,
ComputerDesktopIcon,
GiftIcon,
PaperClipIcon,
QuestionMarkCircleIcon,
FingerPrintIcon,
ScissorsIcon,
CircleStackIcon,
Squares2X2Icon,
Bars3CenterLeftIcon,
} from "@heroicons/react/24/outline";
import { Connection, Edge, Node, ReactFlowInstance } from "reactflow";
import { FlowType, NodeType } from "./types/flow";
import { APITemplateType } from "./types/api";
@ -58,6 +40,7 @@ import {
Paperclip,
Rocket,
Scissors,
FileSearch,
TerminalSquare,
Wand2,
Wrench,
@ -139,6 +122,7 @@ export const nodeColors: { [char: string]: string } = {
toolkits: "#DB2C2C",
wrappers: "#E6277A",
utilities: "#31A3CC",
retrievers: "#e6b25a",
unknown: "#9CA3AF",
};
@ -157,72 +141,11 @@ export const nodeNames: { [char: string]: string } = {
toolkits: "Toolkits",
wrappers: "Wrappers",
textsplitters: "Text Splitters",
retrievers: "Retrievers",
utilities: "Utilities",
unknown: "Unknown",
};
export const nodeIcons: {
[char: string]: React.ForwardRefExoticComponent<
React.SVGProps<SVGSVGElement>
>;
} = {
Chroma: ChromaIcon,
AirbyteJSONLoader: AirbyteIcon,
// SerpAPIWrapper: SerperIcon,
// AZLyricsLoader: AzIcon,
Anthropic: AnthropicIcon,
ChatAnthropic: AnthropicIcon,
BingSearchAPIWrapper: BingIcon,
BingSearchRun: BingIcon,
Cohere: CohereIcon,
CohereEmbeddings: CohereIcon,
EverNoteLoader: EvernoteIcon,
FacebookChatLoader: FBIcon,
GitbookLoader: GitBookIcon,
GoogleSearchAPIWrapper: GoogleIcon,
GoogleSearchResults: GoogleIcon,
GoogleSearchRun: GoogleIcon,
HNLoader: HackerNewsIcon,
HuggingFaceHub: HugginFaceIcon,
HuggingFaceEmbeddings: HugginFaceIcon,
IFixitLoader: IFixIcon,
Meta: MetaIcon,
Midjourney: MidjourneyIcon,
NotionDirectoryLoader: NotionIcon,
ChatOpenAI: OpenAiIcon,
OpenAI: OpenAiIcon,
OpenAIEmbeddings: OpenAiIcon,
Pinecone: PineconeIcon,
SupabaseVectorStore: SupabaseIcon,
MongoDBAtlasVectorSearch: MongoDBIcon,
// UnstructuredPowerPointLoader: PowerPointIcon, // word and powerpoint have differente styles
Qdrant: QDrantIcon,
// ReadTheDocsLoader: ReadTheDocsIcon, // does not work
Searx: SearxIcon,
SlackDirectoryLoader: SlackIcon,
// Weaviate: WeaviateIcon, // does not work
// WikipediaAPIWrapper: WikipediaIcon,
// WolframAlphaQueryRun: WolframIcon,
// WolframAlphaAPIWrapper: WolframIcon,
// UnstructuredWordDocumentLoader: WordIcon, // word and powerpoint have differente styles
agents: RocketLaunchIcon,
chains: LinkIcon,
memories: CpuChipIcon,
llms: LightBulbIcon,
prompts: CommandLineIcon,
tools: WrenchIcon,
advanced: ComputerDesktopIcon,
chat: Bars3CenterLeftIcon,
embeddings: FingerPrintIcon,
documentloaders: PaperClipIcon,
vectorstores: CircleStackIcon,
toolkits: WrenchScrewdriverIcon,
textsplitters: ScissorsIcon,
wrappers: GiftIcon,
utilities: Squares2X2Icon,
unknown: QuestionMarkCircleIcon,
};
export const nodeIconsLucide: {
[char: string]: React.ForwardRefExoticComponent<
ComponentType<SVGProps<SVGSVGElement>>
@ -363,6 +286,9 @@ export const nodeIconsLucide: {
utilities: Wand2 as React.ForwardRefExoticComponent<
ComponentType<SVGProps<SVGSVGElement>>
>,
retrievers: FileSearch as React.ForwardRefExoticComponent<
ComponentType<SVGProps<SVGSVGElement>>
>,
unknown: HelpCircle as React.ForwardRefExoticComponent<
ComponentType<SVGProps<SVGSVGElement>>
>,
@ -978,5 +904,5 @@ export function getRandomKeyByssmm(): string {
const now = new Date();
const seconds = String(now.getSeconds()).padStart(2, "0");
const milliseconds = String(now.getMilliseconds()).padStart(3, "0");
return seconds + milliseconds;
return seconds + milliseconds + Math.abs(Math.floor(Math.random() * 10001));
}

View file

@ -26,6 +26,7 @@ def test_zero_shot_agent(client: TestClient):
"type": "LLMChain",
"list": False,
"advanced": False,
"info": "",
}
assert template["allowed_tools"] == {
"required": False,
@ -37,6 +38,7 @@ def test_zero_shot_agent(client: TestClient):
"type": "Tool",
"list": True,
"advanced": False,
"info": "",
}
@ -60,6 +62,7 @@ def test_json_agent(client: TestClient):
"type": "BaseToolkit",
"list": False,
"advanced": False,
"info": "",
}
assert template["llm"] == {
"required": True,
@ -72,6 +75,7 @@ def test_json_agent(client: TestClient):
"list": False,
"advanced": False,
"display_name": "LLM",
"info": "",
}
@ -99,6 +103,7 @@ def test_csv_agent(client: TestClient):
"list": False,
"file_path": None,
"advanced": False,
"info": "",
}
assert template["llm"] == {
"required": True,
@ -111,6 +116,7 @@ def test_csv_agent(client: TestClient):
"list": False,
"advanced": False,
"display_name": "LLM",
"info": "",
}
@ -143,6 +149,7 @@ def test_initialize_agent(client: TestClient):
"type": "str",
"list": True,
"advanced": False,
"info": "",
}
assert template["memory"] == {
"required": False,
@ -154,6 +161,7 @@ def test_initialize_agent(client: TestClient):
"type": "BaseChatMemory",
"list": False,
"advanced": False,
"info": "",
}
assert template["tools"] == {
"required": False,
@ -165,6 +173,7 @@ def test_initialize_agent(client: TestClient):
"type": "Tool",
"list": True,
"advanced": False,
"info": "",
}
assert template["llm"] == {
"required": True,
@ -177,4 +186,5 @@ def test_initialize_agent(client: TestClient):
"list": False,
"advanced": False,
"display_name": "LLM",
"info": "",
}

View file

@ -38,6 +38,7 @@ def test_conversation_chain(client: TestClient):
"type": "BaseMemory",
"list": False,
"advanced": False,
"info": "",
}
assert template["verbose"] == {
"required": False,
@ -49,6 +50,7 @@ def test_conversation_chain(client: TestClient):
"type": "bool",
"list": False,
"advanced": True,
"info": "",
}
assert template["llm"] == {
"required": True,
@ -60,6 +62,7 @@ def test_conversation_chain(client: TestClient):
"type": "BaseLanguageModel",
"list": False,
"advanced": False,
"info": "",
}
assert template["input_key"] == {
"required": True,
@ -72,6 +75,7 @@ def test_conversation_chain(client: TestClient):
"type": "str",
"list": False,
"advanced": True,
"info": "",
}
assert template["output_key"] == {
"required": True,
@ -84,6 +88,7 @@ def test_conversation_chain(client: TestClient):
"type": "str",
"list": False,
"advanced": True,
"info": "",
}
assert template["_type"] == "ConversationChain"
@ -120,6 +125,7 @@ def test_llm_chain(client: TestClient):
"type": "BaseMemory",
"list": False,
"advanced": False,
"info": "",
}
assert template["verbose"] == {
"required": False,
@ -132,6 +138,7 @@ def test_llm_chain(client: TestClient):
"type": "bool",
"list": False,
"advanced": True,
"info": "",
}
assert template["llm"] == {
"required": True,
@ -143,6 +150,7 @@ def test_llm_chain(client: TestClient):
"type": "BaseLanguageModel",
"list": False,
"advanced": False,
"info": "",
}
assert template["output_key"] == {
"required": True,
@ -155,6 +163,7 @@ def test_llm_chain(client: TestClient):
"type": "str",
"list": False,
"advanced": True,
"info": "",
}
@ -184,6 +193,7 @@ def test_llm_checker_chain(client: TestClient):
"type": "BaseLanguageModel",
"list": False,
"advanced": False,
"info": "",
}
assert template["_type"] == "LLMCheckerChain"
@ -217,6 +227,7 @@ def test_llm_math_chain(client: TestClient):
"type": "BaseMemory",
"list": False,
"advanced": False,
"info": "",
}
assert template["verbose"] == {
"required": False,
@ -229,6 +240,7 @@ def test_llm_math_chain(client: TestClient):
"type": "bool",
"list": False,
"advanced": True,
"info": "",
}
assert template["llm"] == {
"required": True,
@ -240,6 +252,7 @@ def test_llm_math_chain(client: TestClient):
"type": "BaseLanguageModel",
"list": False,
"advanced": False,
"info": "",
}
assert template["input_key"] == {
"required": True,
@ -252,6 +265,7 @@ def test_llm_math_chain(client: TestClient):
"type": "str",
"list": False,
"advanced": True,
"info": "",
}
assert template["output_key"] == {
"required": True,
@ -264,6 +278,7 @@ def test_llm_math_chain(client: TestClient):
"type": "str",
"list": False,
"advanced": True,
"info": "",
}
assert template["_type"] == "LLMMathChain"
@ -304,6 +319,7 @@ def test_series_character_chain(client: TestClient):
"type": "BaseLanguageModel",
"list": False,
"advanced": False,
"info": "",
}
assert template["character"] == {
"required": True,
@ -315,6 +331,7 @@ def test_series_character_chain(client: TestClient):
"type": "str",
"list": False,
"advanced": False,
"info": "",
}
assert template["series"] == {
"required": True,
@ -326,6 +343,7 @@ def test_series_character_chain(client: TestClient):
"type": "str",
"list": False,
"advanced": False,
"info": "",
}
assert template["_type"] == "SeriesCharacterChain"
@ -367,6 +385,7 @@ def test_mid_journey_prompt_chain(client: TestClient):
"type": "BaseLanguageModel",
"list": False,
"advanced": False,
"info": "",
}
# Test the description object
assert (
@ -406,6 +425,7 @@ def test_time_travel_guide_chain(client: TestClient):
"type": "BaseLanguageModel",
"list": False,
"advanced": False,
"info": "",
}
assert template["memory"] == {
"required": False,
@ -417,6 +437,7 @@ def test_time_travel_guide_chain(client: TestClient):
"type": "BaseChatMemory",
"list": False,
"advanced": False,
"info": "",
}
assert chain["description"] == "Time travel guide chain."

View file

@ -121,6 +121,7 @@ def test_openai(client: TestClient):
"type": "bool",
"list": False,
"advanced": False,
"info": "",
}
assert template["verbose"] == {
"required": False,
@ -132,6 +133,7 @@ def test_openai(client: TestClient):
"type": "bool",
"list": False,
"advanced": False,
"info": "",
}
assert template["client"] == {
"required": False,
@ -143,6 +145,7 @@ def test_openai(client: TestClient):
"type": "Any",
"list": False,
"advanced": False,
"info": "",
}
assert template["model_name"] == {
"required": False,
@ -162,6 +165,7 @@ def test_openai(client: TestClient):
"type": "str",
"list": True,
"advanced": False,
"info": "",
}
# Add more assertions for other properties here
assert template["temperature"] == {
@ -175,6 +179,7 @@ def test_openai(client: TestClient):
"type": "float",
"list": False,
"advanced": False,
"info": "",
}
assert template["max_tokens"] == {
"required": False,
@ -187,6 +192,7 @@ def test_openai(client: TestClient):
"type": "int",
"list": False,
"advanced": False,
"info": "",
}
assert template["top_p"] == {
"required": False,
@ -199,6 +205,7 @@ def test_openai(client: TestClient):
"type": "float",
"list": False,
"advanced": False,
"info": "",
}
assert template["frequency_penalty"] == {
"required": False,
@ -211,6 +218,7 @@ def test_openai(client: TestClient):
"type": "float",
"list": False,
"advanced": False,
"info": "",
}
assert template["presence_penalty"] == {
"required": False,
@ -223,6 +231,7 @@ def test_openai(client: TestClient):
"type": "float",
"list": False,
"advanced": False,
"info": "",
}
assert template["n"] == {
"required": False,
@ -235,6 +244,7 @@ def test_openai(client: TestClient):
"type": "int",
"list": False,
"advanced": False,
"info": "",
}
assert template["best_of"] == {
"required": False,
@ -247,6 +257,7 @@ def test_openai(client: TestClient):
"type": "int",
"list": False,
"advanced": False,
"info": "",
}
assert template["model_kwargs"] == {
"required": False,
@ -258,6 +269,7 @@ def test_openai(client: TestClient):
"type": "code",
"list": False,
"advanced": True,
"info": "",
}
assert template["openai_api_key"] == {
"required": False,
@ -271,6 +283,7 @@ def test_openai(client: TestClient):
"type": "str",
"list": False,
"advanced": False,
"info": "",
}
assert template["batch_size"] == {
"required": False,
@ -283,6 +296,7 @@ def test_openai(client: TestClient):
"type": "int",
"list": False,
"advanced": False,
"info": "",
}
assert template["request_timeout"] == {
"required": False,
@ -294,6 +308,7 @@ def test_openai(client: TestClient):
"type": "float",
"list": False,
"advanced": False,
"info": "",
}
assert template["logit_bias"] == {
"required": False,
@ -305,6 +320,7 @@ def test_openai(client: TestClient):
"type": "code",
"list": False,
"advanced": False,
"info": "",
}
assert template["max_retries"] == {
"required": False,
@ -317,6 +333,7 @@ def test_openai(client: TestClient):
"type": "int",
"list": False,
"advanced": False,
"info": "",
}
assert template["streaming"] == {
"required": False,
@ -329,6 +346,7 @@ def test_openai(client: TestClient):
"type": "bool",
"list": False,
"advanced": False,
"info": "",
}
@ -352,6 +370,7 @@ def test_chat_open_ai(client: TestClient):
"type": "bool",
"list": False,
"advanced": False,
"info": "",
}
assert template["client"] == {
"required": False,
@ -363,6 +382,7 @@ def test_chat_open_ai(client: TestClient):
"type": "Any",
"list": False,
"advanced": False,
"info": "",
}
assert template["model_name"] == {
"required": False,
@ -385,6 +405,7 @@ def test_chat_open_ai(client: TestClient):
"type": "str",
"list": True,
"advanced": False,
"info": "",
}
assert template["temperature"] == {
"required": False,
@ -397,6 +418,7 @@ def test_chat_open_ai(client: TestClient):
"type": "float",
"list": False,
"advanced": False,
"info": "",
}
assert template["model_kwargs"] == {
"required": False,
@ -408,6 +430,7 @@ def test_chat_open_ai(client: TestClient):
"type": "code",
"list": False,
"advanced": True,
"info": "",
}
assert template["openai_api_key"] == {
"required": False,
@ -421,6 +444,7 @@ def test_chat_open_ai(client: TestClient):
"type": "str",
"list": False,
"advanced": False,
"info": "",
}
assert template["request_timeout"] == {
"required": False,
@ -432,6 +456,7 @@ def test_chat_open_ai(client: TestClient):
"type": "float",
"list": False,
"advanced": False,
"info": "",
}
assert template["max_retries"] == {
"required": False,
@ -444,6 +469,7 @@ def test_chat_open_ai(client: TestClient):
"type": "int",
"list": False,
"advanced": False,
"info": "",
}
assert template["streaming"] == {
"required": False,
@ -456,6 +482,7 @@ def test_chat_open_ai(client: TestClient):
"type": "bool",
"list": False,
"advanced": False,
"info": "",
}
assert template["n"] == {
"required": False,
@ -468,6 +495,7 @@ def test_chat_open_ai(client: TestClient):
"type": "int",
"list": False,
"advanced": False,
"info": "",
}
assert template["max_tokens"] == {
@ -480,6 +508,7 @@ def test_chat_open_ai(client: TestClient):
"type": "int",
"list": False,
"advanced": False,
"info": "",
}
assert template["_type"] == "ChatOpenAI"
assert (

View file

@ -28,6 +28,7 @@ def test_prompt_template(client: TestClient):
"type": "str",
"list": True,
"advanced": False,
"info": "",
}
assert template["output_parser"] == {
"required": False,
@ -39,6 +40,7 @@ def test_prompt_template(client: TestClient):
"type": "BaseOutputParser",
"list": False,
"advanced": False,
"info": "",
}
assert template["partial_variables"] == {
"required": False,
@ -50,6 +52,7 @@ def test_prompt_template(client: TestClient):
"type": "code",
"list": False,
"advanced": False,
"info": "",
}
assert template["template"] == {
"required": True,
@ -61,6 +64,7 @@ def test_prompt_template(client: TestClient):
"type": "prompt",
"list": False,
"advanced": False,
"info": "",
}
assert template["template_format"] == {
"required": False,
@ -73,6 +77,7 @@ def test_prompt_template(client: TestClient):
"type": "str",
"list": False,
"advanced": False,
"info": "",
}
assert template["validate_template"] == {
"required": False,
@ -85,85 +90,7 @@ def test_prompt_template(client: TestClient):
"type": "bool",
"list": False,
"advanced": False,
}
def test_few_shot_prompt_template(client: TestClient):
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
prompts = json_response["prompts"]
prompt = prompts["FewShotPromptTemplate"]
template = prompt["template"]
# Test other fields in the template similar to PromptTemplate
assert template["examples"] == {
"required": False,
"placeholder": "",
"show": True,
"multiline": True,
"password": False,
"name": "examples",
"type": "prompt",
"list": True,
"advanced": False,
}
assert template["example_selector"] == {
"required": False,
"placeholder": "",
"show": False,
"multiline": False,
"password": False,
"name": "example_selector",
"type": "BaseExampleSelector",
"list": False,
"advanced": False,
}
assert template["example_prompt"] == {
"required": True,
"placeholder": "",
"show": True,
"multiline": False,
"password": False,
"name": "example_prompt",
"type": "PromptTemplate",
"list": False,
"advanced": False,
}
assert template["suffix"] == {
"required": True,
"placeholder": "",
"show": True,
"multiline": True,
"password": False,
"name": "suffix",
"type": "prompt",
"list": False,
"advanced": False,
}
assert template["example_separator"] == {
"required": False,
"placeholder": "",
"show": False,
"multiline": False,
"value": "\n\n",
"password": False,
"name": "example_separator",
"type": "str",
"list": False,
"advanced": False,
}
assert template["prefix"] == {
"required": False,
"placeholder": "",
"show": True,
"multiline": True,
"value": "",
"password": False,
"name": "prefix",
"type": "prompt",
"list": False,
"advanced": False,
"info": "",
}
@ -185,6 +112,7 @@ def test_zero_shot_prompt(client: TestClient):
"type": "prompt",
"list": False,
"advanced": False,
"info": "",
}
assert template["suffix"] == {
"required": True,
@ -197,6 +125,7 @@ def test_zero_shot_prompt(client: TestClient):
"type": "prompt",
"list": False,
"advanced": False,
"info": "",
}
assert template["format_instructions"] == {
"required": True,
@ -209,4 +138,5 @@ def test_zero_shot_prompt(client: TestClient):
"type": "prompt",
"list": False,
"advanced": False,
"info": "",
}

View file

@ -7,7 +7,7 @@ import pytest
def test_init_build(client):
response = client.post(
"api/v1/build/init", json={"id": "test", "data": {"key": "value"}}
"api/v1/build/init/test", json={"id": "test", "data": {"key": "value"}}
)
assert response.status_code == 201
assert response.json() == {"flowId": "test"}