Update to Pydantic V2 (#1112)

Commit ee17f9edfb by Gabriel Luiz Freitas Almeida, 2023-11-10 12:21:10 -03:00, committed via GitHub.
75 changed files with 1898 additions and 1199 deletions

View file

@ -15,7 +15,7 @@
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
"postCreateCommand": "make install_frontend && make install_backend",
"postCreateCommand": "make setup_devcontainer",
"containerEnv": {
"POETRY_VIRTUALENVS_IN_PROJECT": "true"
@ -31,11 +31,13 @@
"sourcery.sourcery",
"eamodio.gitlens",
"ms-vscode.makefile-tools",
"GitHub.vscode-pull-request-github"
"GitHub.vscode-pull-request-github",
"Codium.codium",
"ms-azuretools.vscode-docker"
]
}
}
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "root"
}
}

View file

@ -15,7 +15,7 @@ on:
- "pyproject.toml"
env:
POETRY_VERSION: "1.4.0"
POETRY_VERSION: "1.7.0"
jobs:
lint:

View file

@ -49,15 +49,16 @@ run_frontend:
cd src/frontend && npm start
run_cli:
poetry run langflow run --path src/frontend/build
poetry run langflow --path src/frontend/build
run_cli_debug:
poetry run langflow run --path src/frontend/build --log-level debug
poetry run langflow --path src/frontend/build --log-level debug
setup_devcontainer:
make init
make build_frontend
poetry run langflow --path src/frontend/build
@echo 'Run Cli'
make run_cli
frontend:
@-make install_frontend || (echo "An error occurred while installing frontend dependencies. Attempting to fix." && make install_frontendc)

View file

@ -23,7 +23,7 @@ ENV PYTHONUNBUFFERED=1 \
\
# poetry
# https://python-poetry.org/docs/configuration/#using-environment-variables
POETRY_VERSION=1.5.1 \
POETRY_VERSION=1.7 \
# make poetry install to this location
POETRY_HOME="/opt/poetry" \
# make poetry create the virtual environment in the project's root

poetry.lock (generated, 1812 changed lines): diff suppressed because it is too large.

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.5.5"
version = "0.5.6"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [
@ -25,6 +25,8 @@ documentation = "https://docs.langflow.org"
langflow = "langflow.__main__:main"
[tool.poetry.dependencies]
python = ">=3.9,<3.11"
fastapi = "^0.104.0"
uvicorn = "^0.23.0"
@ -33,19 +35,19 @@ google-search-results = "^2.4.1"
google-api-python-client = "^2.79.0"
typer = "^0.9.0"
gunicorn = "^21.2.0"
langchain = "^0.0.312"
openai = "^1.0.1"
langchain = "~0.0.327"
openai = "^0.27.8"
pandas = "2.0.3"
chromadb = "^0.3.21"
chromadb = "^0.4.0"
huggingface-hub = { version = "^0.16.0", extras = ["inference"] }
rich = "^13.5.0"
llama-cpp-python = { version = "~0.1.0", optional = true }
rich = "^13.6.0"
llama-cpp-python = { version = "~0.2.0", optional = true }
networkx = "^3.1"
unstructured = "^0.10.0"
pypdf = "^3.15.0"
pypdf = "^3.17.0"
lxml = "^4.9.2"
pysrt = "^1.1.2"
fake-useragent = "^1.2.1"
fake-useragent = "^1.3.0"
docstring-parser = "^0.15"
psycopg2-binary = "^2.9.6"
pyarrow = "^14.0.0"
@ -57,25 +59,26 @@ weaviate-client = "^3.23.0"
jina = "3.15.2"
sentence-transformers = { version = "^2.2.2", optional = true }
ctransformers = { version = "^0.2.10", optional = true }
cohere = "^4.27.0"
cohere = "^4.32.0"
python-multipart = "^0.0.6"
sqlmodel = "^0.0.8"
# install sqlmodel using https://github.com/honglei/sqlmodel.git
sqlmodel = { git = "https://github.com/AntonDeMeester/sqlmodel.git", branch = "main" }
faiss-cpu = "^1.7.4"
anthropic = "^0.3.0"
anthropic = "^0.5.0"
orjson = "3.9.3"
multiprocess = "^0.70.14"
cachetools = "^5.3.1"
types-cachetools = "^5.3.0.5"
platformdirs = "^3.11.0"
pinecone-client = "^2.2.2"
pymongo = "^4.5.0"
supabase = "^2.0.3"
pymongo = "^4.4.0"
certifi = "^2023.5.7"
google-cloud-aiplatform = "^1.36.0"
psycopg = "^3.1.9"
psycopg-binary = "^3.1.9"
fastavro = "^1.8.0"
langchain-experimental = "^0.0.8"
langchain-experimental = "*"
celery = { extras = ["redis"], version = "^5.3.1", optional = true }
redis = { version = "^4.6.0", optional = true }
flower = { version = "^2.0.0", optional = true }
@ -84,12 +87,17 @@ passlib = "^1.7.4"
bcrypt = "^4.0.1"
python-jose = "^3.3.0"
metaphor-python = "^0.1.11"
pydantic = "^2.4.0"
pydantic-settings = "^2.0.3"
zep-python = { version = "^1.3.0", allow-prereleases = true }
pywin32 = { version = "^306", markers = "sys_platform == 'win32'" }
loguru = "^0.7.1"
langfuse = "^1.0.13"
langfuse = "^1.1.11"
pillow = "^10.0.0"
metal-sdk = "^2.2.0"
metal-sdk = "^2.4.0"
markupsafe = "^2.1.3"
extract-msg = "^0.45.0"
jq = "^1.6.0"
boto3 = "^1.28.63"
numexpr = "^2.8.6"
qianfan = "0.0.5"
@ -97,15 +105,15 @@ pgvector = "^0.2.3"
[tool.poetry.group.dev.dependencies]
types-redis = "^4.6.0.5"
black = "^23.1.0"
black = "^23.10.0"
ipykernel = "^6.21.2"
mypy = "^1.1.1"
ruff = "^0.0.254"
mypy = "^1.6.1"
ruff = "^0.1.5"
httpx = "*"
pytest = "^7.2.2"
types-requests = "^2.28.11"
requests = "^2.28.0"
pytest-cov = "^4.0.0"
pytest = "^7.4.2"
types-requests = "^2.31.0"
requests = "^2.31.0"
pytest-cov = "^4.1.0"
pandas-stubs = "^2.0.0.230412"
types-pillow = "^9.5.0.2"
types-pyyaml = "^6.0.12.8"
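
The new pydantic and pydantic-settings entries reflect a structural change in Pydantic V2: BaseSettings was split out of the core package into pydantic-settings. A minimal sketch of the import change (the settings class and field below are illustrative, not from this commit):

# Pydantic V1:
#   from pydantic import BaseSettings
# Pydantic V2 moves settings support into the separate pydantic-settings package:
from pydantic_settings import BaseSettings

class ExampleSettings(BaseSettings):
    # Read from the DATABASE_URL environment variable, with a local default
    database_url: str = "sqlite:///./example.db"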

View file

@ -1,6 +1,6 @@
from typing import Optional
from langflow.template.frontend_node.base import FrontendNode
from pydantic import BaseModel, validator
from pydantic import field_validator, BaseModel
from langflow.interface.utils import extract_input_variables_from_prompt
from langchain.prompts import PromptTemplate
@ -30,11 +30,13 @@ class CodeValidationResponse(BaseModel):
imports: dict
function: dict
@validator("imports")
@field_validator("imports")
@classmethod
def validate_imports(cls, v):
return v or {"errors": []}
@validator("function")
@field_validator("function")
@classmethod
def validate_function(cls, v):
return v or {"errors": []}
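
This hunk shows the core V1-to-V2 validator change repeated throughout the backend: @validator becomes @field_validator, which Pydantic V2 expects to be stacked with an explicit @classmethod. A minimal sketch of the resulting pattern, using the same model as above:

from pydantic import BaseModel, field_validator

class CodeValidationResponse(BaseModel):
    imports: dict
    function: dict

    # Pydantic V1 spelling: @validator("imports")
    @field_validator("imports")
    @classmethod
    def validate_imports(cls, v):
        # Normalize a falsy value to an empty error container
        return v or {"errors": []}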

View file

@ -163,6 +163,11 @@ async def stream_build(
number_of_nodes = len(graph.nodes)
update_build_status(cache_service, flow_id, BuildStatus.IN_PROGRESS)
try:
user_id = cache_service[flow_id]["user_id"]
except KeyError:
logger.debug("No user_id found in cache_service")
user_id = None
for i, vertex in enumerate(graph.generator_build(), 1):
try:
log_dict = {
@ -170,9 +175,9 @@ async def stream_build(
}
yield str(StreamData(event="log", data=log_dict))
if vertex.is_task:
vertex = try_running_celery_task(vertex)
vertex = try_running_celery_task(vertex, user_id)
else:
vertex.build()
vertex.build(user_id=user_id)
params = vertex._built_object_repr()
valid = True
logger.debug(f"Building node {str(vertex.vertex_type)}")
@ -236,7 +241,7 @@ async def stream_build(
raise HTTPException(status_code=500, detail=str(exc))
def try_running_celery_task(vertex):
def try_running_celery_task(vertex, user_id):
# Try running the task in celery
# and set the task_id to the local vertex
# if it fails, run the task locally
@ -248,5 +253,5 @@ def try_running_celery_task(vertex):
except Exception as exc:
logger.debug(f"Error running task in celery: {exc}")
vertex.task_id = None
vertex.build()
vertex.build(user_id=user_id)
return vertex

View file

@ -227,6 +227,7 @@ def get_version():
@router.post("/custom_component", status_code=HTTPStatus.OK)
async def custom_component(
raw_code: CustomComponentCode,
user: User = Depends(get_current_active_user),
):
from langflow.interface.types import (
build_langchain_template_custom_component,
@ -235,4 +236,4 @@ async def custom_component(
extractor = CustomComponent(code=raw_code.code)
extractor.is_check_valid()
return build_langchain_template_custom_component(extractor)
return build_langchain_template_custom_component(extractor, user_id=user.id)

View file

@ -7,7 +7,7 @@ from langflow.services.database.models.flow import FlowCreate, FlowRead
from langflow.services.database.models.user import UserRead
from langflow.services.database.models.base import orjson_dumps
from pydantic import BaseModel, Field, validator
from pydantic import BaseModel, Field, field_validator
class BuildStatus(Enum):
@ -91,7 +91,8 @@ class ChatResponse(ChatMessage):
is_bot: bool = True
files: list = []
@validator("type")
@field_validator("type")
@classmethod
def validate_message_type(cls, v):
if v not in ["start", "stream", "end", "error", "info", "file"]:
raise ValueError("type must be start, stream, end, error, info, or file")
@ -109,12 +110,13 @@ class PromptResponse(ChatMessage):
class FileResponse(ChatMessage):
"""File response schema."""
data: Any
data: Any = None
data_type: str
type: str = "file"
is_bot: bool = True
@validator("data_type")
@field_validator("data_type")
@classmethod
def validate_data_type(cls, v):
if v not in ["image", "csv"]:
raise ValueError("data_type must be image or csv")

View file

@ -0,0 +1,29 @@
from langflow import CustomComponent
from langchain.chains import ConversationChain
from typing import Optional, Union, Callable
from langflow.field_typing import BaseLanguageModel, BaseMemory, Chain
class ConversationChainComponent(CustomComponent):
display_name = "ConversationChain"
description = "Chain to have a conversation and load context from memory."
def build_config(self):
return {
"prompt": {"display_name": "Prompt"},
"llm": {"display_name": "LLM"},
"memory": {
"display_name": "Memory",
"info": "Memory to load context from. If none is provided, a ConversationBufferMemory will be used.",
},
"code": {"show": False},
}
def build(
self,
llm: BaseLanguageModel,
memory: Optional[BaseMemory] = None,
) -> Union[Chain, Callable]:
if memory is None:
return ConversationChain(llm=llm)
return ConversationChain(llm=llm, memory=memory)

View file

@ -0,0 +1,30 @@
from langflow import CustomComponent
from langchain.chains import LLMChain
from typing import Optional, Union, Callable
from langflow.field_typing import (
BasePromptTemplate,
BaseLanguageModel,
BaseMemory,
Chain,
)
class LLMChainComponent(CustomComponent):
display_name = "LLMChain"
description = "Chain to run queries against LLMs"
def build_config(self):
return {
"prompt": {"display_name": "Prompt"},
"llm": {"display_name": "LLM"},
"memory": {"display_name": "Memory"},
"code": {"show": False},
}
def build(
self,
prompt: BasePromptTemplate,
llm: BaseLanguageModel,
memory: Optional[BaseMemory] = None,
) -> Union[Chain, Callable]:
return LLMChain(prompt=prompt, llm=llm, memory=memory)

View file

@ -8,7 +8,7 @@ from langchain.schema import Document
class PromptRunner(CustomComponent):
display_name: str = "Prompt Runner"
description: str = "Run a Chain with the given PromptTemplate"
beta = True
beta: bool = True
field_config = {
"llm": {"display_name": "LLM"},
"prompt": {

View file

@ -0,0 +1,232 @@
from langflow import CustomComponent
from langchain.schema import Document
from typing import Any, Dict, List
loaders_info: List[Dict[str, Any]] = [
{
"loader": "AirbyteJSONLoader",
"name": "Airbyte JSON (.jsonl)",
"import": "langchain.document_loaders.AirbyteJSONLoader",
"defaultFor": ["jsonl"],
"allowdTypes": ["jsonl"],
},
{
"loader": "JSONLoader",
"name": "JSON (.json)",
"import": "langchain.document_loaders.JSONLoader",
"defaultFor": ["json"],
"allowdTypes": ["json"],
},
{
"loader": "BSHTMLLoader",
"name": "BeautifulSoup4 HTML (.html, .htm)",
"import": "langchain.document_loaders.BSHTMLLoader",
"allowdTypes": ["html", "htm"],
},
{
"loader": "CSVLoader",
"name": "CSV (.csv)",
"import": "langchain.document_loaders.CSVLoader",
"defaultFor": ["csv"],
"allowdTypes": ["csv"],
},
{
"loader": "CoNLLULoader",
"name": "CoNLL-U (.conllu)",
"import": "langchain.document_loaders.CoNLLULoader",
"defaultFor": ["conllu"],
"allowdTypes": ["conllu"],
},
{
"loader": "EverNoteLoader",
"name": "EverNote (.enex)",
"import": "langchain.document_loaders.EverNoteLoader",
"defaultFor": ["enex"],
"allowdTypes": ["enex"],
},
{
"loader": "FacebookChatLoader",
"name": "Facebook Chat (.json)",
"import": "langchain.document_loaders.FacebookChatLoader",
"allowdTypes": ["json"],
},
{
"loader": "OutlookMessageLoader",
"name": "Outlook Message (.msg)",
"import": "langchain.document_loaders.OutlookMessageLoader",
"defaultFor": ["msg"],
"allowdTypes": ["msg"],
},
{
"loader": "PyPDFLoader",
"name": "PyPDF (.pdf)",
"import": "langchain.document_loaders.PyPDFLoader",
"defaultFor": ["pdf"],
"allowdTypes": ["pdf"],
},
{
"loader": "STRLoader",
"name": "Subtitle (.str)",
"import": "langchain.document_loaders.STRLoader",
"defaultFor": ["str"],
"allowdTypes": ["str"],
},
{
"loader": "TextLoader",
"name": "Text (.txt)",
"import": "langchain.document_loaders.TextLoader",
"defaultFor": ["txt"],
"allowdTypes": ["txt"],
},
{
"loader": "UnstructuredEmailLoader",
"name": "Unstructured Email (.eml)",
"import": "langchain.document_loaders.UnstructuredEmailLoader",
"defaultFor": ["eml"],
"allowdTypes": ["eml"],
},
{
"loader": "UnstructuredHTMLLoader",
"name": "Unstructured HTML (.html, .htm)",
"import": "langchain.document_loaders.UnstructuredHTMLLoader",
"defaultFor": ["html", "htm"],
"allowdTypes": ["html", "htm"],
},
{
"loader": "UnstructuredMarkdownLoader",
"name": "Unstructured Markdown (.md)",
"import": "langchain.document_loaders.UnstructuredMarkdownLoader",
"defaultFor": ["md"],
"allowdTypes": ["md"],
},
{
"loader": "UnstructuredPowerPointLoader",
"name": "Unstructured PowerPoint (.pptx)",
"import": "langchain.document_loaders.UnstructuredPowerPointLoader",
"defaultFor": ["pptx"],
"allowdTypes": ["pptx"],
},
{
"loader": "UnstructuredWordLoader",
"name": "Unstructured Word (.docx)",
"import": "langchain.document_loaders.UnstructuredWordLoader",
"defaultFor": ["docx"],
"allowdTypes": ["docx"],
},
]
class FileLoaderComponent(CustomComponent):
display_name: str = "File Loader"
description: str = "Generic File Loader"
beta = True
def build_config(self):
loader_options = ["Automatic"] + [
loader_info["name"] for loader_info in loaders_info
]
file_types = []
suffixes = []
for loader_info in loaders_info:
if "allowedTypes" in loader_info:
file_types.extend(loader_info["allowedTypes"])
suffixes.extend([f".{ext}" for ext in loader_info["allowedTypes"]])
return {
"file_path": {
"display_name": "File Path",
"required": True,
"field_type": "file",
"file_types": [
"json",
"txt",
"csv",
"jsonl",
"html",
"htm",
"conllu",
"enex",
"msg",
"pdf",
"srt",
"eml",
"md",
"pptx",
"docx",
],
"suffixes": [
".json",
".txt",
".csv",
".jsonl",
".html",
".htm",
".conllu",
".enex",
".msg",
".pdf",
".srt",
".eml",
".md",
".pptx",
".docx",
],
# "file_types" : file_types,
# "suffixes": suffixes,
},
"loader": {
"display_name": "Loader",
"is_list": True,
"required": True,
"options": loader_options,
"value": "Automatic",
},
"code": {"show": False},
}
def build(self, file_path: str, loader: str) -> Document:
file_type = file_path.split(".")[-1]
# Map the selected loader name to its loader info entry
selected_loader_info = None
for loader_info in loaders_info:
if loader_info["name"] == loader:
selected_loader_info = loader_info
break
if selected_loader_info is None and loader != "Automatic":
raise ValueError(f"Loader {loader} not found in the loader info list")
if loader == "Automatic":
# Determine the loader automatically based on the file extension
default_loader_info = None
for info in loaders_info:
if "defaultFor" in info and file_type in info["defaultFor"]:
default_loader_info = info
break
if default_loader_info is None:
raise ValueError(f"No default loader found for file type: {file_type}")
selected_loader_info = default_loader_info
if isinstance(selected_loader_info, dict):
loader_import: str = selected_loader_info["import"]
else:
raise ValueError(
f"Loader info for {loader} is not a dict\nLoader info:\n{selected_loader_info}"
)
module_name, class_name = loader_import.rsplit(".", 1)
try:
# Import the loader dynamically
loader_module = __import__(module_name, fromlist=[class_name])
loader_instance = getattr(loader_module, class_name)
except ImportError as e:
raise ValueError(
f"Loader {loader} could not be imported\nLoader info:\n{selected_loader_info}"
) from e
result = loader_instance(file_path=file_path)
return result.load()

View file

@ -0,0 +1,62 @@
from typing import List
from langflow import CustomComponent
from langchain.document_loaders import AZLyricsLoader
from langchain.document_loaders import CollegeConfidentialLoader
from langchain.document_loaders import GitbookLoader
from langchain.document_loaders import HNLoader
from langchain.document_loaders import IFixitLoader
from langchain.document_loaders import IMSDbLoader
from langchain.document_loaders import WebBaseLoader
from langchain.schema import Document
class UrlLoaderComponent(CustomComponent):
display_name: str = "Url Loader"
description: str = "Generic Url Loader Component"
def build_config(self):
return {
"web_path": {
"display_name": "Url",
"required": True,
},
"loader": {
"display_name": "Loader",
"is_list": True,
"required": True,
"options": [
"AZLyricsLoader",
"CollegeConfidentialLoader",
"GitbookLoader",
"HNLoader",
"IFixitLoader",
"IMSDbLoader",
"WebBaseLoader",
],
"value": "WebBaseLoader",
},
"code": {"show": False},
}
def build(self, web_path: str, loader: str) -> List[Document]:
if loader == "AZLyricsLoader":
loader_instance = AZLyricsLoader(web_path=web_path) # type: ignore
elif loader == "CollegeConfidentialLoader":
loader_instance = CollegeConfidentialLoader(web_path=web_path) # type: ignore
elif loader == "GitbookLoader":
loader_instance = GitbookLoader(web_page=web_path) # type: ignore
elif loader == "HNLoader":
loader_instance = HNLoader(web_path=web_path) # type: ignore
elif loader == "IFixitLoader":
loader_instance = IFixitLoader(web_path=web_path) # type: ignore
elif loader == "IMSDbLoader":
loader_instance = IMSDbLoader(web_path=web_path) # type: ignore
elif loader == "WebBaseLoader":
loader_instance = WebBaseLoader(web_path=web_path) # type: ignore
if loader_instance is None:
raise ValueError(f"No loader found for: {web_path}")
return loader_instance.load()

View file

@ -0,0 +1,39 @@
from langchain.memory.zep_memory import ZepMemory
from langflow import CustomComponent
from langchain.schema.memory import BaseMemory
class ZepMemoryComponent(CustomComponent):
display_name: str = "Zep Memory"
def build_config(self):
return {
"zep_api_url": {
"display_name": "Zep API URL",
"value": "http://localhost:8000",
},
"api_key": {
"password": True,
"display_name": "API Key",
},
"session_id": {
"display_name": "Session ID",
"info": "The session ID to use for the memory.",
},
}
def build(
self,
api_key: str,
session_id: str,
memory_key: str,
return_messages: bool,
zep_api_url: str = "http://localhost:8000",
) -> BaseMemory:
return ZepMemory(
session_id=session_id,
url=zep_api_url,
api_key=api_key,
memory_key=memory_key,
return_messages=return_messages,
)

View file

@ -13,7 +13,7 @@ class MetaphorToolkit(CustomComponent):
documentation = (
"https://python.langchain.com/docs/integrations/tools/metaphor_search"
)
beta = True
beta: bool = True
# api key should be password = True
field_config = {
"metaphor_api_key": {"display_name": "Metaphor API Key", "password": True},

View file

@ -10,7 +10,7 @@ class GetRequest(CustomComponent):
description: str = "Make a GET request to the given URL."
output_types: list[str] = ["Document"]
documentation: str = "https://docs.langflow.org/components/utilities#get-request"
beta = True
beta: bool = True
field_config = {
"url": {
"display_name": "URL",

View file

@ -20,7 +20,7 @@ class JSONDocumentBuilder(CustomComponent):
display_name: str = "JSON Document Builder"
description: str = "Build a Document containing a JSON object using a key and another Document page content."
output_types: list[str] = ["Document"]
beta = True
beta: bool = True
documentation: str = (
"https://docs.langflow.org/components/utilities#json-document-builder"
)

View file

@ -10,7 +10,7 @@ class PostRequest(CustomComponent):
description: str = "Make a POST request to the given URL."
output_types: list[str] = ["Document"]
documentation: str = "https://docs.langflow.org/components/utilities#post-request"
beta = True
beta: bool = True
field_config = {
"url": {"display_name": "URL", "info": "The URL to make the request to."},
"headers": {

View file

@ -10,7 +10,7 @@ class UpdateRequest(CustomComponent):
description: str = "Make a PATCH request to the given URL."
output_types: list[str] = ["Document"]
documentation: str = "https://docs.langflow.org/components/utilities#update-request"
beta = True
beta: bool = True
field_config = {
"url": {"display_name": "URL", "info": "The URL to make the request to."},
"headers": {

View file

@ -17,7 +17,7 @@ class ChromaComponent(CustomComponent):
display_name: str = "Chroma"
description: str = "Implementation of Vector Store using Chroma"
documentation = "https://python.langchain.com/docs/integrations/vectorstores/chroma"
beta = True
beta: bool = True
def build_config(self):
"""

View file

@ -13,7 +13,7 @@ class VectaraComponent(CustomComponent):
documentation = (
"https://python.langchain.com/docs/integrations/vectorstores/vectara"
)
beta = True
beta: bool = True
# api key should be password = True
field_config = {
"vectara_customer_id": {"display_name": "Vectara Customer ID"},

View file

@ -1,4 +1,4 @@
from typing import Optional
from typing import Optional, List
from langflow import CustomComponent
from langchain.vectorstores.pgvector import PGVector
@ -43,7 +43,7 @@ class PostgresqlVectorComponent(CustomComponent):
embedding: Embeddings,
pg_server_url: str,
collection_name: str,
documents: Optional[Document] = None,
documents: Optional[List[Document]] = None,
) -> VectorStore:
"""
Builds the Vector Store or BaseRetriever object.
@ -59,6 +59,13 @@ class PostgresqlVectorComponent(CustomComponent):
"""
try:
if documents is None:
return PGVector.from_existing_index(
embedding=embedding,
collection_name=collection_name,
connection_string=pg_server_url,
)
return PGVector.from_documents(
embedding=embedding,
documents=documents,

View file

@ -14,14 +14,14 @@ agents:
SQLAgent:
documentation: ""
chains:
LLMChain:
documentation: "https://python.langchain.com/docs/modules/chains/foundational/llm_chain"
# LLMChain:
# documentation: "https://python.langchain.com/docs/modules/chains/foundational/llm_chain"
LLMMathChain:
documentation: "https://python.langchain.com/docs/modules/chains/additional/llm_math"
LLMCheckerChain:
documentation: "https://python.langchain.com/docs/modules/chains/additional/llm_checker"
ConversationChain:
documentation: ""
# ConversationChain:
# documentation: ""
SeriesCharacterChain:
documentation: ""
MidJourneyPromptChain:

View file

@ -17,6 +17,8 @@
from .constants import (
Tool,
PromptTemplate,
ChatPromptTemplate,
BasePromptTemplate,
Chain,
BaseChatMemory,
BaseLLM,
@ -31,6 +33,8 @@ from .constants import (
AgentExecutor,
NestedDict,
Data,
BaseLanguageModel,
Callable,
)
__all__ = [
@ -41,6 +45,7 @@ __all__ = [
"Chain",
"BaseChatMemory",
"BaseLLM",
"BaseLanguageModel",
"BaseLoader",
"BaseMemory",
"BaseOutputParser",
@ -50,4 +55,7 @@ __all__ = [
"TextSplitter",
"Document",
"AgentExecutor",
"Callable",
"BasePromptTemplate",
"ChatPromptTemplate",
]

View file

@ -1,16 +1,16 @@
from langchain.agents.agent import AgentExecutor
from langchain.chains.base import Chain
from langchain.document_loaders.base import BaseLoader
from langchain.llms.base import BaseLLM
from langchain.llms.base import BaseLLM, BaseLanguageModel
from langchain.memory.chat_memory import BaseChatMemory
from langchain.prompts import PromptTemplate
from langchain.prompts import PromptTemplate, ChatPromptTemplate, BasePromptTemplate
from langchain.schema import BaseOutputParser, BaseRetriever, Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.memory import BaseMemory
from langchain.text_splitter import TextSplitter
from langchain.tools import Tool
from langchain.vectorstores.base import VectorStore
from typing import Union, Dict
from typing import Union, Dict, Callable
# Type alias for more complex dicts
NestedDict = Dict[str, Union[str, Dict]]
@ -25,7 +25,10 @@ LANGCHAIN_BASE_TYPES = {
"AgentExecutor": AgentExecutor,
"Tool": Tool,
"BaseLLM": BaseLLM,
"BaseLanguageModel": BaseLanguageModel,
"PromptTemplate": PromptTemplate,
"ChatPromptTemplate": ChatPromptTemplate,
"BasePromptTemplate": BasePromptTemplate,
"BaseLoader": BaseLoader,
"Document": Document,
"TextSplitter": TextSplitter,
@ -47,4 +50,5 @@ CUSTOM_COMPONENT_SUPPORTED_TYPES = {
"dict": dict,
"NestedDict": NestedDict,
"Data": Data,
"Callable": Callable,
}

View file

@ -206,6 +206,10 @@ class ChainVertex(Vertex):
) -> Any:
if not self._built or force:
# Check if the chain requires a PromptVertex
# Temporarily remove "code" from the params
self.params.pop("code", None)
for key, value in self.params.items():
if isinstance(value, PromptVertex):
# Build the PromptVertex, passing the tools if available

View file

@ -1,4 +1,4 @@
from typing import Dict, List, Optional
from typing import ClassVar, Dict, List, Optional
from langchain.agents import types
@ -15,7 +15,7 @@ from langflow.utils.util import build_template_from_class, build_template_from_m
class AgentCreator(LangChainTypeCreator):
type_name: str = "agents"
from_method_nodes = {"ZeroShotAgent": "from_llm_and_tools"}
from_method_nodes: ClassVar[Dict] = {"ZeroShotAgent": "from_llm_and_tools"}
@property
def frontend_node_class(self) -> type[AgentFrontendNode]:
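
The ClassVar[Dict] annotations added here and in the other creator classes below handle the same V2 rule from a different angle: a dict assigned at class level would otherwise be rejected as a non-annotated attribute, and annotating it as a plain Dict would turn it into a per-instance model field. ClassVar keeps it as shared class-level data. A minimal sketch, with the base class simplified to BaseModel:

from typing import ClassVar, Dict
from pydantic import BaseModel

class AgentCreator(BaseModel):
    type_name: str = "agents"
    # Without ClassVar, Pydantic V2 either errors on the missing annotation or,
    # if annotated as Dict, copies this mapping onto every instance as a field.
    from_method_nodes: ClassVar[Dict] = {"ZeroShotAgent": "from_llm_and_tools"}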

View file

@ -1,4 +1,4 @@
from typing import Any, Dict, List, Optional, Type
from typing import Any, ClassVar, Dict, List, Optional, Type
from langflow.custom.customs import get_custom_nodes
from langflow.interface.base import LangChainTypeCreator
@ -9,7 +9,7 @@ from langflow.template.frontend_node.chains import ChainFrontendNode
from loguru import logger
from langflow.utils.util import build_template_from_class, build_template_from_method
from langchain import chains
from langchain_experimental.sql import SQLDatabaseChain # type: ignore
from langchain_experimental.sql import SQLDatabaseChain
# Assuming necessary imports for Field, Template, and FrontendNode classes
@ -22,7 +22,7 @@ class ChainCreator(LangChainTypeCreator):
return ChainFrontendNode
#! We need to find a better solution for this
from_method_nodes = {
from_method_nodes: ClassVar[Dict] = {
"ConversationalRetrievalChain": "from_llm",
"LLMCheckerChain": "from_llm",
"SQLDatabaseChain": "from_llm",

View file

@ -4,7 +4,7 @@ from langchain.chains import ConversationChain
from langchain.memory.buffer import ConversationBufferMemory
from langchain.schema import BaseMemory
from langflow.interface.base import CustomChain
from pydantic import Field, root_validator
from pydantic.v1 import Field, root_validator
from langchain.chains.question_answering import load_qa_chain
from langflow.interface.utils import extract_input_variables_from_prompt
from langchain.base_language import BaseLanguageModel
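
Where classes still interoperate with LangChain's V1-style models, the commit does not migrate them; it switches the imports to the pydantic.v1 compatibility namespace bundled with Pydantic 2.x. A minimal sketch of that pattern (the model and field names are illustrative):

# Pydantic 2.x ships the legacy V1 API under pydantic.v1, keeping old-style
# validators working for code that must stay on the V1 model system.
from pydantic.v1 import BaseModel, Field, root_validator

class LegacyChainInput(BaseModel):
    prompt: str = Field(default="")

    @root_validator(pre=True)
    def fill_defaults(cls, values):
        # V1-style root validator: receives and returns the raw values dict
        return values or {}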

View file

@ -1,6 +1,5 @@
import ast
from typing import Any, Optional
from pydantic import BaseModel
from typing import Any, ClassVar, Optional
from fastapi import HTTPException
from langflow.utils import validate
@ -15,18 +14,19 @@ class ComponentFunctionEntrypointNameNullError(HTTPException):
pass
class Component(BaseModel):
ERROR_CODE_NULL = "Python code must be provided."
ERROR_FUNCTION_ENTRYPOINT_NAME_NULL = (
"The name of the entrypoint function must be provided."
)
class Component:
ERROR_CODE_NULL: ClassVar[str] = "Python code must be provided."
ERROR_FUNCTION_ENTRYPOINT_NAME_NULL: ClassVar[
str
] = "The name of the entrypoint function must be provided."
code: Optional[str]
function_entrypoint_name = "build"
code: Optional[str] = None
_function_entrypoint_name: str = "build"
field_config: dict = {}
def __init__(self, **data):
super().__init__(**data)
for key, value in data.items():
setattr(self, key, value)
def get_code_tree(self, code: str):
parser = CodeParser(code)
@ -39,7 +39,7 @@ class Component(BaseModel):
detail={"error": self.ERROR_CODE_NULL, "traceback": ""},
)
if not self.function_entrypoint_name:
if not self._function_entrypoint_name:
raise ComponentFunctionEntrypointNameNullError(
status_code=400,
detail={
@ -48,7 +48,7 @@ class Component(BaseModel):
},
)
return validate.create_function(self.code, self.function_entrypoint_name)
return validate.create_function(self.code, self._function_entrypoint_name)
def build_template_config(self, attributes) -> dict:
template_config = {}

View file

@ -1,27 +1,28 @@
from typing import Any, Callable, List, Optional, Union
from typing import Any, Callable, ClassVar, List, Optional, Union
from uuid import UUID
from fastapi import HTTPException
from langflow.field_typing.constants import CUSTOM_COMPONENT_SUPPORTED_TYPES
from langflow.interface.custom.component import Component
from langflow.interface.custom.directory_reader import DirectoryReader
from langflow.services.getters import get_db_service
from langflow.interface.custom.utils import extract_inner_type
from langflow.interface.custom.utils import extract_inner_type, extract_union_types
from langflow.utils import validate
from langflow.services.database.utils import session_getter
from langflow.services.database.models.flow import Flow
from pydantic import Extra
import yaml
class CustomComponent(Component, extra=Extra.allow):
code: Optional[str]
class CustomComponent(Component):
display_name: Optional[str] = "Custom Component"
description: Optional[str] = "Custom Component"
code: Optional[str] = None
field_config: dict = {}
code_class_base_inheritance = "CustomComponent"
function_entrypoint_name = "build"
code_class_base_inheritance: ClassVar[str] = "CustomComponent"
function_entrypoint_name: ClassVar[str] = "build"
function: Optional[Callable] = None
return_type_valid_list = list(CUSTOM_COMPONENT_SUPPORTED_TYPES.keys())
return_type_valid_list: List[str] = list(CUSTOM_COMPONENT_SUPPORTED_TYPES.keys())
repr_value: Optional[Any] = ""
user_id: Optional[Union[UUID, str]] = None
@ -151,9 +152,7 @@ class CustomComponent(Component, extra=Extra.allow):
return [return_type] if return_type in self.return_type_valid_list else []
# If the return type is a Union, then we need to parse it
return_type = return_type.replace("Union", "").replace("[", "").replace("]", "")
return_type = return_type.split(",")
return_type = [item.strip() for item in return_type]
return_type = extract_union_types(return_type)
return [item for item in return_type if item in self.return_type_valid_list]
@property
@ -202,7 +201,7 @@ class CustomComponent(Component, extra=Extra.allow):
raise ValueError(f"Flow {flow_id} not found")
if tweaks:
graph_data = process_tweaks(graph_data=graph_data, tweaks=tweaks)
return build_sorted_vertices(graph_data)
return build_sorted_vertices(graph_data, self.user_id)
def list_flows(self, *, get_session: Optional[Callable] = None) -> List[Flow]:
if not self.user_id:

View file

@ -10,7 +10,7 @@ class ClassCodeDetails(BaseModel):
"""
name: str
doc: Optional[str]
doc: Optional[str] = None
bases: list
attributes: list
methods: list
@ -23,7 +23,7 @@ class CallableCodeDetails(BaseModel):
"""
name: str
doc: Optional[str]
doc: Optional[str] = None
args: list
body: list
return_type: Optional[str]
return_type: Optional[str] = None

View file

@ -8,3 +8,14 @@ def extract_inner_type(return_type: str) -> str:
if match := re.match(r"list\[(.*)\]", return_type, re.IGNORECASE):
return match[1]
return return_type
def extract_union_types(return_type: str) -> list[str]:
"""
Extracts the member type names from a Union type hint string.
"""
# If the return type is a Union, then we need to parse it
return_type = return_type.replace("Union", "").replace("[", "").replace("]", "")
return_types = return_type.split(",")
return_types = [item.strip() for item in return_types]
return return_types
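
A quick usage sketch of the new helper (the input string is illustrative):

extract_union_types("Union[Chain, Callable]")
# -> ["Chain", "Callable"]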

View file

@ -293,7 +293,7 @@ def instantiate_embedding(node_type, class_object, params: Dict):
params = {
key: value
for key, value in params.items()
if key in class_object.__fields__
if key in class_object.model_fields
}
return class_object(**params)
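
Model.__fields__ was renamed to model_fields in Pydantic V2 (the old name survives only as a deprecated alias), and it is used here the same way to filter constructor params. A minimal sketch with an illustrative model:

from pydantic import BaseModel

class ExampleEmbedding(BaseModel):
    model: str = "example-model"
    chunk_size: int = 1000

params = {"model": "custom", "unexpected": 1}
filtered = {k: v for k, v in params.items() if k in ExampleEmbedding.model_fields}
# filtered == {"model": "custom"}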

View file

@ -196,7 +196,7 @@ def initialize_chroma(class_object: Type[Chroma], params: dict):
params.pop("documents", None)
params.pop("texts", None)
params["embedding_function"] = params.pop("embedding")
chromadb = class_object(**params)
chromadb_instance = class_object(**params)
else:
if "texts" in params:
params["documents"] = params.pop("texts")
@ -211,10 +211,10 @@ def initialize_chroma(class_object: Type[Chroma], params: dict):
if value is None:
doc.metadata[key] = ""
chromadb = class_object.from_documents(**params)
chromadb_instance = class_object.from_documents(**params)
if persist:
chromadb.persist()
return chromadb
chromadb_instance.persist()
return chromadb_instance
def initialize_qdrant(class_object: Type[Qdrant], params: dict):

View file

@ -1,4 +1,4 @@
from typing import Dict, List, Optional, Type
from typing import ClassVar, Dict, List, Optional, Type
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.custom_lists import memory_type_to_cls_dict
@ -14,7 +14,7 @@ from langflow.custom.customs import get_custom_nodes
class MemoryCreator(LangChainTypeCreator):
type_name: str = "memories"
from_method_nodes = {
from_method_nodes: ClassVar[Dict] = {
"ZepChatMessageHistory": "__init__",
"SQLiteEntityStore": "__init__",
}

View file

@ -1,4 +1,4 @@
from typing import Dict, List, Optional, Type
from typing import ClassVar, Dict, List, Optional, Type
from langchain import output_parsers
@ -13,7 +13,7 @@ from langflow.utils.util import build_template_from_class, build_template_from_m
class OutputParserCreator(LangChainTypeCreator):
type_name: str = "output_parsers"
from_method_nodes = {
from_method_nodes: ClassVar[Dict] = {
"StructuredOutputParser": "from_response_schemas",
}

View file

@ -1,7 +1,7 @@
from typing import Dict, List, Optional, Type
from langchain.prompts import PromptTemplate
from pydantic import root_validator
from pydantic.v1 import root_validator
from langflow.interface.utils import extract_input_variables_from_prompt

View file

@ -1,4 +1,4 @@
from typing import Any, Dict, List, Optional, Type
from typing import Any, ClassVar, Dict, List, Optional, Type
from langchain import retrievers
@ -14,7 +14,10 @@ from langflow.utils.util import build_template_from_method, build_template_from_
class RetrieverCreator(LangChainTypeCreator):
type_name: str = "retrievers"
from_method_nodes = {"MultiQueryRetriever": "from_llm", "ZepRetriever": "__init__"}
from_method_nodes: ClassVar[Dict] = {
"MultiQueryRetriever": "from_llm",
"ZepRetriever": "__init__",
}
@property
def frontend_node_class(self) -> Type[RetrieverFrontendNode]:

View file

@ -1,9 +1,12 @@
from typing import Dict, Tuple
from typing import Dict, Tuple, Optional, Union
from langflow.graph import Graph
from loguru import logger
from uuid import UUID
def build_sorted_vertices(data_graph) -> Tuple[Graph, Dict]:
def build_sorted_vertices(
data_graph, user_id: Optional[Union[str, UUID]] = None
) -> Tuple[Graph, Dict]:
"""
Build langchain object from data_graph.
"""
@ -13,7 +16,7 @@ def build_sorted_vertices(data_graph) -> Tuple[Graph, Dict]:
sorted_vertices = graph.topological_sort()
artifacts = {}
for vertex in sorted_vertices:
vertex.build()
vertex.build(user_id=user_id)
if vertex.artifacts:
artifacts.update(vertex.artifacts)
return graph, artifacts

View file

@ -21,6 +21,7 @@ from langflow.template.field.base import TemplateField
from langflow.template.template.base import Template
from langflow.utils import util
from langflow.utils.util import build_template_from_class
from langflow.utils.logger import logger
TOOL_INPUTS = {
"str": TemplateField(
@ -35,7 +36,7 @@ TOOL_INPUTS = {
field_type="BaseLanguageModel", required=True, is_list=False, show=True
),
"func": TemplateField(
field_type="function",
field_type="Callable",
required=True,
is_list=False,
show=True,
@ -72,7 +73,11 @@ class ToolCreator(LangChainTypeCreator):
all_tools = {}
for tool, tool_fcn in ALL_TOOLS_NAMES.items():
tool_params = get_tool_params(tool_fcn)
try:
tool_params = get_tool_params(tool_fcn)
except Exception:
logger.error(f"Error getting params for tool {tool}")
continue
tool_name = tool_params.get("name") or tool
@ -121,7 +126,7 @@ class ToolCreator(LangChainTypeCreator):
elif tool_type in CUSTOM_TOOLS:
# Get custom tool params
params = self.type_to_loader_dict[name]["params"] # type: ignore
base_classes = ["function"]
base_classes = ["Callable"]
if node := customs.get_custom_nodes("tools").get(tool_type):
return node
elif tool_type in FILE_TOOLS:
@ -131,10 +136,15 @@ class ToolCreator(LangChainTypeCreator):
tool_dict = build_template_from_class(tool_type, OTHER_TOOLS)
fields = tool_dict["template"]
# _type is the only key in fields
# return None
if len(fields) == 1 and "_type" in fields:
return None
# Pop unnecessary fields and add name
fields.pop("_type") # type: ignore
fields.pop("return_direct") # type: ignore
fields.pop("verbose") # type: ignore
fields.pop("return_direct", None) # type: ignore
fields.pop("verbose", None) # type: ignore
tool_params = {
"name": fields.pop("name")["value"], # type: ignore

View file

@ -1,7 +1,7 @@
from typing import Callable, Optional
from langflow.interface.importing.utils import get_function
from pydantic import BaseModel, validator
from pydantic.v1 import BaseModel, validator
from langflow.utils import validate
from langchain.agents.tools import Tool

View file

@ -1,6 +1,7 @@
import ast
import contextlib
from typing import Any, List
from typing import Any, List, Union, Optional
from uuid import UUID
from langflow.api.utils import get_new_key
from langflow.interface.agents.base import agent_creator
from langflow.interface.chains.base import chain_creator
@ -208,7 +209,9 @@ def update_attributes(frontend_node, template_config):
frontend_node[attribute] = template_config[attribute]
def build_field_config(custom_component: CustomComponent):
def build_field_config(
custom_component: CustomComponent, user_id: Optional[Union[str, UUID]] = None
):
"""Build the field configuration for a custom component"""
try:
@ -218,7 +221,7 @@ def build_field_config(custom_component: CustomComponent):
return {}
try:
return custom_class().build_config()
return custom_class(user_id=user_id).build_config()
except Exception as exc:
logger.error(f"Error while building field config: {str(exc)}")
return {}
@ -306,7 +309,9 @@ def add_output_types(frontend_node, return_types: List[str]):
frontend_node.get("output_types").append(return_type)
def build_langchain_template_custom_component(custom_component: CustomComponent):
def build_langchain_template_custom_component(
custom_component: CustomComponent, user_id: Optional[Union[str, UUID]] = None
):
"""Build a custom component template for the langchain"""
try:
logger.debug("Building custom component template")
@ -319,7 +324,7 @@ def build_langchain_template_custom_component(custom_component: CustomComponent)
update_attributes(frontend_node, template_config)
logger.debug("Updated attributes")
field_config = build_field_config(custom_component)
field_config = build_field_config(custom_component, user_id=user_id)
logger.debug("Built field config")
entrypoint_args = custom_component.get_function_entrypoint_args

View file

@ -28,10 +28,14 @@ class UtilityCreator(LangChainTypeCreator):
"""
if self.type_dict is None:
settings_service = get_settings_service()
self.type_dict = {
utility_name: import_class(f"langchain.utilities.{utility_name}")
for utility_name in utilities.__all__
}
self.type_dict = {}
for utility_name in utilities.__all__:
try:
imported = import_class(f"langchain.utilities.{utility_name}")
self.type_dict[utility_name] = imported
except Exception:
pass
self.type_dict["SQLDatabase"] = utilities.SQLDatabase
# Filter according to settings.utilities
self.type_dict = {

View file

@ -1,4 +1,4 @@
from typing import Dict, List, Optional
from typing import ClassVar, Dict, List, Optional
from langchain.utilities import requests, sql_database
@ -10,7 +10,7 @@ from langflow.utils.util import build_template_from_class, build_template_from_m
class WrapperCreator(LangChainTypeCreator):
type_name: str = "wrappers"
from_method_nodes = {"SQLDatabase": "from_uri"}
from_method_nodes: ClassVar[Dict] = {"SQLDatabase": "from_uri"}
@property
def type_to_loader_dict(self) -> Dict:

View file

@ -94,7 +94,7 @@ class DatabaseService(Service):
legacy_tables = ["flowstyle"]
for table, model in model_mapping.items():
expected_columns = list(model.__fields__.keys())
expected_columns = list(model.model_fields.keys())
try:
available_columns = [

View file

@ -19,7 +19,6 @@ def orjson_dumps(v, *, default=None, sort_keys=False, indent_2=True):
class SQLModelSerializable(SQLModel):
class Config:
orm_mode = True
json_loads = orjson.loads
json_dumps = orjson_dumps
# TODO[pydantic]: The following keys were removed: `json_loads`, `json_dumps`.
# Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-config for more information.
pass

View file

@ -1,7 +1,7 @@
# Path: src/backend/langflow/database/models/flow.py
from langflow.services.database.models.base import SQLModelSerializable
from pydantic import validator
from pydantic import field_validator
from sqlmodel import Field, JSON, Column, Relationship
from uuid import UUID, uuid4
@ -13,10 +13,10 @@ if TYPE_CHECKING:
class FlowBase(SQLModelSerializable):
name: str = Field(index=True)
description: Optional[str] = Field(index=True)
description: Optional[str] = Field(index=True, nullable=True, default=None)
data: Optional[Dict] = Field(default=None, nullable=True)
@validator("data")
@field_validator("data")
def validate_json(v):
if not v:
return v

View file

@ -54,6 +54,9 @@ def update_user(
def update_user_last_login_at(user_id: UUID, db: Session = Depends(get_session)):
user_data = UserUpdate(last_login_at=datetime.now(timezone.utc)) # type: ignore
user = get_user_by_id(db, user_id)
return update_user(user, user_data, db)
try:
user_data = UserUpdate(last_login_at=datetime.now(timezone.utc)) # type: ignore
user = get_user_by_id(db, user_id)
return update_user(user, user_data, db)
except Exception:
pass

View file

@ -20,7 +20,7 @@ class User(SQLModelSerializable, table=True):
is_superuser: bool = Field(default=False)
create_at: datetime = Field(default_factory=datetime.utcnow)
updated_at: datetime = Field(default_factory=datetime.utcnow)
last_login_at: Optional[datetime] = Field()
last_login_at: Optional[datetime] = Field(nullable=True)
api_keys: list["ApiKey"] = Relationship(
back_populates="user",
sa_relationship_kwargs={"cascade": "delete"},
@ -41,13 +41,13 @@ class UserRead(SQLModel):
is_superuser: bool = Field()
create_at: datetime = Field()
updated_at: datetime = Field()
last_login_at: Optional[datetime] = Field()
last_login_at: Optional[datetime] = Field(nullable=True)
class UserUpdate(SQLModel):
username: Optional[str] = Field()
profile_image: Optional[str] = Field()
password: Optional[str] = Field()
is_active: Optional[bool] = Field()
is_superuser: Optional[bool] = Field()
last_login_at: Optional[datetime] = Field()
username: Optional[str] = None
profile_image: Optional[str] = None
password: Optional[str] = None
is_active: Optional[bool] = None
is_superuser: Optional[bool] = None
last_login_at: Optional[datetime] = None

View file

@ -1,16 +1,18 @@
import secrets
from pathlib import Path
from typing import Optional
import secrets
from loguru import logger
from passlib.context import CryptContext
from pydantic import Field, validator
from pydantic_settings import BaseSettings
from langflow.services.settings.constants import (
DEFAULT_SUPERUSER,
DEFAULT_SUPERUSER_PASSWORD,
)
from langflow.services.settings.utils import read_secret_from_file, write_secret_to_file
from pydantic import BaseSettings, Field, validator
from passlib.context import CryptContext
from loguru import logger
class AuthSettings(BaseSettings):
# Login settings
@ -18,8 +20,7 @@ class AuthSettings(BaseSettings):
SECRET_KEY: str = Field(
default="",
description="Secret key for JWT. If not provided, a random one will be generated.",
env="LANGFLOW_SECRET_KEY",
allow_mutation=False,
frozen=False,
)
ALGORITHM: str = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES: int = 60
@ -39,7 +40,7 @@ class AuthSettings(BaseSettings):
SUPERUSER: str = DEFAULT_SUPERUSER
SUPERUSER_PASSWORD: str = DEFAULT_SUPERUSER_PASSWORD
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
pwd_context: CryptContext = CryptContext(schemes=["bcrypt"], deprecated="auto")
class Config:
validate_assignment = True

View file

@ -7,7 +7,8 @@ from typing import Optional, List
from pathlib import Path
import yaml
from pydantic import BaseSettings, root_validator, validator
from pydantic import field_validator, validator
from pydantic_settings import BaseSettings, SettingsConfigDict
from loguru import logger
# BASE_COMPONENTS_PATH = str(Path(__file__).parent / "components")
@ -111,7 +112,7 @@ class Settings(BaseSettings):
return value
@validator("COMPONENTS_PATH", pre=True)
@field_validator("COMPONENTS_PATH", mode="before")
def set_components_path(cls, value):
if os.getenv("LANGFLOW_COMPONENTS_PATH"):
logger.debug("Adding LANGFLOW_COMPONENTS_PATH to components_path")
@ -143,17 +144,17 @@ class Settings(BaseSettings):
logger.debug(f"Components path: {value}")
return value
class Config:
validate_assignment = True
extra = "ignore"
env_prefix = "LANGFLOW_"
model_config = SettingsConfigDict(
validate_assignment=True, extra="ignore", env_prefix="LANGFLOW_"
)
@root_validator(allow_reuse=True)
def validate_lists(cls, values):
for key, value in values.items():
if key != "dev" and not value:
values[key] = []
return values
# @model_validator()
# @classmethod
# def validate_lists(cls, values):
# for key, value in values.items():
# if key != "dev" and not value:
# values[key] = []
# return values
def update_from_yaml(self, file_path: str, dev: bool = False):
new_settings = load_settings_from_yaml(file_path)
@ -227,7 +228,7 @@ def load_settings_from_yaml(file_path: str) -> Settings:
settings_dict = {k.upper(): v for k, v in settings_dict.items()}
for key in settings_dict:
if key not in Settings.__fields__.keys():
if key not in Settings.model_fields.keys():
raise KeyError(f"Key {key} not found in settings")
logger.debug(f"Loading {len(settings_dict[key])} {key} from {file_path}")

View file

@ -28,7 +28,7 @@ class SettingsService(Service):
settings_dict = {k.upper(): v for k, v in settings_dict.items()}
for key in settings_dict:
if key not in Settings.__fields__.keys():
if key not in Settings.model_fields.keys():
raise KeyError(f"Key {key} not found in settings")
logger.debug(
f"Loading {len(settings_dict[key])} {key} from {file_path}"

View file

@ -0,0 +1,177 @@
import contextlib
import json
import os
from typing import Optional, List
from pathlib import Path
import yaml
from pydantic import validator, model_validator
from pydantic_settings import BaseSettings
from langflow.utils.logger import logger
BASE_COMPONENTS_PATH = str(Path(__file__).parent / "components")
class Settings(BaseSettings):
CHAINS: dict = {}
AGENTS: dict = {}
PROMPTS: dict = {}
LLMS: dict = {}
TOOLS: dict = {}
MEMORIES: dict = {}
EMBEDDINGS: dict = {}
VECTORSTORES: dict = {}
DOCUMENTLOADERS: dict = {}
WRAPPERS: dict = {}
RETRIEVERS: dict = {}
TOOLKITS: dict = {}
TEXTSPLITTERS: dict = {}
UTILITIES: dict = {}
OUTPUT_PARSERS: dict = {}
CUSTOM_COMPONENTS: dict = {}
DEV: bool = False
DATABASE_URL: Optional[str] = None
CACHE: str = "InMemoryCache"
REMOVE_API_KEYS: bool = False
COMPONENTS_PATH: List[str] = []
@validator("DATABASE_URL", pre=True)
def set_database_url(cls, value):
if not value:
logger.debug(
"No database_url provided, trying LANGFLOW_DATABASE_URL env variable"
)
if langflow_database_url := os.getenv("LANGFLOW_DATABASE_URL"):
value = langflow_database_url
logger.debug("Using LANGFLOW_DATABASE_URL env variable.")
else:
logger.debug("No DATABASE_URL env variable, using sqlite database")
value = "sqlite:///./langflow.db"
return value
@validator("COMPONENTS_PATH", pre=True)
def set_components_path(cls, value):
if os.getenv("LANGFLOW_COMPONENTS_PATH"):
logger.debug("Adding LANGFLOW_COMPONENTS_PATH to components_path")
langflow_component_path = os.getenv("LANGFLOW_COMPONENTS_PATH")
if (
Path(langflow_component_path).exists()
and langflow_component_path not in value
):
if isinstance(langflow_component_path, list):
for path in langflow_component_path:
if path not in value:
value.append(path)
logger.debug(
f"Extending {langflow_component_path} to components_path"
)
elif langflow_component_path not in value:
value.append(langflow_component_path)
logger.debug(
f"Appending {langflow_component_path} to components_path"
)
if not value:
value = [BASE_COMPONENTS_PATH]
logger.debug("Setting default components path to components_path")
elif BASE_COMPONENTS_PATH not in value:
value.append(BASE_COMPONENTS_PATH)
logger.debug("Adding default components path to components_path")
logger.debug(f"Components path: {value}")
return value
class Config:
validate_assignment = True
extra = "ignore"
env_prefix = "LANGFLOW_"
@model_validator(mode="after")
def validate_lists(cls, values):
for key, value in values.items():
if key != "dev" and not value:
values[key] = []
return values
def update_from_yaml(self, file_path: str, dev: bool = False):
new_settings = load_settings_from_yaml(file_path)
self.CHAINS = new_settings.CHAINS or {}
self.AGENTS = new_settings.AGENTS or {}
self.PROMPTS = new_settings.PROMPTS or {}
self.LLMS = new_settings.LLMS or {}
self.TOOLS = new_settings.TOOLS or {}
self.MEMORIES = new_settings.MEMORIES or {}
self.WRAPPERS = new_settings.WRAPPERS or {}
self.TOOLKITS = new_settings.TOOLKITS or {}
self.TEXTSPLITTERS = new_settings.TEXTSPLITTERS or {}
self.UTILITIES = new_settings.UTILITIES or {}
self.EMBEDDINGS = new_settings.EMBEDDINGS or {}
self.VECTORSTORES = new_settings.VECTORSTORES or {}
self.DOCUMENTLOADERS = new_settings.DOCUMENTLOADERS or {}
self.RETRIEVERS = new_settings.RETRIEVERS or {}
self.OUTPUT_PARSERS = new_settings.OUTPUT_PARSERS or {}
self.CUSTOM_COMPONENTS = new_settings.CUSTOM_COMPONENTS or {}
self.COMPONENTS_PATH = new_settings.COMPONENTS_PATH or []
self.DEV = dev
def update_settings(self, **kwargs):
logger.debug("Updating settings")
for key, value in kwargs.items():
# value may contain sensitive information, so we don't want to log it
if not hasattr(self, key):
logger.debug(f"Key {key} not found in settings")
continue
logger.debug(f"Updating {key}")
if isinstance(getattr(self, key), list):
# value might be a '[something]' string
with contextlib.suppress(json.decoder.JSONDecodeError):
value = json.loads(str(value))
if isinstance(value, list):
for item in value:
if isinstance(item, Path):
item = str(item)
if item not in getattr(self, key):
getattr(self, key).append(item)
logger.debug(f"Extended {key}")
else:
if isinstance(value, Path):
value = str(value)
if value not in getattr(self, key):
getattr(self, key).append(value)
logger.debug(f"Appended {key}")
else:
setattr(self, key, value)
logger.debug(f"Updated {key}")
logger.debug(f"{key}: {getattr(self, key)}")
def save_settings_to_yaml(settings: Settings, file_path: str):
with open(file_path, "w") as f:
settings_dict = settings.dict()
yaml.dump(settings_dict, f)
def load_settings_from_yaml(file_path: str) -> Settings:
# Check if a string is a valid path or a file name
if "/" not in file_path:
# Get current path
current_path = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(current_path, file_path)
with open(file_path, "r") as f:
settings_dict = yaml.safe_load(f)
settings_dict = {k.upper(): v for k, v in settings_dict.items()}
for key in settings_dict:
if key not in Settings.model_fields.keys():
raise KeyError(f"Key {key} not found in settings")
logger.debug(f"Loading {len(settings_dict[key])} {key} from {file_path}")
return Settings(**settings_dict)
settings = load_settings_from_yaml("config.yaml")

View file

@ -206,7 +206,7 @@ class InitializeAgentNode(FrontendNode):
],
)
description: str = """Construct a zero shot agent from an LLM and tools."""
base_classes: list[str] = ["AgentExecutor", "function"]
base_classes: list[str] = ["AgentExecutor", "Callable"]
def to_dict(self):
return super().to_dict()

View file

@ -1,6 +1,6 @@
from collections import defaultdict
import re
from typing import List, Optional
from typing import ClassVar, DefaultDict, Dict, List, Optional
from pydantic import BaseModel, Field
@ -15,10 +15,10 @@ from langflow.utils import constants
class FieldFormatters(BaseModel):
formatters = {
formatters: ClassVar[Dict] = {
"openai_api_key": field_formatters.OpenAIAPIKeyFormatter(),
}
base_formatters = {
base_formatters: ClassVar[Dict] = {
"kwargs": field_formatters.KwargsFormatter(),
"optional": field_formatters.RemoveOptionalFormatter(),
"list": field_formatters.ListTypeFormatter(),
@ -49,7 +49,7 @@ class FrontendNode(BaseModel):
name: str = ""
display_name: str = ""
documentation: str = ""
custom_fields: defaultdict = defaultdict(list)
custom_fields: Optional[DefaultDict[str, List[str]]] = defaultdict(list)
output_types: List[str] = []
field_formatters: FieldFormatters = Field(default_factory=FieldFormatters)
beta: bool = False

View file

@ -87,6 +87,8 @@ class ChainFrontendNode(FrontendNode):
field.required = True
field.show = True
field.advanced = False
field.field_type = "BaseLanguageModel" # temporary fix
field.is_list = False
if field.name == "return_source_documents":
field.required = False
@ -140,7 +142,7 @@ class SeriesCharacterChainNode(FrontendNode):
"Chain",
"ConversationChain",
"SeriesCharacterChain",
"function",
"Callable",
]
@ -241,7 +243,7 @@ class CombineDocsChainNode(FrontendNode):
],
)
description: str = """Load question answering chain."""
base_classes: list[str] = ["BaseCombineDocumentsChain", "function"]
base_classes: list[str] = ["BaseCombineDocumentsChain", "Callable"]
def to_dict(self):
return super().to_dict()

View file

@ -1,4 +1,4 @@
from typing import Optional
from typing import ClassVar, Dict, Optional
from langflow.template.field.base import TemplateField
from langflow.template.frontend_node.base import FrontendNode
@ -23,7 +23,7 @@ class DocumentLoaderFrontNode(FrontendNode):
self.base_classes = ["Document"]
self.output_types = ["Document"]
file_path_templates = {
file_path_templates: ClassVar[Dict] = {
"AirbyteJSONLoader": build_file_field(suffixes=[".json"], fileTypes=["json"]),
"CoNLLULoader": build_file_field(suffixes=[".csv"], fileTypes=["csv"]),
"CSVLoader": build_file_field(suffixes=[".csv"], fileTypes=["csv"]),

View file

@ -1,4 +1,4 @@
from typing import Optional
from typing import ClassVar, Dict, Optional
from langflow.template.field.base import TemplateField
from langflow.template.frontend_node.constants import FORCE_SHOW_FIELDS
from langflow.template.frontend_node.formatter.base import FieldFormatter
@ -21,7 +21,7 @@ class OpenAIAPIKeyFormatter(FieldFormatter):
class ModelSpecificFieldFormatter(FieldFormatter):
MODEL_DICT = {
MODEL_DICT: ClassVar[Dict] = {
"OpenAI": OPENAI_MODELS,
"ChatOpenAI": CHAT_OPENAI_MODELS,
"Anthropic": ANTHROPIC_MODELS,
@ -86,7 +86,7 @@ class UnionTypeFormatter(FieldFormatter):
class SpecialFieldFormatter(FieldFormatter):
SPECIAL_FIELD_HANDLERS = {
SPECIAL_FIELD_HANDLERS: ClassVar[Dict] = {
"allowed_tools": lambda field: "Tool",
"max_value_length": lambda field: "int",
}

View file

@ -35,7 +35,7 @@ class ToolNode(FrontendNode):
),
TemplateField(
name="func",
field_type="function",
field_type="Callable",
required=True,
is_list=False,
show=True,
@ -135,7 +135,7 @@ class PythonFunctionNode(FrontendNode):
],
)
description: str = "Python function to be executed."
base_classes: list[str] = ["function"]
base_classes: list[str] = ["Callable"]
def to_dict(self):
return super().to_dict()

View file

@ -34,7 +34,7 @@ def build_template_from_function(
docs = parse(_class.__doc__)
variables = {"_type": _type}
for class_field_items, value in _class.__fields__.items():
for class_field_items, value in _class.model_fields.items():
if class_field_items in ["callback_manager"]:
continue
variables[class_field_items] = {}
@ -60,7 +60,7 @@ def build_template_from_function(
# the output to be a function
base_classes = get_base_classes(_class)
if add_function:
base_classes.append("function")
base_classes.append("Callable")
return {
"template": format_dict(variables, name),
@ -114,7 +114,7 @@ def build_template_from_class(
# Adding function to base classes to allow
# the output to be a function
if add_function:
base_classes.append("function")
base_classes.append("Callable")
return {
"template": format_dict(variables, name),
"description": docs.short_description or "",
@ -178,7 +178,7 @@ def build_template_from_method(
# Adding function to base classes to allow the output to be a function
if add_function:
base_classes.append("function")
base_classes.append("Callable")
return {
"template": format_dict(variables, class_name),
@ -276,6 +276,7 @@ def format_dict(
_type = remove_optional_wrapper(_type)
_type = check_list_type(_type, value)
_type = replace_mapping_with_dict(_type)
_type = get_type_from_union_literal(_type)
value["type"] = get_formatted_type(key, _type)
value["show"] = should_show_field(value, key)
@ -295,6 +296,15 @@ def format_dict(
return dictionary
# "Union[Literal['f-string'], Literal['jinja2']]" -> "str"
def get_type_from_union_literal(union_literal: str) -> str:
# if the union contains Literal string members,
# the resulting type is treated as a plain string
if "Literal" in union_literal:
return "str"
return union_literal
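
For illustration, the new helper simply collapses any type string containing Literal members into a plain str; the inputs below are assumed examples, not taken from the test suite:

assert get_type_from_union_literal("Union[Literal['f-string'], Literal['jinja2']]") == "str"
assert get_type_from_union_literal("Union[str, int]") == "Union[str, int]"  # left unchanged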
def get_type(value: Any) -> Union[str, type]:
"""
Retrieves the type value from the dictionary.
@ -302,7 +312,8 @@ def get_type(value: Any) -> Union[str, type]:
Returns:
The type value.
"""
_type = value["type"]
# get "type" or "annotation" from the value
_type = value.get("type") or value.get("annotation")
return _type if isinstance(_type, str) else _type.__name__
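
The fallback above matters because templates built from Pydantic V2 models may expose the declared type under "annotation" rather than "type". A rough sketch of the behaviour with illustrative inputs:

v1_style = {"type": str}            # pre-migration shape
v2_style = {"annotation": str}      # shape derived from a Pydantic V2 FieldInfo
assert get_type(v1_style) == "str"
assert get_type(v2_style) == "str"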

View file

@ -8,7 +8,7 @@ export default function DisclosureComponent({
openDisc,
}: DisclosureComponentType): JSX.Element {
return (
<Disclosure as="div" key={title}>
<Disclosure as="div" defaultOpen={openDisc} key={title}>
{({ open }) => (
<>
<div>
@ -35,9 +35,7 @@ export default function DisclosureComponent({
</div>
</Disclosure.Button>
</div>
<Disclosure.Panel as="div" static={openDisc}>
{children}
</Disclosure.Panel>
<Disclosure.Panel as="div">{children}</Disclosure.Panel>
</>
)}
</Disclosure>

View file

@ -122,7 +122,7 @@ export default function ExtraSidebar(): JSX.Element {
}
}
});
setSearch("search");
setSearch("");
return ret;
});
}
@ -227,8 +227,12 @@ export default function ExtraSidebar(): JSX.Element {
.map((SBSectionName: keyof APIObjectType, index) =>
Object.keys(dataFilter[SBSectionName]).length > 0 ? (
<DisclosureComponent
openDisc={search.length == 0 ? false : true}
key={index}
openDisc={
getFilterEdge.length !== 0 || search.length !== 0
? true
: false
}
key={index + search + JSON.stringify(getFilterEdge)}
button={{
title: nodeNames[SBSectionName] ?? nodeNames.unknown,
Icon:

View file

@ -8,7 +8,6 @@ import {
ReactFlowJsonObject,
XYPosition,
} from "reactflow";
import ShortUniqueId from "short-unique-id";
import { specialCharsRegex } from "../constants/constants";
import { APITemplateType, TemplateVariableType } from "../types/api";
import {

View file

@ -287,7 +287,10 @@ def flow(client, json_flow: str, active_user):
loaded_json = json.loads(json_flow)
flow_data = FlowCreate(
name="test_flow", data=loaded_json.get("data"), user_id=active_user.id
name="test_flow",
data=loaded_json.get("data"),
user_id=active_user.id,
description="description",
)
flow = Flow(**flow_data.dict())
with session_getter(get_db_service()) as session:

View file

@ -12,7 +12,7 @@ def test_zero_shot_agent(client: TestClient, logged_in_headers):
"ZeroShotAgent",
"BaseSingleActionAgent",
"Agent",
"function",
"Callable",
}
template = zero_shot_agent["template"]
@ -202,7 +202,7 @@ def test_initialize_agent(client: TestClient, logged_in_headers):
agents = json_response["agents"]
initialize_agent = agents["AgentInitializer"]
assert initialize_agent["base_classes"] == ["AgentExecutor", "function"]
assert initialize_agent["base_classes"] == ["AgentExecutor", "Callable"]
template = initialize_agent["template"]
assert template["agent"] == {

View file

@ -9,170 +9,6 @@ from fastapi.testclient import TestClient
# assert set(chains.keys()) == set(settings.chains)
# Test the ConversationChain object
def test_conversation_chain(client: TestClient, logged_in_headers):
response = client.get("api/v1/all", headers=logged_in_headers)
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
chain = chains["ConversationChain"]
# Test the base classes, template, memory, verbose, llm, input_key, output_key, and _type objects
assert set(chain["base_classes"]) == {
"ConversationChain",
"LLMChain",
"Chain",
"function",
}
template = chain["template"]
assert template["memory"] == {
"required": False,
"dynamic": False,
"placeholder": "",
"show": True,
"multiline": False,
"password": False,
"name": "memory",
"type": "BaseMemory",
"list": False,
"advanced": False,
"info": "",
}
assert template["verbose"] == {
"required": False,
"dynamic": False,
"placeholder": "",
"show": False,
"multiline": False,
"password": False,
"name": "verbose",
"type": "bool",
"list": False,
"advanced": True,
"info": "",
}
assert template["llm"] == {
"required": True,
"dynamic": False,
"placeholder": "",
"show": True,
"multiline": False,
"password": False,
"name": "llm",
"type": "BaseLanguageModel",
"list": False,
"advanced": False,
"info": "",
}
assert template["input_key"] == {
"required": True,
"dynamic": False,
"placeholder": "",
"show": True,
"multiline": False,
"value": "input",
"password": False,
"name": "input_key",
"type": "str",
"list": False,
"advanced": True,
"info": "",
}
assert template["output_key"] == {
"required": True,
"dynamic": False,
"placeholder": "",
"show": True,
"multiline": False,
"value": "response",
"password": False,
"name": "output_key",
"type": "str",
"list": False,
"advanced": True,
"info": "",
}
assert template["_type"] == "ConversationChain"
# Test the description object
assert (
chain["description"]
== "Chain to have a conversation and load context from memory."
)
def test_llm_chain(client: TestClient, logged_in_headers):
response = client.get("api/v1/all", headers=logged_in_headers)
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
chain = chains["LLMChain"]
# Test the base classes, template, memory, verbose, llm, input_key, output_key, and _type objects
assert set(chain["base_classes"]) == {
"function",
"LLMChain",
"Chain",
}
template = chain["template"]
assert template["memory"] == {
"required": False,
"dynamic": False,
"placeholder": "",
"show": True,
"multiline": False,
"password": False,
"name": "memory",
"type": "BaseMemory",
"list": False,
"advanced": False,
"info": "",
}
assert template["verbose"] == {
"required": False,
"dynamic": False,
"placeholder": "",
"show": False,
"multiline": False,
"value": False,
"password": False,
"name": "verbose",
"type": "bool",
"list": False,
"advanced": True,
"info": "",
}
assert template["llm"] == {
"required": True,
"dynamic": False,
"placeholder": "",
"show": True,
"multiline": False,
"password": False,
"name": "llm",
"type": "BaseLanguageModel",
"list": False,
"advanced": False,
"info": "",
}
assert template["output_key"] == {
"required": True,
"dynamic": False,
"placeholder": "",
"show": True,
"multiline": False,
"value": "text",
"password": False,
"name": "output_key",
"type": "str",
"list": False,
"advanced": True,
"info": "",
}
def test_llm_checker_chain(client: TestClient, logged_in_headers):
response = client.get("api/v1/all", headers=logged_in_headers)
assert response.status_code == 200
@ -182,7 +18,7 @@ def test_llm_checker_chain(client: TestClient, logged_in_headers):
# Test the base classes, template, memory, verbose, llm, input_key, output_key, and _type objects
assert set(chain["base_classes"]) == {
"function",
"Callable",
"LLMCheckerChain",
"Chain",
}
@ -216,7 +52,7 @@ def test_llm_math_chain(client: TestClient, logged_in_headers):
chain = chains["LLMMathChain"]
# Test the base classes, template, memory, verbose, llm, input_key, output_key, and _type objects
assert set(chain["base_classes"]) == {
"function",
"Callable",
"LLMMathChain",
"Chain",
}
@ -309,7 +145,7 @@ def test_series_character_chain(client: TestClient, logged_in_headers):
# Test the base classes, template, memory, verbose, llm, input_key, output_key, and _type objects
assert set(chain["base_classes"]) == {
"function",
"Callable",
"LLMChain",
"BaseCustomChain",
"Chain",

View file

@ -10,7 +10,6 @@ from langflow.interface.custom.base import CustomComponent
from langflow.interface.custom.component import (
Component,
ComponentCodeNullError,
ComponentFunctionEntrypointNameNullError,
)
from langflow.interface.custom.code_parser import CodeParser, CodeSyntaxError
@ -73,16 +72,16 @@ def test_component_init():
"""
Test the initialization of the Component class.
"""
component = Component(code=code_default, function_entrypoint_name="build")
component = Component(code=code_default, _function_entrypoint_name="build")
assert component.code == code_default
assert component.function_entrypoint_name == "build"
assert component._function_entrypoint_name == "build"
def test_component_get_code_tree():
"""
Test the get_code_tree method of the Component class.
"""
component = Component(code=code_default, function_entrypoint_name="build")
component = Component(code=code_default, _function_entrypoint_name="build")
tree = component.get_code_tree(component.code)
assert "imports" in tree
@ -92,19 +91,20 @@ def test_component_code_null_error():
Test the get_function method raises the
ComponentCodeNullError when the code is empty.
"""
component = Component(code="", function_entrypoint_name="")
component = Component(code="", _function_entrypoint_name="")
with pytest.raises(ComponentCodeNullError):
component.get_function()
def test_component_function_entrypoint_name_null_error():
"""
Test the get_function method raises the ComponentFunctionEntrypointNameNullError
when the function_entrypoint_name is empty.
"""
component = Component(code=code_default, function_entrypoint_name="")
with pytest.raises(ComponentFunctionEntrypointNameNullError):
component.get_function()
# TODO: Validate if we should remove this
# def test_component_function_entrypoint_name_null_error():
# """
# Test the get_function method raises the ComponentFunctionEntrypointNameNullError
# when the function_entrypoint_name is empty.
# """
# component = Component(code=code_default, _function_entrypoint_name="")
# with pytest.raises(ComponentFunctionEntrypointNameNullError):
# component.get_function()
def test_custom_component_init():
@ -212,7 +212,7 @@ def test_component_get_function_valid():
Test the get_function method of the Component
class with valid code and function_entrypoint_name.
"""
component = Component(code="def build(): pass", function_entrypoint_name="build")
component = Component(code="def build(): pass", _function_entrypoint_name="build")
my_function = component.get_function()
assert callable(my_function)
@ -382,7 +382,7 @@ def test_component_get_code_tree_syntax_error():
Test the get_code_tree method of the Component class
raises the CodeSyntaxError when given incorrect syntax.
"""
component = Component(code="import os as", function_entrypoint_name="build")
component = Component(code="import os as", _function_entrypoint_name="build")
with pytest.raises(CodeSyntaxError):
component.get_code_tree(component.code)

View file

@ -38,7 +38,7 @@ def test_create_flow(
assert response.json()["name"] == flow.name
assert response.json()["data"] == flow.data
# data is optional, so we can create a flow without it
flow = FlowCreate(name="Test Flow")
flow = FlowCreate(name="Test Flow", description="description")
response = client.post(
"api/v1/flows/", json=flow.dict(exclude_unset=True), headers=logged_in_headers
)

View file

@ -96,7 +96,7 @@ def test_prompt_template(client: TestClient, logged_in_headers):
"placeholder": "",
"show": False,
"multiline": False,
"value": True,
"value": False,
"password": False,
"name": "validate_template",
"type": "bool",

View file

@ -69,7 +69,7 @@ def test_build_template_from_function():
"ExampleClass1", type_to_loader_dict, add_function=True
)
assert result_with_function is not None
assert "function" in result_with_function["base_classes"]
assert "Callable" in result_with_function["base_classes"]
# Test with invalid name
with pytest.raises(ValueError, match=r".* not found"):
@ -237,7 +237,7 @@ def test_format_dict():
"password": False,
"multiline": False,
"options": CHAT_OPENAI_MODELS,
"value": "gpt-3.5-turbo-0613",
"value": "gpt-4-1106-preview",
},
}
assert format_dict(input_dict, "OpenAI") == expected_output_openai

View file

@ -212,9 +212,14 @@ def test_patch_user_wrong_id(client, active_user, logged_in_headers):
assert response.json() == {
"detail": [
{
"type": "uuid_parsing",
"loc": ["path", "user_id"],
"msg": "value is not a valid uuid",
"type": "type_error.uuid",
"msg": "Input should be a valid UUID, invalid character: expected an optional prefix of `urn:uuid:` followed by [0-9a-fA-F-], found `w` at 1", # noqa
"input": "wrong_id",
"ctx": {
"error": "invalid character: expected an optional prefix of `urn:uuid:` followed by [0-9a-fA-F-], found `w` at 1" # noqa
},
"url": "https://errors.pydantic.dev/2.4/v/uuid_parsing",
}
]
}
@ -234,9 +239,14 @@ def test_delete_user_wrong_id(client, test_user, super_user_headers):
assert response.json() == {
"detail": [
{
"type": "uuid_parsing",
"loc": ["path", "user_id"],
"msg": "value is not a valid uuid",
"type": "type_error.uuid",
"msg": "Input should be a valid UUID, invalid character: expected an optional prefix of `urn:uuid:` followed by [0-9a-fA-F-], found `w` at 1", # noqa
"input": "wrong_id",
"ctx": {
"error": "invalid character: expected an optional prefix of `urn:uuid:` followed by [0-9a-fA-F-], found `w` at 1" # noqa
},
"url": "https://errors.pydantic.dev/2.4/v/uuid_parsing",
}
]
}
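
The updated assertions above mirror Pydantic V2's structured validation errors, which carry type, loc, msg, input, ctx, and a documentation url. A standalone sketch of how such an error is produced for a bad UUID, independent of the API route:

from uuid import UUID
from pydantic import TypeAdapter, ValidationError

try:
    TypeAdapter(UUID).validate_python("wrong_id")
except ValidationError as exc:
    err = exc.errors()[0]
    # V2 reports type "uuid_parsing" (V1 used "type_error.uuid") and includes
    # the offending input plus a docs url for the error code.
    print(err["type"], err["input"])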