logs: suppress verbose warnings (#8003)

* suppress nvidia warnings about nim key

* Wrap import libs with the warnings handling

* Handle sigterm for docker exits

* makefile revert

* [autofix.ci] apply automated fixes

* ruff

* use sqlmodel paginate

* Try to update polling timeout

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
This commit is contained in:
Jordan Frazier 2025-05-19 10:39:44 -07:00 committed by GitHub
commit fe9aa9ecb3
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
13 changed files with 72 additions and 18 deletions

View file

@ -15,7 +15,7 @@ from fastapi import APIRouter, Depends, File, HTTPException, Response, UploadFil
from fastapi.encoders import jsonable_encoder
from fastapi.responses import StreamingResponse
from fastapi_pagination import Page, Params
from fastapi_pagination.ext.sqlalchemy import apaginate
from fastapi_pagination.ext.sqlmodel import apaginate
from sqlmodel import and_, col, select
from sqlmodel.ext.asyncio.session import AsyncSession

View file

@ -59,7 +59,14 @@ def tools_from_package(your_package) -> None:
module_name = f"{package_name}.{module_info.name}"
# Dynamically import the module
module = importlib.import_module(module_name)
import warnings
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="Support for class-based `config` is deprecated", category=DeprecationWarning
)
warnings.filterwarnings("ignore", message="Valid config keys have changed in V2", category=UserWarning)
module = importlib.import_module(module_name)
# Iterate over all members of the module
for name, obj in inspect.getmembers(module, inspect.isclass):

View file

@ -276,9 +276,17 @@ class LCModelComponent(Component):
# Ensure component_inputs is a list of the expected types
if not isinstance(component_inputs, list):
component_inputs = []
models_module = importlib.import_module("langflow.components.models")
component_class = getattr(models_module, str(module_name))
component = component_class()
import warnings
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="Support for class-based `config` is deprecated", category=DeprecationWarning
)
warnings.filterwarnings("ignore", message="Valid config keys have changed in V2", category=UserWarning)
models_module = importlib.import_module("langflow.components.models")
component_class = getattr(models_module, str(module_name))
component = component_class()
return self.build_llm_model_from_inputs(component, component_inputs)
except Exception as e:

View file

@ -17,6 +17,10 @@ class NVIDIAModelComponent(LCModelComponent):
icon = "NVIDIA"
try:
import warnings
# Suppresses repeated warnings about NIM key in langchain_nvidia_ai_endpoints==0.3.8
warnings.filterwarnings("ignore", category=UserWarning, module="langchain_nvidia_ai_endpoints._common")
from langchain_nvidia_ai_endpoints import ChatNVIDIA
all_models = ChatNVIDIA().get_available_models()

View file

@ -492,7 +492,7 @@ async def load_starter_projects(retries=3, delay=1) -> list[tuple[anyio.Path, di
try:
project = orjson.loads(content)
starter_projects.append((file, project))
logger.info(f"Loaded starter project {file}")
logger.debug(f"Loaded starter project {file}")
break # Break if load is successful
except orjson.JSONDecodeError as e:
attempt += 1
@ -601,7 +601,7 @@ async def update_project_file(project_path: anyio.Path, project: dict, updated_p
project["data"] = updated_project_data
async with async_open(str(project_path), "w", encoding="utf-8") as f:
await f.write(orjson.dumps(project, option=ORJSON_OPTIONS).decode())
logger.info(f"Updated starter project {project['name']} file")
logger.debug(f"Updated starter project {project['name']} file")
def update_existing_project(

View file

@ -8,12 +8,26 @@ def import_module(module_path: str) -> Any:
"""Import module from module path."""
if "from" not in module_path:
# Import the module using the module path
return importlib.import_module(module_path)
import warnings
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="Support for class-based `config` is deprecated", category=DeprecationWarning
)
warnings.filterwarnings("ignore", message="Valid config keys have changed in V2", category=UserWarning)
return importlib.import_module(module_path)
# Split the module path into its components
_, module_path, _, object_name = module_path.split()
# Import the module using the module path
module = importlib.import_module(module_path)
import warnings
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="Support for class-based `config` is deprecated", category=DeprecationWarning
)
warnings.filterwarnings("ignore", message="Valid config keys have changed in V2", category=UserWarning)
module = importlib.import_module(module_path)
return getattr(module, object_name)

View file

@ -115,4 +115,4 @@ def set_langchain_cache(settings) -> None:
except ImportError:
logger.warning(f"Could not import {cache_type}. ")
else:
logger.info("No LLM cache set.")
logger.debug("No LLM cache set.")

View file

@ -11,6 +11,7 @@ from langflow.logging.logger import InterceptHandler
class LangflowUvicornWorker(UvicornWorker):
CONFIG_KWARGS = {"loop": "asyncio"}
_has_exited = False
def _install_sigint_handler(self) -> None:
"""Install a SIGQUIT handler on workers.
@ -20,6 +21,14 @@ class LangflowUvicornWorker(UvicornWorker):
"""
loop = asyncio.get_running_loop()
loop.add_signal_handler(signal.SIGINT, self.handle_exit, signal.SIGINT, None)
loop.add_signal_handler(signal.SIGTERM, self.handle_exit, signal.SIGTERM, None)
def handle_exit(self, sig, frame):
if not self._has_exited:
print("👋 See you next time!") # noqa: T201
self._has_exited = True
super().handle_exit(sig, frame)
async def _serve(self) -> None:
# We do this to not log the "Worker (pid:XXXXX) was sent SIGINT"

View file

@ -336,7 +336,7 @@ class DatabaseService(Service):
logger.exception(msg)
raise RuntimeError(msg) from exc
else:
logger.info("Alembic already initialized")
logger.info("Alembic initialized")
logger.info(f"Running DB migrations in {self.script_location}")

View file

@ -117,7 +117,7 @@ class JobQueueService(Service):
# Clean up each registered job queue.
for job_id in list(self._queues.keys()):
await self.cleanup_job(job_id)
logger.info("JobQueueService stopped: all job queues have been cleaned up.")
logger.debug("JobQueueService stopped: all job queues have been cleaned up.")
async def teardown(self) -> None:
await self.stop()
@ -225,7 +225,7 @@ class JobQueueService(Service):
logger.debug(f"No queue found for job_id {job_id} during cleanup.")
return
logger.info(f"Commencing cleanup for job_id {job_id}")
logger.debug(f"Commencing cleanup for job_id {job_id}")
main_queue, _event_manager, task, _ = self._queues[job_id]
# Cancel the associated task if it is still running.
@ -250,7 +250,7 @@ class JobQueueService(Service):
logger.debug(f"Removed {items_cleared} items from queue for job_id {job_id}")
# Remove the job entry from the registry
self._queues.pop(job_id, None)
logger.info(f"Cleanup successful for job_id {job_id}: resources have been released.")
logger.debug(f"Cleanup successful for job_id {job_id}: resources have been released.")
async def _periodic_cleanup(self) -> None:
"""Execute a periodic task that cleans up completed or cancelled job queues.

View file

@ -165,8 +165,15 @@ def get_default_factory(module: str, function: str):
pattern = r"<function (\w+)>"
if match := re.search(pattern, function):
imported_module = importlib.import_module(module)
return getattr(imported_module, match[1])()
import warnings
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="Support for class-based `config` is deprecated", category=DeprecationWarning
)
warnings.filterwarnings("ignore", message="Valid config keys have changed in V2", category=UserWarning)
imported_module = importlib.import_module(module)
return getattr(imported_module, match[1])()
return None

View file

@ -4,7 +4,6 @@ from unittest.mock import patch
import openai
import pytest
from langchain_nvidia_ai_endpoints import ChatNVIDIA
from langchain_openai import ChatOpenAI
from langflow.components.helpers.structured_output import StructuredOutputComponent
from langflow.helpers.base_model import build_model_from_schema
@ -415,6 +414,12 @@ class TestStructuredOutputComponent(ComponentTestBaseWithoutClient):
)
def test_with_real_nvidia_model_simple_schema(self):
# Create a real NVIDIA model
try:
from langchain_nvidia_ai_endpoints import ChatNVIDIA
except ImportError as e:
msg = "Please install langchain-nvidia-ai-endpoints to use the NVIDIA model."
raise ImportError(msg) from e
llm = ChatNVIDIA(model="meta/llama-3.2-3b-instruct", temperature=0, max_tokens=10)
# Create a component with a simple schema

View file

@ -164,7 +164,7 @@ async def test_build_flow_polling(client, json_memory_chatbot_no_llm, logged_in_
self.status_code = codes.OK
self.max_total_events = 50 # Limit to prevent infinite loops
self.max_empty_polls = 10 # Maximum number of empty polls before giving up
self.poll_timeout = 1.0 # Timeout for each polling request
self.poll_timeout = 3.0 # Timeout for each polling request
async def aiter_lines(self):
try: