feat: expose serialization truncation constants in /config route (#7316)

* test: validate truncation logic and response structure

* feat: expose serialization truncation constants in /config route

* chore: tidy up redundant import from module
This commit is contained in:
Ítalo Johnny 2025-03-31 11:46:40 -03:00 committed by GitHub
commit cac85c62b4
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 85 additions and 2 deletions

View file

@@ -748,8 +748,6 @@ async def custom_component_update(
@router.get("/config", response_model=ConfigResponse)
async def get_config():
    """Return the frontend configuration payload (feature flags, timeouts,
    and — per this change — the serialization truncation constants).

    NOTE(review): body is truncated in this diff excerpt; the visible part
    only fetches the settings service and begins building the response dict.
    """
    try:
        # Local import, presumably to avoid a circular import at module load —
        # TODO confirm against the module's top-level imports.
        from langflow.services.deps import get_settings_service
        settings_service: SettingsService = get_settings_service()
        return {

View file

@@ -17,6 +17,7 @@ from langflow.graph.schema import RunOutputs
from langflow.schema import dotdict
from langflow.schema.graph import Tweaks
from langflow.schema.schema import InputType, OutputType, OutputValue
from langflow.serialization import constants as serialization_constants
from langflow.serialization.constants import MAX_ITEMS_LENGTH, MAX_TEXT_LENGTH
from langflow.serialization.serialization import serialize
from langflow.services.database.models.api_key.model import ApiKeyRead
@@ -378,6 +379,8 @@ class FlowDataRequest(BaseModel):
class ConfigResponse(BaseModel):
    """Response model for the /config route."""

    feature_flags: FeatureFlags
    # NOTE(review): "lenght" is a typo for "length". It is now baked into the
    # public JSON key of the /config response, so renaming it would break API
    # consumers; if fixed, keep the old key as a deprecated alias.
    serialization_max_items_lenght: int = serialization_constants.MAX_ITEMS_LENGTH
    serialization_max_text_length: int = serialization_constants.MAX_TEXT_LENGTH
    frontend_timeout: int
    auto_saving: bool
    auto_saving_interval: int

View file

@@ -0,0 +1,82 @@
import pytest
from langflow.api.v1.schemas import VertexBuildResponse
from langflow.serialization.constants import MAX_ITEMS_LENGTH
# Key sets that every serialized VertexBuildResponse is expected to contain.
# Built from whitespace-separated words to keep the literals compact.
expected_keys_vertex_build_response = set(
    "id inactivated_vertices next_vertices_ids top_level_vertices "
    "valid params data timestamp".split()
)

# Keys expected inside the nested "data" payload.
expected_keys_data = set(
    "results outputs logs message artifacts timedelta duration used_frozen_result".split()
)

# Keys expected for each individual output entry.
expected_keys_outputs = set("message type".split())
def assert_vertex_response_structure(result):
    """Assert that a dumped VertexBuildResponse has the expected key structure."""
    data = result["data"]
    dataframe_output = data["outputs"]["dataframe"]
    # "expected <= actual" is the subset form of actual.issuperset(expected).
    assert expected_keys_vertex_build_response <= set(result)
    assert expected_keys_data <= set(data)
    assert expected_keys_outputs <= set(dataframe_output)
def test_vertex_response_structure_without_truncate():
    """A message list below the truncation threshold is returned intact."""
    items = [{"key": 1, "value": 1}]
    payload = {
        "data": {
            "outputs": {"dataframe": {"message": items, "type": "bar"}},
            "type": "foo",
        },
        "valid": True,
    }

    dumped = VertexBuildResponse(**payload).model_dump()

    assert_vertex_response_structure(dumped)
    # Nothing was truncated: the dumped list is the same length as the input.
    assert len(dumped["data"]["outputs"]["dataframe"]["message"]) == len(items)
def test_vertex_response_structure_when_truncate_applies():
    """An oversized message list is capped at MAX_ITEMS_LENGTH + 1 entries."""
    oversized = [{"key": i, "value": i} for i in range(MAX_ITEMS_LENGTH + 5000)]
    payload = {
        "data": {
            "outputs": {"dataframe": {"message": oversized, "type": "bar"}},
            "type": "foo",
        },
        "valid": True,
    }

    dumped = VertexBuildResponse(**payload).model_dump()

    assert_vertex_response_structure(dumped)
    # The +1 entry is presumably a truncation marker appended by the
    # serializer — TODO confirm against langflow.serialization.serialize.
    assert len(dumped["data"]["outputs"]["dataframe"]["message"]) == MAX_ITEMS_LENGTH + 1
@pytest.mark.parametrize(
    ("size", "expected"),
    [
        (0, 0),
        (42, 42),
        (MAX_ITEMS_LENGTH, MAX_ITEMS_LENGTH),
        (MAX_ITEMS_LENGTH + 1000, MAX_ITEMS_LENGTH + 1),
        (MAX_ITEMS_LENGTH + 2000, MAX_ITEMS_LENGTH + 1),
        (MAX_ITEMS_LENGTH + 3000, MAX_ITEMS_LENGTH + 1),
    ],
)
def test_vertex_response_truncation_behavior(size, expected):
    """Lists up to MAX_ITEMS_LENGTH pass through unchanged; anything longer
    collapses to MAX_ITEMS_LENGTH + 1 entries regardless of how far over it is."""
    message = [{"key": i, "value": i} for i in range(size)]
    payload = {
        "data": {
            "outputs": {"dataframe": {"message": message, "type": "bar"}},
            "type": "foo",
        },
        "valid": True,
    }

    dumped = VertexBuildResponse(**payload).model_dump()

    assert len(dumped["data"]["outputs"]["dataframe"]["message"]) == expected