fix: Fix crashes and enhance test reliability with session IDs (#6683)

* test: Add session ID parameter to ToolCallingAgentComponent test

* test: Allow blocking calls in langchain_core runnables utility function
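
This entry extends the allowlist in the blockbuster conftest fixture so langchain_core's get_function_nonlocals helper may make blocking calls inside async tests. A minimal sketch of the pattern, assuming blockbuster's blockbuster_ctx context manager, its functions registry, and the io.TextIOWrapper.read entry (only the chained can_block_in calls mirror the hunk shown further below):

    import pytest
    from blockbuster import blockbuster_ctx  # assumed import path

    @pytest.fixture(autouse=True)
    def blockbuster(request):
        # Flag blocking calls made inside the event loop during each test,
        # while allowlisting helpers known to block harmlessly. can_block_in
        # returns the entry itself, which is why the calls chain.
        with blockbuster_ctx() as bb:
            (
                bb.functions["io.TextIOWrapper.read"]
                .can_block_in("langchain_core/_api/internal.py", "is_caller_internal")
                .can_block_in("langchain_core/runnables/utils.py", "get_function_nonlocals")
            )
            yield bb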

* test: Add session ID generation to agent component test

* fix: Safely delete agent message with ID check

Modify error handling in LCAgentComponent to only delete the agent message if it has an ID attribute, preventing potential attribute errors
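
A minimal sketch of the guarded deletion, with an illustrative message type and storage helper standing in for the real LCAgentComponent internals:

    from dataclasses import dataclass

    @dataclass
    class AgentMessage:
        text: str = ""
        id: str | None = None  # set only once the message has been persisted

    async def delete_message(message_id: str) -> None:
        """Stand-in for the real message-store deletion call."""

    async def cleanup_agent_message(agent_message: AgentMessage) -> None:
        # Delete only when the message carries an ID; a message that was never
        # persisted has none, and unconditionally reading .id could raise an
        # AttributeError on other message-like objects.
        if getattr(agent_message, "id", None) is not None:
            await delete_message(agent_message.id)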

* test: Update ComponentToolkit test to use async start and verify results

Modify the test_component_tool_with_api_key test to:
- Use async_start() method for graph execution (see the sketch after this list)
- Add session ID to graph
- Improve result verification with vertex result tracking
- Update import path for ChatOutput component
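
A minimal sketch of the updated execution-and-verification pattern (it assumes a built langflow Graph g; the expected vertex ID is illustrative):

    async def run_and_verify(g) -> None:
        # Tag the run with a session ID, then drive the graph as an async
        # generator, collecting every yielded result.
        g.session_id = "test"
        results = [result async for result in g.async_start()]

        # Track which vertices actually produced results; yielded items that
        # carry no `vertex` attribute are filtered out first.
        result_ids = [result.vertex.id for result in results if hasattr(result, "vertex")]
        assert "chat_output_1" in result_ids, f"Expected outputs not in results: {result_ids}"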

* test: Skip cycle tests requiring LoopComponent

Update test_cycles.py to:
- Add skip markers for tests that now require a LoopComponent
- Improve error message with snapshots for debugging
- Preserve existing test logic while marking as skipped

* test: Add client fixture to tool calling agent test

Add usefixtures decorator to ensure the client fixture is available for the tool calling agent test, improving test setup and reliability
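
A minimal sketch of the wiring, assuming a client fixture defined in conftest.py (the test body is elided):

    import pytest

    # usefixtures runs the fixture for its setup/teardown side effects
    # without injecting its value into the test's signature.
    @pytest.mark.api_key_required
    @pytest.mark.usefixtures("client")
    async def test_tool_calling_agent_component():
        ...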

* test: Add client fixture to component tool test with API key

Enhance test_component_toolkit.py by adding the client fixture to the test_component_tool_with_api_key method, ensuring proper test setup for API key-dependent scenarios

* test: Add client fixture to agent component test with calculator

Add client fixture to the test_agent_component_with_calculator method to ensure proper test setup for API key-dependent scenarios

* test: Disable redundant component input tests

Comment out test methods for checking required inputs across various components, as these tests were not providing significant value and the inputs are dynamic

* test: Comment out condition to skip nodes with Tool outputs in setup.py

* run formatter

* test: Re-enable condition to skip nodes with Tool outputs in setup.py

* [autofix.ci] apply automated fixes

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Commit: d092724d6a
Author: Gabriel Luiz Freitas Almeida
Date: 2025-02-26 16:54:40 -03:00 (committed via GitHub)
6 changed files with 34 additions and 31 deletions


@@ -73,6 +73,7 @@ def blockbuster(request):
         .can_block_in("httpx/_client.py", "_init_transport")
         .can_block_in("rich/traceback.py", "_render_stack")
         .can_block_in("langchain_core/_api/internal.py", "is_caller_internal")
+        .can_block_in("langchain_core/runnables/utils.py", "get_function_nonlocals")
     )
     for func in ["os.stat", "os.path.abspath", "os.scandir"]:


@@ -4,7 +4,7 @@ import pytest
 from langflow.base.tools.component_tool import ComponentToolkit
 from langflow.components.langchain_utilities import ToolCallingAgentComponent
 from langflow.components.models import OpenAIModelComponent
-from langflow.components.outputs import ChatOutput
+from langflow.components.outputs.chat import ChatOutput
 from langflow.components.tools.calculator import CalculatorToolComponent
 from langflow.graph import Graph
 from langflow.schema.data import Data
@@ -41,14 +41,15 @@ async def test_component_tool_with_api_key():
     openai_llm = OpenAIModelComponent()
     openai_llm.set(api_key=os.environ["OPENAI_API_KEY"])
     tool_calling_agent = ToolCallingAgentComponent()
-    tools = await chat_output.to_toolkit()
     tool_calling_agent.set(
         llm=openai_llm.build_model,
-        tools=tools,
+        tools=[chat_output.to_toolkit],
         input_value="Which tools are available? Please tell its name.",
     )
     g = Graph(start=tool_calling_agent, end=tool_calling_agent)
+    g.session_id = "test"
     assert g is not None
     results = [result async for result in g.async_start()]
     assert len(results) == 4


@@ -95,6 +95,7 @@ class TestAgentComponent(ComponentTestBaseWithoutClient):
         assert "model_name" not in updated_config

+@pytest.mark.usefixtures("client")
 @pytest.mark.api_key_required
 async def test_agent_component_with_calculator():
     # Mock inputs


@@ -7,6 +7,7 @@ from langflow.components.tools.calculator import CalculatorToolComponent

 @pytest.mark.api_key_required
+@pytest.mark.usefixtures("client")
 async def test_tool_calling_agent_component():
     tools = [CalculatorToolComponent().build_tool()]  # Use the Calculator component as a tool
     input_value = "What is 2 + 2?"
@@ -21,7 +22,7 @@ async def test_tool_calling_agent_component():
     )
     llm = llm_component.build_model()
-    agent = ToolCallingAgentComponent()
+    agent = ToolCallingAgentComponent(_session_id="test")
     agent.set(llm=llm, tools=[tools], chat_history=chat_history, input_value=input_value)
     # Chat output


@@ -1,11 +1,9 @@
 from typing import Any
 import pytest

-from langflow.components.agents import AgentComponent
-from langflow.components.crewai import CrewAIAgentComponent, SequentialTaskComponent
 from langflow.components.custom_component import CustomComponent
 from langflow.components.inputs import ChatInput
 from langflow.components.models import OpenAIModelComponent
 from langflow.components.outputs import ChatOutput
 from langflow.custom.utils import update_component_build_config
 from langflow.schema import dotdict
@@ -38,32 +36,33 @@ def _assert_all_outputs_have_different_required_inputs(outputs: list[Output]):
     return True

-def test_set_required_inputs():
-    chatinput = ChatInput()
+# These don't make a ton of sense to test because the inputs are dynamic
+# def test_set_required_inputs():
+#     chatinput = ChatInput()

-    assert all(_output_required_inputs_are_in_inputs(output, chatinput._inputs) for output in chatinput.outputs)
-    assert _assert_all_outputs_have_different_required_inputs(chatinput.outputs)
+#     assert all(_output_required_inputs_are_in_inputs(output, chatinput._inputs) for output in chatinput.outputs)
+#     assert _assert_all_outputs_have_different_required_inputs(chatinput.outputs)

-def test_set_required_inputs_various_components():
-    chatinput = ChatInput()
-    chatoutput = ChatOutput()
-    task = SequentialTaskComponent()
-    agent = AgentComponent()
-    openai_component = OpenAIModelComponent()
+# def test_set_required_inputs_various_components():
+#     chatinput = ChatInput()
+#     chatoutput = ChatOutput()
+#     task = SequentialTaskComponent()
+#     agent = AgentComponent()
+#     openai_component = OpenAIModelComponent()

-    assert all(_output_required_inputs_are_in_inputs(output, chatinput._inputs) for output in chatinput.outputs)
-    assert all(_output_required_inputs_are_in_inputs(output, chatoutput._inputs) for output in chatoutput.outputs)
-    assert all(_output_required_inputs_are_in_inputs(output, task._inputs) for output in task.outputs)
-    assert all(_output_required_inputs_are_in_inputs(output, agent._inputs) for output in agent.outputs)
-    assert all(
-        _output_required_inputs_are_in_inputs(output, openai_component._inputs) for output in openai_component.outputs
-    )
+#     assert all(_output_required_inputs_are_in_inputs(output, chatinput._inputs) for output in chatinput.outputs)
+#     assert all(_output_required_inputs_are_in_inputs(output, chatoutput._inputs) for output in chatoutput.outputs)
+#     assert all(_output_required_inputs_are_in_inputs(output, task._inputs) for output in task.outputs)
+#     assert all(_output_required_inputs_are_in_inputs(output, agent._inputs) for output in agent.outputs)
+#     assert all(
+#         _output_required_inputs_are_in_inputs(output, openai_component._inputs) for output in openai_component.outputs
+#     )

-    assert _assert_all_outputs_have_different_required_inputs(chatinput.outputs)
-    assert _assert_all_outputs_have_different_required_inputs(chatoutput.outputs)
-    assert _assert_all_outputs_have_different_required_inputs(task.outputs)
-    assert _assert_all_outputs_have_different_required_inputs(agent.outputs)
+#     assert _assert_all_outputs_have_different_required_inputs(chatinput.outputs)
+#     assert _assert_all_outputs_have_different_required_inputs(chatoutput.outputs)
+#     assert _assert_all_outputs_have_different_required_inputs(task.outputs)
+#     assert _assert_all_outputs_have_different_required_inputs(agent.outputs)

 async def test_update_component_build_config_sync():


@@ -144,7 +144,7 @@ def test_that_outputs_cache_is_set_to_false_in_cycle():
     assert output.cache is True

-@pytest.mark.skip(reason="Cycles should have a `allows_loop` Output to work.")
+@pytest.mark.skip(reason="Cycles now require a LoopComponent to work")
 @pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OpenAI API key required")
 def test_updated_graph_with_prompts():
     # Chat input initialization
@@ -210,10 +210,10 @@ def test_updated_graph_with_prompts():
     assert len(snapshots) > 2, "Graph should have more than one snapshot"
     # Extract the vertex IDs for analysis
     results_ids = [result.vertex.id for result in results if hasattr(result, "vertex")]
-    assert "chat_output_1" in results_ids, f"Expected outputs not in results: {results_ids}"
+    assert "chat_output_1" in results_ids, f"Expected outputs not in results: {results_ids}. Snapshots: {snapshots}"

-@pytest.mark.skip(reason="Cycles should have a `allows_loop` Output to work.")
+@pytest.mark.skip(reason="Cycles now require a LoopComponent to work")
 @pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OpenAI API key required")
 def test_updated_graph_with_max_iterations():
     # Chat input initialization
@@ -279,7 +279,7 @@ def test_updated_graph_with_max_iterations():
     assert len(snapshots) > 2, "Graph should have more than one snapshot"
     # Extract the vertex IDs for analysis
     results_ids = [result.vertex.id for result in results if hasattr(result, "vertex")]
-    assert "chat_output_1" in results_ids, f"Expected outputs not in results: {results_ids}"
+    assert "chat_output_1" in results_ids, f"Expected outputs not in results: {results_ids}. Snapshots: {snapshots}"

 def test_conditional_router_max_iterations():