fix: langwatch component initialization and some QoL (#8885)

* Fix LangWatch component initialization and add some quality-of-life improvements

* [autofix.ci] apply automated fixes

* Fix ruff and add unit tests

* [autofix.ci] apply automated fixes

* feat(langwatch): add utility for caching evaluators and refactor component to use it

* test(langwatch): add initial test file for LangWatchComponent and mock evaluator method

* fix(langwatch): use getattr for safer access to current_evaluator attribute

* test(langwatch): update cache clearing method to use utility function

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Gabriel Luiz Freitas Almeida <gabriel@langflow.org>
This commit is contained in:
Jordan Frazier 2025-07-07 06:42:33 -07:00 committed by GitHub
commit 64855b2f49
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 435 additions and 30 deletions

View file

@ -0,0 +1,17 @@
from functools import lru_cache
from typing import Any
import httpx
from loguru import logger
@lru_cache(maxsize=1)
def get_cached_evaluators(url: str) -> dict[str, Any]:
    """Fetch and cache the LangWatch evaluator catalog from *url*.

    Returns:
        The ``evaluators`` mapping from the endpoint's JSON response, or an
        empty dict when the request fails (the error is logged).

    NOTE(review): ``lru_cache`` also memoizes the ``{}`` failure result, so a
    transient outage is cached until ``get_cached_evaluators.cache_clear()``
    is called (the test suite relies on clearing it between tests).
    """
    try:
        response = httpx.get(url, timeout=10)
        # raise_for_status() raises httpx.HTTPStatusError, which is NOT a
        # subclass of httpx.RequestError -- the previous except clause let
        # 4xx/5xx responses escape this best-effort helper. httpx.HTTPError
        # is the common base of both transport and status errors.
        response.raise_for_status()
        return response.json().get("evaluators", {})
    except httpx.HTTPError as e:
        logger.error(f"Error fetching evaluators: {e}")
        return {}

View file

@ -5,6 +5,7 @@ from typing import Any
import httpx
from loguru import logger
from langflow.base.langwatch.utils import get_cached_evaluators
from langflow.custom.custom_component.component import Component
from langflow.inputs.inputs import MultilineInput
from langflow.io import (
@ -81,36 +82,24 @@ class LangWatchComponent(Component):
Output(name="evaluation_result", display_name="Evaluation Result", method="evaluate"),
]
def __init__(self, **data):
    """Initialize the component and eagerly load the evaluator catalog.

    NOTE(review): this calls get_evaluators() -- a network fetch -- inside
    the constructor; consider deferring it until first use.
    """
    super().__init__(**data)
    self._code = data.get("_code", "")
    self.dynamic_inputs = {}
    self.evaluators = self.get_evaluators()
    # Default to the first advertised evaluator, if any were returned.
    self.current_evaluator = next(iter(self.evaluators)) if self.evaluators else None
def get_evaluators(self) -> dict[str, Any]:
    """Fetch the evaluator catalog from the LangWatch API.

    Returns:
        A mapping of evaluator name -> definition, or an empty dict when the
        fetch fails; the failure is surfaced via ``self.status``.
    """
    url = f"{os.getenv('LANGWATCH_ENDPOINT', 'https://app.langwatch.ai')}/api/evaluations/list"
    try:
        response = httpx.get(url, timeout=10)
        # raise_for_status() raises httpx.HTTPStatusError, which is NOT a
        # subclass of httpx.RequestError -- previously an HTTP 4xx/5xx
        # escaped this method uncaught instead of being reported via
        # self.status. httpx.HTTPError covers both cases.
        response.raise_for_status()
        return response.json().get("evaluators", {})
    except httpx.HTTPError as e:
        self.status = f"Error fetching evaluators: {e}"
        return {}
def set_evaluators(self, endpoint: str):
    """Populate ``self.evaluators`` from the given LangWatch endpoint.

    Args:
        endpoint: Base URL of the LangWatch deployment.

    Raises:
        ValueError: If the endpoint returns no evaluators.
    """
    url = f"{endpoint}/api/evaluations/list"
    self.evaluators = get_cached_evaluators(url)
    # `not self.evaluators` already covers the empty dict; the extra
    # `len(...) == 0` check was redundant. Build the message once and use
    # it for both the status field and the exception.
    if not self.evaluators:
        msg = f"No evaluators found from {endpoint}"
        self.status = msg
        raise ValueError(msg)
def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:
try:
logger.info("Updating build config. Field name: %s, Field value: %s", field_name, field_value)
logger.info(f"Updating build config. Field name: {field_name}, Field value: {field_value}")
if field_name is None or field_name == "evaluator_name":
self.evaluators = self.get_evaluators()
self.evaluators = self.get_evaluators(os.getenv("LANGWATCH_ENDPOINT", "https://app.langwatch.ai"))
build_config["evaluator_name"]["options"] = list(self.evaluators.keys())
# Set a default evaluator if none is selected
if not self.current_evaluator and self.evaluators:
if not getattr(self, "current_evaluator", None) and self.evaluators:
self.current_evaluator = next(iter(self.evaluators))
build_config["evaluator_name"]["value"] = self.current_evaluator
@ -150,7 +139,7 @@ class LangWatchComponent(Component):
# Validate presence of default keys
missing_keys = [key for key in default_keys if key not in build_config]
if missing_keys:
logger.warning("Missing required keys in build_config: %s", missing_keys)
logger.warning(f"Missing required keys in build_config: {missing_keys}")
# Add missing keys with default values
for key in missing_keys:
build_config[key] = {"value": None, "type": "str"}
@ -158,14 +147,11 @@ class LangWatchComponent(Component):
# Ensure the current_evaluator is always set in the build_config
build_config["evaluator_name"]["value"] = self.current_evaluator
logger.info("Current evaluator set to: %s", self.current_evaluator)
return build_config
logger.info(f"Current evaluator set to: {self.current_evaluator}")
except (KeyError, AttributeError, ValueError) as e:
self.status = f"Error updating component: {e!s}"
return build_config
else:
return build_config
return build_config
def get_dynamic_inputs(self, evaluator: dict[str, Any]):
try:
@ -229,13 +215,18 @@ class LangWatchComponent(Component):
if not self.api_key:
return Data(data={"error": "API key is required"})
self.set_evaluators(os.getenv("LANGWATCH_ENDPOINT", "https://app.langwatch.ai"))
self.dynamic_inputs = {}
if getattr(self, "current_evaluator", None) is None and self.evaluators:
self.current_evaluator = next(iter(self.evaluators))
# Prioritize evaluator_name if it exists
evaluator_name = getattr(self, "evaluator_name", None) or self.current_evaluator
if not evaluator_name:
if self.evaluators:
evaluator_name = next(iter(self.evaluators))
logger.info("No evaluator was selected. Using default: %s", evaluator_name)
logger.info(f"No evaluator was selected. Using default: {evaluator_name}")
else:
return Data(
data={"error": "No evaluator selected and no evaluators available. Please choose an evaluator."}
@ -246,7 +237,7 @@ class LangWatchComponent(Component):
if not evaluator:
return Data(data={"error": f"Selected evaluator '{evaluator_name}' not found."})
logger.info("Evaluating with evaluator: %s", evaluator_name)
logger.info(f"Evaluating with evaluator: {evaluator_name}")
endpoint = f"/api/evaluations/{evaluator_name}/evaluate"
url = f"{os.getenv('LANGWATCH_ENDPOINT', 'https://app.langwatch.ai')}{endpoint}"

View file

@ -0,0 +1,397 @@
import json
import os
from unittest.mock import Mock, patch
import httpx
import pytest
import respx
from httpx import Response
from langflow.base.langwatch.utils import get_cached_evaluators
from langflow.components.langwatch.langwatch import LangWatchComponent
from langflow.schema.data import Data
from langflow.schema.dotdict import dotdict
from tests.base import ComponentTestBaseWithoutClient
class TestLangWatchComponent(ComponentTestBaseWithoutClient):
    """Unit tests for LangWatchComponent: evaluator listing, dynamic input
    generation, build-config updates, and the evaluate() flow."""

    @staticmethod
    def _evaluators_response(evaluators):
        """Build a Mock mimicking a successful httpx.get() evaluator listing."""
        stub = Mock()
        stub.json.return_value = {"evaluators": evaluators}
        stub.raise_for_status.return_value = None
        return stub

    @staticmethod
    def _base_build_config():
        """Return the minimal build_config dotdict shared by the config tests."""
        return dotdict(
            {
                "evaluator_name": {"options": [], "value": None},
                "api_key": {"value": "test_key"},
                "code": {"value": ""},
                "_type": {"value": ""},
                "input": {"value": ""},
                "output": {"value": ""},
                "timeout": {"value": 30},
            }
        )

    @pytest.fixture
    def component_class(self):
        """Component class under test."""
        return LangWatchComponent

    @pytest.fixture
    def default_kwargs(self):
        """Default constructor kwargs for the component."""
        return {
            "evaluator_name": "test_evaluator",
            "api_key": "test_api_key",
            "input": "test input",
            "output": "test output",
            "expected_output": "expected output",
            "contexts": "context1, context2",
            "timeout": 30,
        }

    @pytest.fixture
    def file_names_mapping(self):
        """No version-specific files exist for this component."""
        return []

    @pytest.fixture
    def mock_evaluators(self):
        """Two fake evaluator definitions: one numeric and one boolean setting."""
        return {
            "test_evaluator": {
                "name": "test_evaluator",
                "requiredFields": ["input", "output"],
                "optionalFields": ["contexts"],
                "settings": {
                    "temperature": {
                        "description": "Temperature setting",
                        "default": 0.7,
                    }
                },
                "settings_json_schema": {
                    "properties": {
                        "temperature": {
                            "type": "number",
                            "default": 0.7,
                        }
                    }
                },
            },
            "boolean_evaluator": {
                "name": "boolean_evaluator",
                "requiredFields": ["input"],
                "optionalFields": [],
                "settings": {
                    "strict_mode": {
                        "description": "Strict mode setting",
                        "default": True,
                    }
                },
                "settings_json_schema": {
                    "properties": {
                        "strict_mode": {
                            "type": "boolean",
                            "default": True,
                        }
                    }
                },
            },
        }

    @pytest.fixture
    async def component(self, component_class, default_kwargs, mock_evaluators):
        """Component instance pre-loaded with the fake evaluators."""
        instance = component_class(**default_kwargs)
        instance.evaluators = mock_evaluators
        return instance

    @pytest.fixture(autouse=True)
    def clear_cache(self):
        """Reset the evaluator LRU cache so tests stay independent."""
        get_cached_evaluators.cache_clear()

    @patch("langflow.components.langwatch.langwatch.httpx.get")
    async def test_set_evaluators_success(self, mock_get, component, mock_evaluators):
        """set_evaluators() stores the evaluators returned by the endpoint."""
        mock_get.return_value = self._evaluators_response(mock_evaluators)
        component.set_evaluators("https://app.langwatch.ai")
        assert component.evaluators == mock_evaluators

    @patch("langflow.components.langwatch.langwatch.httpx.get")
    async def test_set_evaluators_empty_response(self, mock_get, component):
        """set_evaluators() raises when the endpoint lists no evaluators."""
        mock_get.return_value = self._evaluators_response({})
        with pytest.raises(ValueError, match="No evaluators found"):
            component.set_evaluators("https://app.langwatch.ai")

    def test_get_dynamic_inputs(self, component, mock_evaluators):
        """Optional fields and settings both yield dynamic inputs."""
        inputs = component.get_dynamic_inputs(mock_evaluators["test_evaluator"])
        assert "contexts" in inputs  # from optionalFields
        assert "temperature" in inputs  # from settings

    def test_get_dynamic_inputs_with_boolean_setting(self, component, mock_evaluators):
        """Boolean settings produce a boolean dynamic input."""
        inputs = component.get_dynamic_inputs(mock_evaluators["boolean_evaluator"])
        assert "strict_mode" in inputs

    def test_get_dynamic_inputs_error_handling(self, component):
        """Malformed evaluator data yields an empty input mapping."""
        assert component.get_dynamic_inputs({"invalid": "data"}) == {}

    @patch.dict(os.environ, {"LANGWATCH_ENDPOINT": "https://test.langwatch.ai"})
    def test_update_build_config_basic(self, component, mock_evaluators):
        """update_build_config() populates the evaluator options list."""
        build_config = self._base_build_config()

        def fake_get_evaluators(endpoint):  # noqa: ARG001
            return mock_evaluators

        # get_evaluators does not exist on the component, so create it.
        with patch.object(component, "get_evaluators", side_effect=fake_get_evaluators, create=True):
            result = component.update_build_config(build_config, None, None)
            assert "test_evaluator" in result["evaluator_name"]["options"]
            assert "boolean_evaluator" in result["evaluator_name"]["options"]

    @patch.dict(os.environ, {"LANGWATCH_ENDPOINT": "https://test.langwatch.ai"})
    def test_update_build_config_with_evaluator_selection(self, component, mock_evaluators):
        """Selecting an evaluator is reflected in the build-config value."""
        build_config = self._base_build_config()

        def fake_get_evaluators(endpoint):  # noqa: ARG001
            return mock_evaluators

        with patch.object(component, "get_evaluators", side_effect=fake_get_evaluators, create=True):
            component.current_evaluator = None
            result = component.update_build_config(build_config, "test_evaluator", "evaluator_name")
            assert result["evaluator_name"]["value"] == "test_evaluator"

    @patch("langflow.components.langwatch.langwatch.httpx.get")
    @respx.mock
    async def test_evaluate_success(self, mock_get, component, mock_evaluators):
        """evaluate() returns the evaluation endpoint's JSON payload."""
        mock_get.return_value = self._evaluators_response(mock_evaluators)
        eval_url = "https://app.langwatch.ai/api/evaluations/test_evaluator/evaluate"
        eval_payload = {"score": 0.95, "reasoning": "Good evaluation"}
        respx.post(eval_url).mock(return_value=Response(200, json=eval_payload))

        component.evaluator_name = "test_evaluator"
        component.api_key = "test_api_key"
        component.input = "test input"
        component.output = "test output"
        component.contexts = "context1, context2"

        result = await component.evaluate()
        assert isinstance(result, Data)
        assert result.data == eval_payload

    @respx.mock
    async def test_evaluate_no_api_key(self, component):
        """A missing API key short-circuits evaluation with an error Data."""
        component.api_key = None
        result = await component.evaluate()
        assert isinstance(result, Data)
        assert result.data["error"] == "API key is required"

    async def test_evaluate_no_evaluators(self, component):
        """No evaluators and no selection produces an explanatory error."""
        component.api_key = "test_api_key"
        component.evaluator_name = None
        # Stub set_evaluators so no external HTTP call happens.
        with patch.object(component, "set_evaluators"):
            component.evaluators = {}
            component.current_evaluator = None
            result = await component.evaluate()
        assert isinstance(result, Data)
        assert "No evaluator selected" in result.data["error"]

    @patch("langflow.components.langwatch.langwatch.httpx.get")
    @respx.mock
    async def test_evaluate_evaluator_not_found(self, mock_get, component, mock_evaluators):
        """An unknown evaluator name yields a 'not found' error."""
        mock_get.return_value = self._evaluators_response(mock_evaluators)
        component.api_key = "test_api_key"
        component.evaluator_name = "non_existent_evaluator"
        result = await component.evaluate()
        assert isinstance(result, Data)
        assert "Selected evaluator 'non_existent_evaluator' not found" in result.data["error"]

    @patch("langflow.components.langwatch.langwatch.httpx.get")
    @respx.mock
    async def test_evaluate_http_error(self, mock_get, component, mock_evaluators):
        """Transport failures during evaluation surface as 'Evaluation error'."""
        mock_get.return_value = self._evaluators_response(mock_evaluators)
        eval_url = "https://app.langwatch.ai/api/evaluations/test_evaluator/evaluate"
        respx.post(eval_url).mock(side_effect=httpx.RequestError("Connection failed"))

        component.api_key = "test_api_key"
        component.evaluator_name = "test_evaluator"
        component.input = "test input"
        component.output = "test output"

        result = await component.evaluate()
        assert isinstance(result, Data)
        assert "Evaluation error" in result.data["error"]

    @patch("langflow.components.langwatch.langwatch.httpx.get")
    @respx.mock
    async def test_evaluate_with_tracing(self, mock_get, component, mock_evaluators):
        """A tracer's trace_id is forwarded in the evaluation request body."""
        mock_get.return_value = self._evaluators_response(mock_evaluators)
        eval_url = "https://app.langwatch.ai/api/evaluations/test_evaluator/evaluate"
        eval_payload = {"score": 0.95, "reasoning": "Good evaluation"}

        captured = None

        def _capture(request):
            nonlocal captured
            captured = json.loads(request.content.decode())
            return Response(200, json=eval_payload)

        respx.post(eval_url).mock(side_effect=_capture)

        component.api_key = "test_api_key"
        component.evaluator_name = "test_evaluator"
        component.input = "test input"
        component.output = "test output"

        tracer_stub = Mock()
        tracer_stub.trace_id = "test_trace_id"
        component._tracing_service = Mock()
        component._tracing_service.get_tracer.return_value = tracer_stub

        result = await component.evaluate()
        assert captured["settings"]["trace_id"] == "test_trace_id"
        assert isinstance(result, Data)
        assert result.data == eval_payload

    @patch("langflow.components.langwatch.langwatch.httpx.get")
    @respx.mock
    async def test_evaluate_with_contexts_parsing(self, mock_get, component, mock_evaluators):
        """Comma-separated contexts are split into a list (whitespace kept)."""
        mock_get.return_value = self._evaluators_response(mock_evaluators)
        eval_url = "https://app.langwatch.ai/api/evaluations/test_evaluator/evaluate"
        eval_payload = {"score": 0.95, "reasoning": "Good evaluation"}

        captured = None

        def _capture(request):
            nonlocal captured
            captured = json.loads(request.content.decode())
            return Response(200, json=eval_payload)

        respx.post(eval_url).mock(side_effect=_capture)

        component.api_key = "test_api_key"
        component.evaluator_name = "test_evaluator"
        component.input = "test input"
        component.output = "test output"
        component.contexts = "context1, context2, context3"

        result = await component.evaluate()
        # Split on commas only, so the leading spaces survive.
        assert captured["data"]["contexts"] == ["context1", " context2", " context3"]
        assert isinstance(result, Data)
        assert result.data == eval_payload

    @patch("langflow.components.langwatch.langwatch.httpx.get")
    @respx.mock
    async def test_evaluate_timeout_handling(self, mock_get, component, mock_evaluators):
        """Timeouts during evaluation surface as 'Evaluation error'."""
        mock_get.return_value = self._evaluators_response(mock_evaluators)
        eval_url = "https://app.langwatch.ai/api/evaluations/test_evaluator/evaluate"
        respx.post(eval_url).mock(side_effect=httpx.TimeoutException("Request timed out"))

        component.api_key = "test_api_key"
        component.evaluator_name = "test_evaluator"
        component.input = "test input"
        component.output = "test output"
        component.timeout = 5

        result = await component.evaluate()
        assert isinstance(result, Data)
        assert "Evaluation error" in result.data["error"]