🚀 chore(server, tests): update API endpoint URLs to include version number

The API endpoint URLs have been updated to include the version number to improve the API's versioning and maintainability. The changes were made to the server.ts file and the tests that use the API endpoints.

🐛 fix(tests): update API endpoint paths in test files
The API endpoint paths in the test files were outdated and have been updated to reflect the current API version. This ensures that the tests run against the correct endpoints and stay in sync with the current API version.
This commit is contained in:
Gabriel Luiz Freitas Almeida 2023-06-06 10:06:40 -03:00
commit 6b5539232f
9 changed files with 40 additions and 40 deletions

View file

@ -5,7 +5,7 @@ from langflow.settings import settings
# check that all agents are in settings.agents
# are in json_response["agents"]
def test_agents_settings(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
agents = json_response["agents"]
@ -13,7 +13,7 @@ def test_agents_settings(client: TestClient):
def test_zero_shot_agent(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
agents = json_response["agents"]
@ -52,7 +52,7 @@ def test_zero_shot_agent(client: TestClient):
def test_json_agent(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
agents = json_response["agents"]
@ -87,7 +87,7 @@ def test_json_agent(client: TestClient):
def test_csv_agent(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
agents = json_response["agents"]
@ -126,7 +126,7 @@ def test_csv_agent(client: TestClient):
def test_initialize_agent(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
agents = json_response["agents"]

View file

@ -1,10 +1,10 @@
import json
from langflow.graph import Graph
from langflow.processing.process import load_or_build_langchain_object
import pytest
from langflow.interface.run import (
build_graph,
build_langchain_object_with_caching,
load_or_build_langchain_object,
)
@ -62,7 +62,7 @@ def test_build_langchain_object_with_caching(basic_data_graph):
# Test build_graph
def test_build_graph(basic_data_graph):
graph = build_graph(basic_data_graph)
graph = Graph.from_payload(basic_data_graph)
assert graph is not None
assert len(graph.nodes) == len(basic_data_graph["nodes"])
assert len(graph.edges) == len(basic_data_graph["edges"])

View file

@ -3,7 +3,7 @@ from langflow.settings import settings
def test_chains_settings(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -12,7 +12,7 @@ def test_chains_settings(client: TestClient):
# Test the ConversationChain object
def test_conversation_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -94,7 +94,7 @@ def test_conversation_chain(client: TestClient):
def test_llm_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -152,7 +152,7 @@ def test_llm_chain(client: TestClient):
def test_llm_checker_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -228,7 +228,7 @@ def test_llm_checker_chain(client: TestClient):
def test_llm_math_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -306,7 +306,7 @@ def test_llm_math_chain(client: TestClient):
def test_series_character_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -368,7 +368,7 @@ def test_series_character_chain(client: TestClient):
def test_mid_journey_prompt_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]
@ -407,7 +407,7 @@ def test_mid_journey_prompt_chain(client: TestClient):
def test_time_travel_guide_chain(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
chains = json_response["chains"]

View file

@ -4,7 +4,7 @@ from langflow.interface.tools.constants import CUSTOM_TOOLS
def test_get_all(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
# We need to test the custom nodes
@ -21,7 +21,7 @@ import math
def square(x):
return x ** 2
"""
response1 = client.post("/validate/code", json={"code": code1})
response1 = client.post("api/v1/validate/code", json={"code": code1})
assert response1.status_code == 200
assert response1.json() == {"imports": {"errors": []}, "function": {"errors": []}}
@ -32,7 +32,7 @@ import non_existent_module
def square(x):
return x ** 2
"""
response2 = client.post("/validate/code", json={"code": code2})
response2 = client.post("api/v1/validate/code", json={"code": code2})
assert response2.status_code == 200
assert response2.json() == {
"imports": {"errors": ["No module named 'non_existent_module'"]},
@ -46,7 +46,7 @@ import math
def square(x)
return x ** 2
"""
response3 = client.post("/validate/code", json={"code": code3})
response3 = client.post("api/v1/validate/code", json={"code": code3})
assert response3.status_code == 200
assert response3.json() == {
"imports": {"errors": []},
@ -54,11 +54,11 @@ def square(x)
}
# Test case with invalid JSON payload
response4 = client.post("/validate/code", json={"invalid_key": code1})
response4 = client.post("api/v1/validate/code", json={"invalid_key": code1})
assert response4.status_code == 422
# Test case with an empty code string
response5 = client.post("/validate/code", json={"code": ""})
response5 = client.post("api/v1/validate/code", json={"code": ""})
assert response5.status_code == 200
assert response5.json() == {"imports": {"errors": []}, "function": {"errors": []}}
@ -69,7 +69,7 @@ import math
def square(x)
return x ** 2
"""
response6 = client.post("/validate/code", json={"code": code6})
response6 = client.post("api/v1/validate/code", json={"code": code6})
assert response6.status_code == 200
assert response6.json() == {
"imports": {"errors": []},
@ -95,13 +95,13 @@ INVALID_PROMPT = "This is an invalid prompt without any input variable."
def test_valid_prompt(client: TestClient):
response = client.post("/validate/prompt", json={"template": VALID_PROMPT})
response = client.post("api/v1/validate/prompt", json={"template": VALID_PROMPT})
assert response.status_code == 200
assert response.json() == {"input_variables": ["product"]}
def test_invalid_prompt(client: TestClient):
response = client.post("/validate/prompt", json={"template": INVALID_PROMPT})
response = client.post("api/v1/validate/prompt", json={"template": INVALID_PROMPT})
assert response.status_code == 200
assert response.json() == {"input_variables": []}
@ -116,7 +116,7 @@ def test_invalid_prompt(client: TestClient):
],
)
def test_various_prompts(client, prompt, expected_input_variables):
response = client.post("/validate/prompt", json={"template": prompt})
response = client.post("api/v1/validate/prompt", json={"template": prompt})
assert response.status_code == 200
assert response.json() == {
"input_variables": expected_input_variables,

View file

@ -16,7 +16,7 @@ from langflow.graph.vertex.types import (
ToolVertex,
WrapperVertex,
)
from langflow.interface.run import get_result_and_thought
from langflow.processing.process import get_result_and_thought
from langflow.utils.payload import get_root_node
# Test cases for the graph module

View file

@ -3,7 +3,7 @@ from langflow.settings import settings
def test_llms_settings(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
llms = json_response["llms"]
@ -11,7 +11,7 @@ def test_llms_settings(client: TestClient):
# def test_hugging_face_hub(client: TestClient):
# response = client.get("/all")
# response = client.get("api/v1/all")
# assert response.status_code == 200
# json_response = response.json()
# language_models = json_response["llms"]
@ -103,7 +103,7 @@ def test_llms_settings(client: TestClient):
def test_openai(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
language_models = json_response["llms"]
@ -333,7 +333,7 @@ def test_openai(client: TestClient):
def test_chat_open_ai(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
language_models = json_response["llms"]

View file

@ -2,7 +2,7 @@ import json
import pytest
from langchain.chains.base import Chain
from langflow import load_flow_from_json
from langflow.processing.process import load_flow_from_json
from langflow.graph import Graph
from langflow.utils.payload import get_root_node

View file

@ -3,7 +3,7 @@ from langflow.settings import settings
def test_prompts_settings(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
prompts = json_response["prompts"]
@ -11,7 +11,7 @@ def test_prompts_settings(client: TestClient):
def test_prompt_template(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
prompts = json_response["prompts"]
@ -89,7 +89,7 @@ def test_prompt_template(client: TestClient):
def test_few_shot_prompt_template(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
prompts = json_response["prompts"]
@ -168,7 +168,7 @@ def test_few_shot_prompt_template(client: TestClient):
def test_zero_shot_prompt(client: TestClient):
response = client.get("/all")
response = client.get("api/v1/all")
assert response.status_code == 200
json_response = response.json()
prompts = json_response["prompts"]

View file

@ -5,17 +5,17 @@ from fastapi.testclient import TestClient
def test_websocket_connection(client: TestClient):
with client.websocket_connect("/chat/test_client") as websocket:
with client.websocket_connect("api/v1/chat/test_client") as websocket:
assert websocket.scope["client"] == ["testclient", 50000]
assert websocket.scope["path"] == "/chat/test_client"
assert websocket.scope["path"] == "/api/v1/chat/test_client"
def test_chat_history(client: TestClient):
# Mock the process_graph function to return a specific value
with patch("langflow.api.chat_manager.process_graph") as mock_process_graph:
with patch("langflow.chat.manager.process_graph") as mock_process_graph:
mock_process_graph.return_value = ("Hello, I'm a mock response!", "")
with client.websocket_connect("/chat/test_client") as websocket:
with client.websocket_connect("api/v1/chat/test_client") as websocket:
# First message should be the history
history = websocket.receive_json()
assert history == [] # Empty history