🔧 fix(test_endpoints.py): fix import statement for TimeTravelGuideChainNode

🔧 fix(test_endpoints.py): update test_get_all to assert "PromptTemplate" instead of "ZeroShotPrompt"
🔧 fix(test_endpoints.py): update test_valid_prompt and test_invalid_prompt to use PROMPT_REQUEST variable
🔧 fix(test_endpoints.py): update test_various_prompts to use PROMPT_REQUEST variable
🔧 fix(test_prompts_template.py): remove test_zero_shot_prompt as it is no longer needed
The import statement for TimeTravelGuideChainNode is fixed to ensure the correct module is imported. The test_get_all function is updated to assert the presence of "PromptTemplate" instead of "ZeroShotPrompt" in the response. The test_valid_prompt, test_invalid_prompt, and test_various_prompts functions are updated to use the PROMPT_REQUEST variable for the request payload. The test_zero_shot_prompt function is removed as it is no longer needed.
This commit is contained in:
Gabriel Luiz Freitas Almeida 2023-07-07 18:22:59 -03:00
commit 94d598b59c
2 changed files with 94 additions and 57 deletions

View file

@@ -1,6 +1,86 @@
import pytest
from fastapi.testclient import TestClient
from langflow.interface.tools.constants import CUSTOM_TOOLS
from langflow.template.frontend_node.chains import TimeTravelGuideChainNode
# Shared request payload for the prompt-validation endpoint tests.
# Tests overwrite the "template" key per case before POSTing.

# base_formatters entries that are all empty dicts in this fixture.
_EMPTY_BASE_FORMATTERS = {
    key: {}
    for key in (
        "kwargs",
        "optional",
        "list",
        "dict",
        "union",
        "multiline",
        "show",
        "password",
        "default",
        "headers",
        "dict_code_file",
    )
}

# Anthropic and ChatAnthropic expose the identical model list.
_ANTHROPIC_MODELS = [
    "claude-v1",
    "claude-v1-100k",
    "claude-instant-v1",
    "claude-instant-v1-100k",
    "claude-v1.3",
    "claude-v1.3-100k",
    "claude-v1.2",
    "claude-v1.0",
    "claude-instant-v1.1",
    "claude-instant-v1.1-100k",
    "claude-instant-v1.0",
]

_MODEL_DICT = {
    "OpenAI": [
        "text-davinci-003",
        "text-davinci-002",
        "text-curie-001",
        "text-babbage-001",
        "text-ada-001",
    ],
    "ChatOpenAI": [
        "gpt-3.5-turbo-0613",
        "gpt-3.5-turbo",
        "gpt-3.5-turbo-16k-0613",
        "gpt-3.5-turbo-16k",
        "gpt-4-0613",
        "gpt-4-32k-0613",
        "gpt-4",
        "gpt-4-32k",
    ],
    # Copies, so mutating one list in a test cannot leak into the other.
    "Anthropic": list(_ANTHROPIC_MODELS),
    "ChatAnthropic": list(_ANTHROPIC_MODELS),
}

PROMPT_REQUEST = {
    "name": "string",
    "template": "string",
    "frontend_node": {
        "template": {},
        "description": "string",
        "base_classes": ["string"],
        "name": "",
        "display_name": "",
        "documentation": "",
        "custom_fields": {},
        "output_types": [],
        "field_formatters": {
            "formatters": {"openai_api_key": {}},
            "base_formatters": {
                **_EMPTY_BASE_FORMATTERS,
                "model_fields": {"MODEL_DICT": _MODEL_DICT},
            },
        },
    },
}
def test_get_all(client: TestClient):
@@ -8,7 +88,7 @@ def test_get_all(client: TestClient):
assert response.status_code == 200
json_response = response.json()
# We need to test the custom nodes
assert "ZeroShotPrompt" in json_response["prompts"]
assert "PromptTemplate" in json_response["prompts"]
# All CUSTOM_TOOLS(dict) should be in the response
assert all(tool in json_response["tools"] for tool in CUSTOM_TOOLS.keys())
@@ -95,15 +175,20 @@ INVALID_PROMPT = "This is an invalid prompt without any input variable."
def test_valid_prompt(client: TestClient):
response = client.post("api/v1/validate/prompt", json={"template": VALID_PROMPT})
PROMPT_REQUEST["template"] = VALID_PROMPT
response = client.post("api/v1/validate/prompt", json=PROMPT_REQUEST)
assert response.status_code == 200
assert response.json() == {"input_variables": ["product"]}
assert response.json()["input_variables"] == ["product"]
def test_invalid_prompt(client: TestClient):
response = client.post("api/v1/validate/prompt", json={"template": INVALID_PROMPT})
PROMPT_REQUEST["template"] = INVALID_PROMPT
response = client.post(
"api/v1/validate/prompt",
json=PROMPT_REQUEST,
)
assert response.status_code == 200
assert response.json() == {"input_variables": []}
assert response.json()["input_variables"] == []
@pytest.mark.parametrize(
@@ -116,8 +201,8 @@ def test_invalid_prompt(client: TestClient):
],
)
def test_various_prompts(client, prompt, expected_input_variables):
response = client.post("api/v1/validate/prompt", json={"template": prompt})
TimeTravelGuideChainNode().to_dict()
PROMPT_REQUEST["template"] = prompt
response = client.post("api/v1/validate/prompt", json=PROMPT_REQUEST)
assert response.status_code == 200
assert response.json() == {
"input_variables": expected_input_variables,
}
assert response.json()["input_variables"] == expected_input_variables

View file

@@ -92,51 +92,3 @@ def test_prompt_template(client: TestClient):
"advanced": False,
"info": "",
}
def test_zero_shot_prompt(client: TestClient):
    """ZeroShotPrompt's template exposes prefix/suffix/format_instructions prompt fields."""
    response = client.get("api/v1/all")
    assert response.status_code == 200
    template = response.json()["prompts"]["ZeroShotPrompt"]["template"]

    def expected_field(name, value, required):
        # All three prompt fields share every attribute except name, value
        # and whether they are required.
        return {
            "required": required,
            "placeholder": "",
            "show": True,
            "multiline": True,
            "value": value,
            "password": False,
            "name": name,
            "type": "prompt",
            "list": False,
            "advanced": False,
            "info": "",
        }

    assert template["prefix"] == expected_field(
        "prefix",
        "Answer the following questions as best you can. You have access to the following tools:",  # noqa: E501
        False,
    )
    assert template["suffix"] == expected_field(
        "suffix",
        "Begin!\n\nQuestion: {input}\nThought:{agent_scratchpad}",
        True,
    )
    assert template["format_instructions"] == expected_field(
        "format_instructions",
        "Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question",  # noqa: E501
        True,
    )