test: fix output group preview test to be interactive (#2880)

* 🐛 (generalBugs-shard-5.spec.ts): fix test to wait for elements to be interactable before performing actions to prevent flakiness

* change temperature on the canvas instead of on the component
This commit is contained in:
Cristhian Zanforlin Lousa 2024-07-22 17:50:07 -03:00 committed by GitHub
commit 7007c05a08
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 163 additions and 134 deletions

View file

@ -8,146 +8,145 @@ test("user must be able to freeze a path", async ({ page }) => {
"OPENAI_API_KEY required to run this test",
);
const codeOpenAI = `
import operator
from functools import reduce
// const codeOpenAI = `
// import operator
// from functools import reduce
from langchain_openai import ChatOpenAI
from pydantic.v1 import SecretStr
// from langchain_openai import ChatOpenAI
// from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.base.models.openai_constants import MODEL_NAMES
from langflow.field_typing import LanguageModel
from langflow.inputs import (
BoolInput,
DictInput,
DropdownInput,
FloatInput,
IntInput,
MessageInput,
SecretStrInput,
StrInput,
)
// from langflow.base.constants import STREAM_INFO_TEXT
// from langflow.base.models.model import LCModelComponent
// from langflow.base.models.openai_constants import MODEL_NAMES
// from langflow.field_typing import LanguageModel
// from langflow.inputs import (
// BoolInput,
// DictInput,
// DropdownInput,
// FloatInput,
// IntInput,
// MessageInput,
// SecretStrInput,
// StrInput,
// )
// class OpenAIModelComponent(LCModelComponent):
// display_name = "OpenAI"
// description = "Generates text using OpenAI LLMs."
// icon = "OpenAI"
// name = "OpenAIModel"
class OpenAIModelComponent(LCModelComponent):
display_name = "OpenAI"
description = "Generates text using OpenAI LLMs."
icon = "OpenAI"
name = "OpenAIModel"
// inputs = [
// MessageInput(name="input_value", display_name="Input"),
// IntInput(
// name="max_tokens",
// display_name="Max Tokens",
// advanced=True,
// info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
// ),
// DictInput(name="model_kwargs", display_name="Model Kwargs", advanced=True),
// BoolInput(
// name="json_mode",
// display_name="JSON Mode",
// advanced=True,
// info="If True, it will output JSON regardless of passing a schema.",
// ),
// DictInput(
// name="output_schema",
// is_list=True,
// display_name="Schema",
// advanced=True,
// info="The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.",
// ),
// DropdownInput(
// name="model_name", display_name="Model Name", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]
// ),
// StrInput(
// name="openai_api_base",
// display_name="OpenAI API Base",
// advanced=True,
// info="The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
// ),
// SecretStrInput(
// name="api_key",
// display_name="OpenAI API Key",
// info="The OpenAI API Key to use for the OpenAI model.",
// advanced=False,
// value="OPENAI_API_KEY",
// ),
// FloatInput(name="temperature", display_name="Temperature", value=0.1),
// BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True),
// StrInput(
// name="system_message",
// display_name="System Message",
// info="System message to pass to the model.",
// advanced=True,
// ),
// IntInput(
// name="seed",
// display_name="Seed",
// info="The seed controls the reproducibility of the job.",
// advanced=True,
// value=1,
// ),
// ]
inputs = [
MessageInput(name="input_value", display_name="Input"),
IntInput(
name="max_tokens",
display_name="Max Tokens",
advanced=True,
info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
),
DictInput(name="model_kwargs", display_name="Model Kwargs", advanced=True),
BoolInput(
name="json_mode",
display_name="JSON Mode",
advanced=True,
info="If True, it will output JSON regardless of passing a schema.",
),
DictInput(
name="output_schema",
is_list=True,
display_name="Schema",
advanced=True,
info="The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.",
),
DropdownInput(
name="model_name", display_name="Model Name", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]
),
StrInput(
name="openai_api_base",
display_name="OpenAI API Base",
advanced=True,
info="The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
),
SecretStrInput(
name="api_key",
display_name="OpenAI API Key",
info="The OpenAI API Key to use for the OpenAI model.",
advanced=False,
value="OPENAI_API_KEY",
),
FloatInput(name="temperature", display_name="Temperature", value=0.1),
BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True),
StrInput(
name="system_message",
display_name="System Message",
info="System message to pass to the model.",
advanced=True,
),
IntInput(
name="seed",
display_name="Seed",
info="The seed controls the reproducibility of the job.",
advanced=True,
value=1,
),
]
// def build_model(self) -> LanguageModel: # type: ignore[type-var]
// # self.output_schema is a list of dictionaries
// # let's convert it to a dictionary
// output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})
// openai_api_key = self.api_key
// temperature = self.temperature
// model_name: str = self.model_name
// max_tokens = self.max_tokens
// model_kwargs = self.model_kwargs or {}
// openai_api_base = self.openai_api_base or "https://api.openai.com/v1"
// json_mode = bool(output_schema_dict) or self.json_mode
// seed = self.seed
def build_model(self) -> LanguageModel: # type: ignore[type-var]
# self.output_schema is a list of dictionaries
# let's convert it to a dictionary
output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})
openai_api_key = self.api_key
temperature = self.temperature
model_name: str = self.model_name
max_tokens = self.max_tokens
model_kwargs = self.model_kwargs or {}
openai_api_base = self.openai_api_base or "https://api.openai.com/v1"
json_mode = bool(output_schema_dict) or self.json_mode
seed = self.seed
// if openai_api_key:
// api_key = SecretStr(openai_api_key)
// else:
// api_key = None
// output = ChatOpenAI(
// max_tokens=max_tokens or None,
// model_kwargs=model_kwargs,
// model=model_name,
// base_url=openai_api_base,
// api_key=api_key,
// temperature=0.8,
// seed=seed,
// )
// if json_mode:
// if output_schema_dict:
// output = output.with_structured_output(schema=output_schema_dict, method="json_mode") # type: ignore
// else:
// output = output.bind(response_format={"type": "json_object"}) # type: ignore
if openai_api_key:
api_key = SecretStr(openai_api_key)
else:
api_key = None
output = ChatOpenAI(
max_tokens=max_tokens or None,
model_kwargs=model_kwargs,
model=model_name,
base_url=openai_api_base,
api_key=api_key,
temperature=0.8,
seed=seed,
)
if json_mode:
if output_schema_dict:
output = output.with_structured_output(schema=output_schema_dict, method="json_mode") # type: ignore
else:
output = output.bind(response_format={"type": "json_object"}) # type: ignore
// return output # type: ignore
return output # type: ignore
// def _get_exception_message(self, e: Exception):
// """
// Get a message from an OpenAI exception.
def _get_exception_message(self, e: Exception):
"""
Get a message from an OpenAI exception.
// Args:
// exception (Exception): The exception to get the message from.
Args:
exception (Exception): The exception to get the message from.
// Returns:
// str: The message from the exception.
// """
Returns:
str: The message from the exception.
"""
// try:
// from openai import BadRequestError
// except ImportError:
// return
// if isinstance(e, BadRequestError):
// message = e.body.get("message") # type: ignore
// if message:
// return message
// return
try:
from openai import BadRequestError
except ImportError:
return
if isinstance(e, BadRequestError):
message = e.body.get("message") # type: ignore
if message:
return message
return
`;
// `;
if (!process.env.CI) {
dotenv.config({ path: path.resolve(__dirname, "../../.env") });
@ -205,13 +204,17 @@ class OpenAIModelComponent(LCModelComponent):
await page.getByTestId("dropdown-model_name").click();
await page.getByTestId("gpt-4o-1-option").click();
await page.getByText("OpenAI").first().click();
// await page.getByText("OpenAI").first().click();
await page.getByTestId("code-button-modal").first().click();
// await page.getByTestId("code-button-modal").first().click();
await page.locator("textarea").press("Control+a");
await page.locator("textarea").fill(codeOpenAI);
await page.locator('//*[@id="checkAndSaveBtn"]').click();
// await page.locator("textarea").press("Control+a");
// await page.locator("textarea").fill(codeOpenAI);
// await page.locator('//*[@id="checkAndSaveBtn"]').click();
await page.waitForTimeout(2000);
await page.getByTestId("float-input").fill("1.0");
await page.waitForTimeout(2000);
@ -234,6 +237,10 @@ class OpenAIModelComponent(LCModelComponent):
await page.waitForTimeout(3000);
await page.getByTestId("float-input").fill("1.2");
await page.waitForTimeout(2000);
await page.getByTestId("button_run_chat output").click();
await page.waitForSelector("text=built successfully", { timeout: 30000 });

View file

@ -196,19 +196,26 @@ test("should be able to see output preview from grouped components", async ({
.getByTestId("popover-anchor-input-input_value")
.nth(0)
.fill(randomName);
await page.waitForTimeout(1000);
await page
.getByTestId("popover-anchor-input-input_value")
.nth(1)
.fill(secondRandomName);
await page.waitForTimeout(1000);
await page
.getByPlaceholder("Type something...", { exact: true })
.nth(6)
.fill(thirdRandomName);
await page.waitForTimeout(1000);
await page
.getByPlaceholder("Type something...", { exact: true })
.nth(3)
.fill("-");
await page.waitForTimeout(1000);
await page
.getByPlaceholder("Type something...", { exact: true })
.nth(4)
@ -229,9 +236,24 @@ test("should be able to see output preview from grouped components", async ({
await page.getByTestId("output-inspection-combined text").first(),
).not.toBeDisabled();
await page.getByTestId("output-inspection-combined text").first().click();
await page.waitForTimeout(1000);
await page.getByText("Component Output").isVisible();
const text = await page.getByPlaceholder("Empty").textContent();
expect(text).toBe(`${randomName}-${secondRandomName}-${thirdRandomName}`);
const permutations = [
`${randomName}-${secondRandomName}-${thirdRandomName}`,
`${randomName}-${thirdRandomName}-${secondRandomName}`,
`${thirdRandomName}-${randomName}-${secondRandomName}`,
`${thirdRandomName}-${secondRandomName}-${randomName}`,
`${secondRandomName}-${randomName}-${thirdRandomName}`,
`${secondRandomName}-${thirdRandomName}-${randomName}`,
];
const isPermutationIncluded = permutations.some((permutation) =>
text!.includes(permutation),
);
expect(isPermutationIncluded).toBe(true);
});