parent ebfce9d8ea
commit 5ee995e89d
18 changed files with 26 additions and 39 deletions
@@ -261,8 +261,7 @@ def get_suggestion_message(outdated_components: list[str]) -> str:
         )
     components = ", ".join(outdated_components)
     return (
-        f"The flow contains {count} outdated components. "
-        f"We recommend updating the following components: {components}."
+        f"The flow contains {count} outdated components. We recommend updating the following components: {components}."
     )
@@ -511,8 +511,7 @@ async def process() -> None:
     )
     raise HTTPException(
         status_code=status.HTTP_400_BAD_REQUEST,
-        detail="The /process endpoint is deprecated and will be removed in a future version. "
-        "Please use /run instead.",
+        detail="The /process endpoint is deprecated and will be removed in a future version. Please use /run instead.",
     )
@@ -141,7 +141,7 @@ def parse_context(curl_command):
 
     for curl_header in parsed_args.headers:
         if curl_header.startswith(":"):
-            occurrence = [m.start() for m in re.finditer(":", curl_header)]
+            occurrence = [m.start() for m in re.finditer(r":", curl_header)]
             header_key, header_value = curl_header[: occurrence[1]], curl_header[occurrence[1] + 1 :]
         else:
             header_key, header_value = curl_header.split(":", 1)
@@ -88,11 +88,7 @@ class BaseFileComponent(Component, ABC):
                 text_preview = f"text_preview='{text_preview}'"
             else:
                 text_preview = f"{len(self.data)} data objects"
-            return (
-                f"BaseFile(path={self.path}"
-                f", delete_after_processing={self.delete_after_processing}"
-                f", {text_preview}"
-            )
+            return f"BaseFile(path={self.path}, delete_after_processing={self.delete_after_processing}, {text_preview}"
 
     # Subclasses can override these class variables
     VALID_EXTENSIONS: list[str] = []  # To be overridden by child classes
@@ -233,7 +233,7 @@ class ComposioAPIComponent(LCToolComponent):
                     build_config[field]["show"] = True
                     build_config[field]["advanced"] = False
 
-        if field_name in {"app_names"} and hasattr(self, "api_key") and self.api_key != "":
+        if field_name == "app_names" and hasattr(self, "api_key") and self.api_key != "":
            # app_name = self._get_normalized_app_name()
             app_name = self.app_names
             try:
@@ -76,7 +76,7 @@ class MergeDataComponent(Component):
                 # Use the existing value if the key exists, otherwise use an empty string
                 value = data_input.data.get(key, "")
                 if key not in data_input.data:
-                    log_message = f"Key '{key}' missing in data input at index {idx}. " "Assigning empty string."
+                    log_message = f"Key '{key}' missing in data input at index {idx}. Assigning empty string."
                     logger.debug(log_message)
                 merged_data_dict[key] = value
@@ -55,7 +55,7 @@ class StructuredOutputComponent(Component):
                 "display_name": "Type",
                 "type": "str",
                 "description": (
-                    "Indicate the data type of the output field " "(e.g., str, int, float, bool, list, dict)."
+                    "Indicate the data type of the output field (e.g., str, int, float, bool, list, dict)."
                 ),
                 "default": "text",
             },
@@ -76,7 +76,7 @@ class MergeDataComponent(Component):
                 # Use the existing value if the key exists, otherwise use an empty string
                 value = data_input.data.get(key, "")
                 if key not in data_input.data:
-                    log_message = f"Key '{key}' missing in data input at index {idx}. " "Assigning empty string."
+                    log_message = f"Key '{key}' missing in data input at index {idx}. Assigning empty string."
                     logger.debug(log_message)
                 merged_data_dict[key] = value
@@ -78,7 +78,7 @@ class UpdateDataComponent(Component):
 
         if field_value_int > self.MAX_FIELDS:
             build_config["number_of_fields"]["value"] = self.MAX_FIELDS
-            msg = f"Number of fields cannot exceed {self.MAX_FIELDS}. " "Try using a Component to combine two Data."
+            msg = f"Number of fields cannot exceed {self.MAX_FIELDS}. Try using a Component to combine two Data."
             raise ValueError(msg)
 
         existing_fields = {}
@@ -154,5 +154,5 @@ class UpdateDataComponent(Component):
         """This function validates that the Text Key is one of the keys in the Data."""
         data_keys = data.data.keys()
         if self.text_key and self.text_key not in data_keys:
-            msg = f"Text Key: '{self.text_key}' not found in the Data keys: " f"{', '.join(data_keys)}"
+            msg = f"Text Key: '{self.text_key}' not found in the Data keys: {', '.join(data_keys)}"
             raise ValueError(msg)
@@ -111,10 +111,10 @@ class AstraDBCQLToolComponent(LCToolComponent):
             elif self.static_filters[k] is not None:
                 key.append(self.static_filters[k])
 
-        url = f'{astra_url}{"/".join(key)}?page-size={self.number_of_results}'
+        url = f"{astra_url}{'/'.join(key)}?page-size={self.number_of_results}"
 
         if self.projection_fields != "*":
-            url += f'&fields={urllib.parse.quote(self.projection_fields.replace(" ", ""))}'
+            url += f"&fields={urllib.parse.quote(self.projection_fields.replace(' ', ''))}"
 
         res = requests.request("GET", url=url, headers=headers, timeout=10)
@@ -27,8 +27,7 @@ class YouTubeTranscriptsComponent(Component):
             display_name="Transcript Format",
             options=["text", "chunks"],
             value="text",
-            info="The format of the transcripts. Either 'text' for a single output "
-            "or 'chunks' for timestamped chunks.",
+            info="The format of the transcripts. Either 'text' for a single output or 'chunks' for timestamped chunks.",
             advanced=True,
         ),
         IntInput(
@@ -179,8 +178,7 @@ class YouTubeTranscriptsComponent(Component):
             ],
             value="en",
             info=(
-                "Specify to make sure the transcripts are retrieved in your desired language. "
-                "Defaults to English: 'en'"
+                "Specify to make sure the transcripts are retrieved in your desired language. Defaults to English: 'en'"
             ),
         ),
         DropdownInput(
@@ -188,7 +186,7 @@ class YouTubeTranscriptsComponent(Component):
             display_name="Translation Language",
             advanced=True,
             options=["", "en", "es", "fr", "de", "it", "pt", "ru", "ja", "ko", "hi", "ar", "id"],
-            info="Translate the transcripts to the specified language. " "Leave empty for no translation.",
+            info="Translate the transcripts to the specified language. Leave empty for no translation.",
         ),
     ]
@@ -8,8 +8,7 @@ from langflow.schema import Data
 class UnstructuredComponent(BaseFileComponent):
     display_name = "Unstructured API"
     description = (
-        "Uses Unstructured.io API to extract clean text from raw source documents. "
-        "Supports a wide range of file types."
+        "Uses Unstructured.io API to extract clean text from raw source documents. Supports a wide range of file types."
     )
     documentation = (
         "https://python.langchain.com/api_reference/unstructured/document_loaders/"
@@ -625,10 +625,7 @@ class Component(CustomComponent):
     def _set_parameter_or_attribute(self, key, value) -> None:
         if isinstance(value, Component):
             methods = ", ".join([f"'{output.method}'" for output in value.outputs])
-            msg = (
-                f"You set {value.display_name} as value for `{key}`. "
-                f"You should pass one of the following: {methods}"
-            )
+            msg = f"You set {value.display_name} as value for `{key}`. You should pass one of the following: {methods}"
             raise TypeError(msg)
         self._set_input_value(key, value)
         self._parameters[key] = value
@@ -75,7 +75,7 @@ class DirectoryReader:
                 continue
             items.append({"name": menu["name"], "path": menu["path"], "components": components})
         filtered = [menu for menu in items if menu["components"]]
-        logger.debug(f'Filtered components {"with errors" if with_errors else ""}: {len(filtered)}')
+        logger.debug(f"Filtered components {'with errors' if with_errors else ''}: {len(filtered)}")
         return {"menu": filtered}
 
     def validate_code(self, file_content) -> bool:
@@ -22,7 +22,7 @@ from langflow.settings import DEV
 VALID_LOG_LEVELS = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
 # Human-readable
 DEFAULT_LOG_FORMAT = (
-    "<green>{time:YYYY-MM-DD HH:mm:ss}</green> - <level>" "{level: <8}</level> - {module} - <level>{message}</level>"
+    "<green>{time:YYYY-MM-DD HH:mm:ss}</green> - <level>{level: <8}</level> - {module} - <level>{message}</level>"
 )
@@ -74,13 +74,13 @@ class ArizePhoenixTracer(BaseTracer):
 
     def setup_arize_phoenix(self) -> bool:
         """Configures Arize/Phoenix specific environment variables and registers the tracer provider."""
-        arize_phoenix_batch = os.getenv("ARIZE_PHOENIX_BATCH", "False").lower() in (
+        arize_phoenix_batch = os.getenv("ARIZE_PHOENIX_BATCH", "False").lower() in {
             "true",
             "t",
             "yes",
             "y",
             "1",
-        )
+        }
 
         # Arize Config
         arize_api_key = os.getenv("ARIZE_API_KEY", None)
@@ -116,7 +116,7 @@ class TestStructuredOutputComponent:
                 "display_name": "Type",
                 "type": "str",
                 "description": (
-                    "Indicate the data type of the output field " "(e.g., str, int, float, bool, list, dict)."
+                    "Indicate the data type of the output field (e.g., str, int, float, bool, list, dict)."
                 ),
             },
             {
@@ -158,7 +158,7 @@ class TestStructuredOutputComponent:
                 "display_name": "Type",
                 "type": "str",
                 "description": (
-                    "Indicate the data type of the output field " "(e.g., str, int, float, bool, list, dict)."
+                    "Indicate the data type of the output field (e.g., str, int, float, bool, list, dict)."
                 ),
             },
             {
@@ -215,10 +215,9 @@ def test_vector_store_rag_add(ingestion_graph: Graph, rag_graph: Graph):
         f"Vertices mismatch: {len(ingestion_graph_copy.vertices)} "
         f"!= {len(ingestion_graph.vertices)} + {len(rag_graph.vertices)}"
     )
-    assert len(ingestion_graph_copy.edges) == len(ingestion_graph.edges) + len(rag_graph.edges), (
-        f"Edges mismatch: {len(ingestion_graph_copy.edges)} "
-        f"!= {len(ingestion_graph.edges)} + {len(rag_graph.edges)}"
-    )
+    assert len(ingestion_graph_copy.edges) == len(ingestion_graph.edges) + len(
+        rag_graph.edges
+    ), f"Edges mismatch: {len(ingestion_graph_copy.edges)} != {len(ingestion_graph.edges)} + {len(rag_graph.edges)}"
 
     combined_graph_dump = ingestion_graph_copy.dump(
         name="Combined Graph", description="Graph for data ingestion and RAG", endpoint_name="combined"