diff --git a/src/backend/base/langflow/base/vectorstores/utils.py b/src/backend/base/langflow/base/vectorstores/utils.py
index c2af08702..50ab3492a 100644
--- a/src/backend/base/langflow/base/vectorstores/utils.py
+++ b/src/backend/base/langflow/base/vectorstores/utils.py
@@ -18,7 +18,6 @@ def chroma_collection_to_data(collection_dict: dict):
             "text": doc,
         }
         if ("metadatas" in collection_dict) and collection_dict["metadatas"][i]:
-            for key, value in collection_dict["metadatas"][i].items():
-                data_dict[key] = value
+            data_dict.update(collection_dict["metadatas"][i].items())
         data.append(Data(**data_dict))
     return data
diff --git a/src/backend/base/langflow/components/deactivated/SplitText.py b/src/backend/base/langflow/components/deactivated/SplitText.py
index b83aaa0d4..36157538e 100644
--- a/src/backend/base/langflow/components/deactivated/SplitText.py
+++ b/src/backend/base/langflow/components/deactivated/SplitText.py
@@ -45,18 +45,12 @@ class SplitTextComponent(Component):
     ]

     def _docs_to_data(self, docs):
-        data = []
-        for doc in docs:
-            data.append(Data(text=doc.page_content, data=doc.metadata))
-        return data
+        return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]

     def split_text(self) -> list[Data]:
         separator = unescape_string(self.separator)

-        documents = []
-        for _input in self.data_inputs:
-            if isinstance(_input, Data):
-                documents.append(_input.to_lc_document())
+        documents = [_input.to_lc_document() for _input in self.data_inputs if isinstance(_input, Data)]

         splitter = CharacterTextSplitter(
             chunk_overlap=self.chunk_overlap,
diff --git a/src/backend/base/langflow/components/helpers/CSVtoData.py b/src/backend/base/langflow/components/helpers/CSVtoData.py
index 2fc1332af..61555f938 100644
--- a/src/backend/base/langflow/components/helpers/CSVtoData.py
+++ b/src/backend/base/langflow/components/helpers/CSVtoData.py
@@ -69,10 +69,8 @@ class CSVToDataComponent(Component):
                 msg = "No CSV data provided."
                 raise ValueError(msg)

-            result = []
             csv_reader = csv.DictReader(io.StringIO(csv_data))
-            for row in csv_reader:
-                result.append(Data(data=row))
+            result = [Data(data=row) for row in csv_reader]

             if not result:
                 self.status = "The CSV data is empty."
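
Note: the `chroma_collection_to_data` change relies on `dict.update` accepting either a mapping or an iterable of (key, value) pairs, so the explicit loop is unnecessary; passing the metadata dict itself, without `.items()`, would work equally well. A minimal standalone sketch with invented metadata values:

# Illustrative only; the keys and values here are made up for the example.
data_dict = {"id": "doc-1", "text": "hello"}
metadata = {"source": "a.txt", "page": 3}

data_dict.update(metadata.items())  # iterable of (key, value) pairs, as in the patch
data_dict.update(metadata)          # passing the mapping directly is equivalent

print(data_dict)  # {'id': 'doc-1', 'text': 'hello', 'source': 'a.txt', 'page': 3}
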
diff --git a/src/backend/base/langflow/components/helpers/SplitText.py b/src/backend/base/langflow/components/helpers/SplitText.py
index b83aaa0d4..36157538e 100644
--- a/src/backend/base/langflow/components/helpers/SplitText.py
+++ b/src/backend/base/langflow/components/helpers/SplitText.py
@@ -45,18 +45,12 @@ class SplitTextComponent(Component):
     ]

     def _docs_to_data(self, docs):
-        data = []
-        for doc in docs:
-            data.append(Data(text=doc.page_content, data=doc.metadata))
-        return data
+        return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]

     def split_text(self) -> list[Data]:
         separator = unescape_string(self.separator)

-        documents = []
-        for _input in self.data_inputs:
-            if isinstance(_input, Data):
-                documents.append(_input.to_lc_document())
+        documents = [_input.to_lc_document() for _input in self.data_inputs if isinstance(_input, Data)]

         splitter = CharacterTextSplitter(
             chunk_overlap=self.chunk_overlap,
diff --git a/src/backend/base/langflow/components/prompts/LangChainHubPrompt.py b/src/backend/base/langflow/components/prompts/LangChainHubPrompt.py
index 5048abc04..6e8cae4ae 100644
--- a/src/backend/base/langflow/components/prompts/LangChainHubPrompt.py
+++ b/src/backend/base/langflow/components/prompts/LangChainHubPrompt.py
@@ -51,9 +51,7 @@ class LangChainHubPromptComponent(Component):
             template_messages = [HumanMessagePromptTemplate(prompt=template)]

         # Extract the messages from the prompt data
-        prompt_template = []
-        for message_data in template_messages:
-            prompt_template.append(message_data.prompt)
+        prompt_template = [message_data.prompt for message_data in template_messages]

         # Regular expression to find all instances of {}
         pattern = r"\{(.*?)\}"
@@ -77,7 +75,7 @@ class LangChainHubPromptComponent(Component):
         build_config["langchain_hub_prompt"]["info"] = full_template

         # Remove old parameter inputs if any
-        for key, _ in build_config.copy().items():
+        for key in build_config.copy():
             if key.startswith("param_"):
                 del build_config[key]

diff --git a/src/backend/base/langflow/components/tools/GleanSearchAPI.py b/src/backend/base/langflow/components/tools/GleanSearchAPI.py
index 8408e8503..a20866f36 100644
--- a/src/backend/base/langflow/components/tools/GleanSearchAPI.py
+++ b/src/backend/base/langflow/components/tools/GleanSearchAPI.py
@@ -138,10 +138,7 @@ class GleanSearchAPIComponent(LCToolComponent):
         )

         # Build the data
-        data = []
-        for result in results:
-            data.append(Data(data=result, text=result["snippets"][0]["text"]))
-
+        data = [Data(data=result, text=result["snippets"][0]["text"]) for result in results]
         self.status = data

         return data
diff --git a/src/backend/base/langflow/components/tools/PythonCodeStructuredTool.py b/src/backend/base/langflow/components/tools/PythonCodeStructuredTool.py
index c27632619..be5c7c237 100644
--- a/src/backend/base/langflow/components/tools/PythonCodeStructuredTool.py
+++ b/src/backend/base/langflow/components/tools/PythonCodeStructuredTool.py
@@ -292,13 +292,12 @@ class PythonCodeStructuredTool(LCToolComponent):
         return classes, functions

     def _find_imports(self, code: str) -> dotdict:
-        imports = []
+        imports: list[str] = []
         from_imports = []
         parsed_code = ast.parse(code)
         for node in parsed_code.body:
             if isinstance(node, ast.Import):
-                for alias in node.names:
-                    imports.append(alias.name)
+                imports.extend(alias.name for alias in node.names)
             elif isinstance(node, ast.ImportFrom):
                 from_imports.append(node)
         return dotdict({"imports": imports, "from_imports": from_imports})
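
Note: the `_find_imports` rewrite uses `list.extend` with a generator expression, which appends every name from an `import a, b` statement without building an intermediate list. A self-contained sketch of the same pattern:

import ast

imports: list[str] = []
tree = ast.parse("import os, sys\nfrom pathlib import Path")
for node in tree.body:
    if isinstance(node, ast.Import):
        # One ast.Import node may carry several aliases (import os, sys).
        imports.extend(alias.name for alias in node.names)

print(imports)  # ['os', 'sys']
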
diff --git a/src/backend/base/langflow/components/tools/SearXNGTool.py b/src/backend/base/langflow/components/tools/SearXNGTool.py
index a6a4bb3f0..94a34024b 100644
--- a/src/backend/base/langflow/components/tools/SearXNGTool.py
+++ b/src/backend/base/langflow/components/tools/SearXNGTool.py
@@ -70,9 +70,7 @@ class SearXNGToolComponent(LCToolComponent):
                 for selected_category in build_config["categories"]["value"]:
                     if selected_category not in build_config["categories"]["options"]:
                         build_config["categories"]["value"].remove(selected_category)
-                languages = []
-                for language in data["locales"]:
-                    languages.append(language)
+                languages = list(data["locales"])
                 build_config["language"]["options"] = languages.copy()
         except Exception as e:
             self.status = f"Failed to extract names: {e}"
@@ -107,11 +105,8 @@ class SearXNGToolComponent(LCToolComponent):
                     },
                 ).json()

-            results = []
             num_results = min(SearxSearch._max_results, len(response["results"]))
-            for i in range(num_results):
-                results.append(response["results"][i])
-            return results
+            return [response["results"][i] for i in range(num_results)]
         except Exception as e:
             return [f"Failed to search: {e}"]

diff --git a/src/backend/base/langflow/custom/code_parser/code_parser.py b/src/backend/base/langflow/custom/code_parser/code_parser.py
index 6d3adea1c..c6802bc5d 100644
--- a/src/backend/base/langflow/custom/code_parser/code_parser.py
+++ b/src/backend/base/langflow/custom/code_parser/code_parser.py
@@ -389,8 +389,7 @@ class CodeParser:
         bases = []
         for base in dunder_class.__bases__:
             bases.append(base)
-            for bases_base in base.__bases__:
-                bases.append(bases_base)
+            bases.extend(base.__bases__)
         return bases

     def parse_code(self) -> dict[str, Any]:
diff --git a/src/backend/base/langflow/custom/custom_component/component.py b/src/backend/base/langflow/custom/custom_component/component.py
index 75c2b3e79..8f4795ea3 100644
--- a/src/backend/base/langflow/custom/custom_component/component.py
+++ b/src/backend/base/langflow/custom/custom_component/component.py
@@ -404,11 +404,13 @@ class Component(CustomComponent):
         # Get the input object from the current component
         input_ = self._inputs[input_name]
         # Iterate over outputs to find matches based on types
-        for output in outputs:
-            for output_type in output.types:
-                # Check if the output type matches the input's accepted types
-                if input_.input_types and output_type in input_.input_types:
-                    matching_pairs.append((output, input_))
+        matching_pairs = [
+            (output, input_)
+            for output in outputs
+            for output_type in output.types
+            # Check if the output type matches the input's accepted types
+            if input_.input_types and output_type in input_.input_types
+        ]
         # If multiple matches are found, raise an error indicating ambiguity
         if len(matching_pairs) > 1:
             matching_pairs_str = self._build_error_string_from_matching_pairs(matching_pairs)
@@ -593,7 +595,7 @@ class Component(CustomComponent):
         #! works and then update this later
         field_config = self.get_template_config(self)
         frontend_node = ComponentFrontendNode.from_inputs(**field_config)
-        for key, _value in self._inputs.items():
+        for key in self._inputs:
             frontend_node.set_field_load_from_db_in_template(key, False)

         self._map_parameters_on_frontend_node(frontend_node)
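
Note: the `component.py` change folds a nested loop into a single comprehension with two `for` clauses; a comment line on its own is legal inside a bracketed comprehension, which is why the original comment survives the rewrite. A minimal sketch with invented output/type data:

# Hypothetical stand-ins for a component's outputs and an input's accepted types.
outputs = [("text_output", ["Message", "Data"]), ("data_output", ["Data"])]
accepted = {"Data"}

matching = [
    name
    for name, types in outputs
    for t in types
    # Keep only outputs whose type the input accepts.
    if t in accepted
]

print(matching)  # ['text_output', 'data_output']
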
diff --git a/src/backend/base/langflow/custom/utils.py b/src/backend/base/langflow/custom/utils.py
index 979dc17c1..f2cb68bcc 100644
--- a/src/backend/base/langflow/custom/utils.py
+++ b/src/backend/base/langflow/custom/utils.py
@@ -64,9 +64,7 @@ def reorder_fields(frontend_node: CustomComponentFrontendNode, field_order: list
     field_dict = {field.name: field for field in frontend_node.template.fields}
     reordered_fields = [field_dict[name] for name in field_order if name in field_dict]
     # Add any fields that are not in the field_order list
-    for field in frontend_node.template.fields:
-        if field.name not in field_order:
-            reordered_fields.append(field)
+    reordered_fields.extend(field for field in frontend_node.template.fields if field.name not in field_order)
     frontend_node.template.fields = reordered_fields
     frontend_node.field_order = field_order

diff --git a/src/backend/base/langflow/graph/graph/utils.py b/src/backend/base/langflow/graph/graph/utils.py
index eb0a31859..29b743c01 100644
--- a/src/backend/base/langflow/graph/graph/utils.py
+++ b/src/backend/base/langflow/graph/graph/utils.py
@@ -118,7 +118,7 @@ def update_template(template, g_nodes):
     Returns:
         None
     """
-    for _, value in template.items():
+    for value in template.values():
         if not value.get("proxy"):
             continue
         proxy_dict = value["proxy"]
diff --git a/src/backend/base/langflow/helpers/flow.py b/src/backend/base/langflow/helpers/flow.py
index eeb77a99f..964a553c2 100644
--- a/src/backend/base/langflow/helpers/flow.py
+++ b/src/backend/base/langflow/helpers/flow.py
@@ -228,11 +228,7 @@ def get_flow_inputs(graph: Graph) -> list[Vertex]:
     Returns:
         List[Data]: A list of input data, where each record contains the ID, name, and description of the input vertex.
     """
-    inputs = []
-    for vertex in graph.vertices:
-        if vertex.is_input:
-            inputs.append(vertex)
-    return inputs
+    return [vertex for vertex in graph.vertices if vertex.is_input]


 def build_schema_from_inputs(name: str, inputs: list[Vertex]) -> type[BaseModel]:
diff --git a/src/backend/base/langflow/logging/logger.py b/src/backend/base/langflow/logging/logger.py
index e534b68bc..dc8998855 100644
--- a/src/backend/base/langflow/logging/logger.py
+++ b/src/backend/base/langflow/logging/logger.py
@@ -98,10 +98,7 @@ class SizedLogBuffer:
         try:
             with self._wlock:
                 as_list = list(self.buffer)
-            rc = {}
-            for ts, msg in as_list[-last_idx:]:
-                rc[ts] = msg
-            return rc
+            return dict(as_list[-last_idx:])
         finally:
             self._rsemaphore.release()
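
Note: the `SizedLogBuffer` change leans on `dict()` accepting an iterable of (key, value) pairs directly, so the manual accumulation loop collapses into a single constructor call. A small sketch with invented timestamp/message pairs:

# Hypothetical (timestamp, message) pairs standing in for the log buffer contents.
as_list = [(1700000000, "flow started"), (1700000001, "flow finished")]
last_idx = 2

rc = dict(as_list[-last_idx:])  # same result as the deleted loop
print(rc)  # {1700000000: 'flow started', 1700000001: 'flow finished'}
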
diff --git a/src/backend/base/langflow/memory.py b/src/backend/base/langflow/memory.py
index e477e07b4..1244d82da 100644
--- a/src/backend/base/langflow/memory.py
+++ b/src/backend/base/langflow/memory.py
@@ -66,9 +66,7 @@ def add_messages(messages: Message | list[Message], flow_id: str | None = None):
         msg = f"The messages must be instances of Message. Found: {types}"
         raise ValueError(msg)

-    messages_models: list[MessageTable] = []
-    for msg in messages:
-        messages_models.append(MessageTable.from_message(msg, flow_id=flow_id))
+    messages_models = [MessageTable.from_message(msg, flow_id=flow_id) for msg in messages]
     with session_scope() as session:
         messages_models = add_messagetables(messages_models, session)
         return [Message(**message.model_dump()) for message in messages_models]
diff --git a/src/backend/base/langflow/services/factory.py b/src/backend/base/langflow/services/factory.py
index c4f03e766..84800ac11 100644
--- a/src/backend/base/langflow/services/factory.py
+++ b/src/backend/base/langflow/services/factory.py
@@ -74,10 +74,13 @@ def import_all_services_into_a_dict():
             service_name = ServiceType(service_type).value.replace("_service", "")
             module_name = f"langflow.services.{service_name}.service"
             module = importlib.import_module(module_name)
-            for name, obj in inspect.getmembers(module, inspect.isclass):
-                if issubclass(obj, Service) and obj is not Service:
-                    services[name] = obj
-            # break
+            services.update(
+                {
+                    name: obj
+                    for name, obj in inspect.getmembers(module, inspect.isclass)
+                    if issubclass(obj, Service) and obj is not Service
+                }
+            )
         except Exception as exc:
             logger.exception(exc)
             msg = "Could not initialize services. Please check your settings."
diff --git a/src/backend/base/pyproject.toml b/src/backend/base/pyproject.toml
index 56d57d10e..e375019d1 100644
--- a/src/backend/base/pyproject.toml
+++ b/src/backend/base/pyproject.toml
@@ -62,6 +62,7 @@ select = [
     "LOG",
     "NPY",
     "PD",
+    "PERF",
     "PIE",
     "PT",
     "PTH",
@@ -82,6 +83,7 @@ select = [
 ignore = [
     "COM812", # Messes with the formatter
     "ISC001", # Messes with the formatter
+    "PERF203", # Rarely useful
     "RUF006", # TODO (Store a reference to the return value of `asyncio.create_task`)
     "RUF012", # TODO (Mutable class attributes should be annotated with `typing.ClassVar`)
 ]
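
Note: the `pyproject.toml` change enables Ruff's `PERF` rules (derived from perflint), which flag manual loops that could be comprehensions or `extend`/`update` calls — the pattern fixed throughout this diff — while ignoring `PERF203` (try/except inside a loop). A sketch of the kind of code these rules flag, assuming the manual-list-build check (PERF401) is among those enabled:

# Flagged: building a list element by element.
squares = []
for n in range(10):
    squares.append(n * n)

# Preferred rewrite: a list comprehension.
squares = [n * n for n in range(10)]
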