diff --git a/src/backend/langflow/api/v1/endpoints.py b/src/backend/langflow/api/v1/endpoints.py
index af8772757..8e3f66805 100644
--- a/src/backend/langflow/api/v1/endpoints.py
+++ b/src/backend/langflow/api/v1/endpoints.py
@@ -1,5 +1,5 @@
 from http import HTTPStatus
-from typing import Optional
+from typing import Annotated, Optional
 
 from langflow.cache.utils import save_uploaded_file
 from langflow.database.models.flow import Flow
@@ -7,7 +7,7 @@
 from langflow.processing.process import process_graph_cached, process_tweaks
 from langflow.utils.logger import logger
 from langflow.settings import settings
-from fastapi import APIRouter, Depends, HTTPException, UploadFile
+from fastapi import APIRouter, Depends, HTTPException, UploadFile, Body
 
 from langflow.interface.custom.custom_component import CustomComponent
 
@@ -96,6 +96,7 @@ async def process_flow(
     flow_id: str,
     inputs: Optional[dict] = None,
     tweaks: Optional[dict] = None,
+    clear_cache: Annotated[bool, Body(embed=True)] = False,  # noqa: F821
     session: Session = Depends(get_session),
 ):
     """
@@ -115,7 +116,7 @@
             graph_data = process_tweaks(graph_data, tweaks)
         except Exception as exc:
             logger.error(f"Error processing tweaks: {exc}")
-    response = process_graph_cached(graph_data, inputs)
+    response = process_graph_cached(graph_data, inputs, clear_cache)
     return ProcessResponse(
         result=response,
     )
diff --git a/src/backend/langflow/processing/process.py b/src/backend/langflow/processing/process.py
index 03e6e4c35..8cefb1f44 100644
--- a/src/backend/langflow/processing/process.py
+++ b/src/backend/langflow/processing/process.py
@@ -85,12 +85,17 @@ def get_input_str_if_only_one_input(inputs: dict) -> Optional[str]:
     return list(inputs.values())[0] if len(inputs) == 1 else None
 
 
-def process_graph_cached(data_graph: Dict[str, Any], inputs: Optional[dict] = None):
+def process_graph_cached(
+    data_graph: Dict[str, Any], inputs: Optional[dict] = None, clear_cache=False
+):
     """
     Process graph by extracting input variables and replacing ZeroShotPrompt
     with PromptTemplate,then run the graph and return the result and thought.
     """
     # Load langchain object
+    if clear_cache:
+        build_sorted_vertices_with_caching.clear_cache()
+        logger.debug("Cleared cache")
     langchain_object, artifacts = build_sorted_vertices_with_caching(data_graph)
     logger.debug("Loaded LangChain object")
     if inputs is None:
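
Because the new parameter is declared with Body(embed=True), FastAPI reads clear_cache from a key inside the request's JSON body rather than from a query string, so an existing inputs/tweaks payload only needs one extra field to opt into cache clearing. Below is a minimal client-side sketch of the call; the base URL, flow id, and the /api/v1/process/{flow_id} route are illustrative assumptions, not taken from this diff.

import requests

BASE_URL = "http://localhost:7860"  # placeholder host; adjust to your deployment
FLOW_ID = "your-flow-id"            # placeholder flow id

# POST the usual process payload plus the new embedded flag. With embed=True,
# FastAPI expects {"clear_cache": true} as a key of the JSON body itself.
response = requests.post(
    f"{BASE_URL}/api/v1/process/{FLOW_ID}",
    json={
        "inputs": {"input": "Hello"},
        "clear_cache": True,  # drop the cached sorted-vertices build before running
    },
)
response.raise_for_status()
print(response.json()["result"])

Keeping the flag in the body (instead of a query parameter) leaves the existing payload shape intact for clients that never send it, since the field defaults to False.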