🐛 fix(endpoints.py): change import statement for typing module to import Annotated from typing module to fix type hinting error

feat(endpoints.py): add support for clear_cache parameter in process_flow endpoint to allow clearing the cache before processing the flow
🐛 fix(process.py): add logic to clear cache if clear_cache parameter is True in process_graph_cached function to fix caching issue
This commit is contained in:
Gabriel Luiz Freitas Almeida 2023-07-25 15:04:41 -03:00
commit a928005b7b
2 changed files with 10 additions and 4 deletions

View file

@@ -1,5 +1,5 @@
from http import HTTPStatus
from typing import Optional
from typing import Annotated, Optional
from langflow.cache.utils import save_uploaded_file
from langflow.database.models.flow import Flow
@@ -7,7 +7,7 @@ from langflow.processing.process import process_graph_cached, process_tweaks
from langflow.utils.logger import logger
from langflow.settings import settings
from fastapi import APIRouter, Depends, HTTPException, UploadFile
from fastapi import APIRouter, Depends, HTTPException, UploadFile, Body
from langflow.interface.custom.custom_component import CustomComponent
@@ -96,6 +96,7 @@ async def process_flow(
flow_id: str,
inputs: Optional[dict] = None,
tweaks: Optional[dict] = None,
clear_cache: Annotated[bool, Body(embed=True)] = False, # noqa: F821
session: Session = Depends(get_session),
):
"""
@@ -115,7 +116,7 @@
graph_data = process_tweaks(graph_data, tweaks)
except Exception as exc:
logger.error(f"Error processing tweaks: {exc}")
response = process_graph_cached(graph_data, inputs)
response = process_graph_cached(graph_data, inputs, clear_cache)
return ProcessResponse(
result=response,
)

View file

@@ -85,12 +85,17 @@ def get_input_str_if_only_one_input(inputs: dict) -> Optional[str]:
return list(inputs.values())[0] if len(inputs) == 1 else None
def process_graph_cached(data_graph: Dict[str, Any], inputs: Optional[dict] = None):
def process_graph_cached(
data_graph: Dict[str, Any], inputs: Optional[dict] = None, clear_cache=False
):
"""
Process graph by extracting input variables and replacing ZeroShotPrompt
with PromptTemplate,then run the graph and return the result and thought.
"""
# Load langchain object
if clear_cache:
build_sorted_vertices_with_caching.clear_cache()
logger.debug("Cleared cache")
langchain_object, artifacts = build_sorted_vertices_with_caching(data_graph)
logger.debug("Loaded LangChain object")
if inputs is None: