🔧 chore(utils.py): add setup_llm_caching function to set up LLM caching

 feat(main.py): call setup_llm_caching function on app startup
The `setup_llm_caching` function is added to `utils.py` to configure LLM caching. It is then called on application startup in `main.py` via FastAPI's `app.on_event("startup")` hook. This improves performance by caching LLM call results in a SQLite database, so repeated identical prompts do not trigger new LLM requests.
This commit is contained in:
Gabriel Luiz Freitas Almeida 2023-06-25 09:12:41 -03:00
commit 29542f4cf8
2 changed files with 12 additions and 0 deletions

View file

@@ -7,6 +7,7 @@ import re
import yaml
from langchain.base_language import BaseLanguageModel
from PIL.Image import Image
from langflow.utils.logger import logger
def load_file_into_dict(file_path: str) -> dict:
@@ -58,3 +59,12 @@ def try_setting_streaming_options(langchain_object, websocket):
def extract_input_variables_from_prompt(prompt: str) -> list[str]:
    """Return the input-variable names found in *prompt*.

    A variable is any (non-greedy) text enclosed in ``{`` and ``}``,
    e.g. ``"Hello {name}"`` yields ``["name"]``. Names are returned in
    order of appearance; duplicates are kept.
    """
    variable_pattern = re.compile(r"{(.*?)}")
    return [match.group(1) for match in variable_pattern.finditer(prompt)]
def setup_llm_caching():
    """Configure LangChain to cache LLM calls in a local SQLite database.

    Assigns a ``SQLiteCache`` instance to ``langchain.llm_cache``, the
    global hook LangChain consults before issuing an LLM request. The
    imports are deferred to call time so importing this module stays cheap.
    """
    logger.debug("Setting up LLM caching")

    import langchain
    from langchain.cache import SQLiteCache

    # SQLiteCache() with no arguments uses its default on-disk database
    # path — presumably `.langchain.db` in the working directory; confirm
    # against the installed langchain version.
    langchain.llm_cache = SQLiteCache()

View file

@@ -3,6 +3,7 @@ from fastapi.middleware.cors import CORSMiddleware
from langflow.api import router
from langflow.database.base import create_db_and_tables
from langflow.interface.utils import setup_llm_caching
def create_app():
@@ -28,6 +29,7 @@ def create_app():
app.include_router(router)
app.on_event("startup")(create_db_and_tables)
app.on_event("startup")(setup_llm_caching)
return app