🚀 feat(utils.py): add support for configurable LLM caching

The `setup_llm_caching` function now imports the cache class named by `settings.cache` from the `langchain.cache` module and, on success, sets `langchain.llm_cache` to an instance of that class. If the import fails, a warning is logged; if any other exception is raised during setup, a warning is logged with the error message.
This commit is contained in:
Gabriel Luiz Freitas Almeida 2023-06-25 18:19:59 -03:00
commit 89c2e5b064

View file

@ -64,7 +64,16 @@ def extract_input_variables_from_prompt(prompt: str) -> list[str]:
def setup_llm_caching():
    """Install an LLM cache on ``langchain.llm_cache``.

    A ``SQLiteCache`` is installed first as a safe default. Then the cache
    class named by ``settings.cache`` is looked up in ``langchain.cache``
    and, if found, replaces the default. Any failure (missing class or an
    error while instantiating it) is logged as a warning and the default
    ``SQLiteCache`` remains in effect — this function never raises.
    """
    # Imports are local so the (potentially heavy) langchain import is
    # deferred until caching is actually set up.
    import langchain
    from langchain.cache import SQLiteCache

    from langflow.interface.importing.utils import import_class
    from langflow.settings import settings

    logger.debug("Setting up LLM caching")
    # Deliberate fallback: install the default cache BEFORE attempting the
    # configured one, so a failure below still leaves a working cache.
    langchain.llm_cache = SQLiteCache()
    try:
        cache_class = import_class(f"langchain.cache.{settings.cache}")
        logger.debug(f"Setting up LLM caching with {cache_class.__name__}")
        langchain.llm_cache = cache_class()
        logger.info(f"LLM caching setup with {cache_class.__name__}")
    except ImportError:
        # Configured cache class not found in langchain.cache; keep default.
        logger.warning(f"Could not import {settings.cache}. Using default SQLiteCache.")
    except Exception as exc:
        # Any other failure (e.g. cache_class() raised); keep default.
        logger.warning(f"Could not setup LLM caching. Error: {exc}")