refactor(langflow): add verbose and fix_memory_inputs to langchain_object

Add verbose and fix_memory_inputs handling to the langchain_object in the load_flow_from_json function. The verbose attribute is set to True if the built langchain_object exposes it, and fix_memory_inputs is then called on the langchain_object.
This commit is contained in:
Gabriel Almeida 2023-04-25 23:12:44 -03:00
commit aacfa378f3
2 changed files with 15 additions and 1 deletions

View file

@@ -55,6 +55,7 @@ class CacheManager(Subject):
super().__init__()
self.CACHE = {}
self.current_client_id = None
self.current_cache = {}
@contextmanager
def set_client_id(self, client_id: str):

View file

@@ -20,6 +20,7 @@ from langchain.llms.loading import load_llm_from_config
from langflow.interface.agents.custom import CUSTOM_AGENTS
from langflow.interface.importing.utils import import_by_type
from langflow.interface.run import fix_memory_inputs
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.types import get_type_list
from langflow.interface.utils import load_file_into_dict
@@ -106,7 +107,19 @@ def load_flow_from_json(path: str, build=True):
# Nodes, edges and root node
edges = data_graph["edges"]
graph = Graph(nodes, edges)
return graph.build() if build else graph
if build:
langchain_object = graph.build()
if hasattr(langchain_object, "verbose"):
langchain_object.verbose = True
if hasattr(langchain_object, "return_intermediate_steps"):
# https://github.com/hwchase17/langchain/issues/2068
# Deactivating until we have a frontend solution
# to display intermediate steps
langchain_object.return_intermediate_steps = False
fix_memory_inputs(langchain_object)
return langchain_object
return graph
def replace_zero_shot_prompt_with_prompt_template(nodes):