diff --git a/src/backend/langflow/utils/graph.py b/src/backend/langflow/utils/graph.py index 55d9bc0d3..f7df6cc7a 100644 --- a/src/backend/langflow/utils/graph.py +++ b/src/backend/langflow/utils/graph.py @@ -10,6 +10,26 @@ class Node: def _parse_data(self) -> None: self.data = self._data["data"] + # Data dict: + # {'type': 'LLMChain', 'node': {'template': {'_type': 'llm_chain', 'memory': {'type': 'BaseMemory', 'required': False, 'placeholder': '', 'list': False, 'show': True, 'password': False, 'multiline': False, 'value': None}, 'verbose': {'type': 'bool', 'required': False, 'placeholder': '', 'list': False, 'show': False, 'password': False, 'multiline': False, 'value': False}, 'prompt': {'type': 'BasePromptTemplate', 'required': True, 'placeholder': '', 'list': False, 'show': True, 'password': False, 'multiline': False}, 'llm': {'type': 'BaseLanguageModel', 'required': True, 'placeholder': '', 'list': False, 'show': True, 'password': False, 'multiline': False}, 'output_key': {'type': 'str', 'required': False, 'placeholder': '', 'list': False, 'show': False, 'password': True, 'multiline': False, 'value': 'text'}}, 'description': 'Chain to run queries against LLMs.', 'base_classes': ['Chain']}, 'id': 'dndnode_1', 'value': None} + # base_classes are the classes that the node can be cast to + self.output = self.data["node"]["base_classes"] + template_dict = { + key: value + for key, value in self.data["node"]["template"].items() + if isinstance(value, dict) + } + + self.required_inputs = [ + template_dict[key]["type"] + for key, value in template_dict.items() + if value["required"] + ] + self.optional_inputs = [ + template_dict[key]["type"] + for key, value in template_dict.items() + if not value["required"] + ] def add_edge(self, edge: "Edge") -> None: self.edges.append(edge) @@ -28,9 +48,23 @@ class Edge: def __init__(self, source: "Node", target: "Node"): self.source: "Node" = source self.target: "Node" = target + self.validate_edge() + + def validate_edge(self) 
-> None: + # Validate that the outputs of the source node are valid inputs for the target node + self.coming_out = self.source.output + self.going_in = self.target.required_inputs + self.target.optional_inputs + # Both lists contain strings and sometimes a string contains the value we are looking for + # e.g. coming_out=["Chain"] and going_in=["LLMChain"] + # so we need to check if any of the strings in coming_out is in going_in + self.valid = any( + output in going_in + for output in self.coming_out + for going_in in self.going_in + ) def __repr__(self) -> str: - return f"Edge(source={self.source.id}, target={self.target.id})" + return f"Edge(source={self.source.id}, target={self.target.id}, valid={self.valid}, coming_out={self.coming_out}, going_in={self.going_in})" class Graph: @@ -92,3 +126,12 @@ class Graph: def _build_nodes(self) -> List[Node]: return [Node(node) for node in self._nodes] + + def get_children_by_module_type(self, node: Node, module_type: str) -> List[Node]: + children = [] + module_types = [node.data["type"]] + if "node" in node.data: + module_types += node.data["node"]["base_classes"] + if module_type in module_types: + children.append(node) + return children diff --git a/src/backend/langflow/utils/payload.py b/src/backend/langflow/utils/payload.py index 902dd9af9..6ee7152ce 100644 --- a/src/backend/langflow/utils/payload.py +++ b/src/backend/langflow/utils/payload.py @@ -72,11 +72,10 @@ def build_json(root: Node, graph: Graph) -> Dict: # Otherwise, recursively build the child nodes children = [] for local_node in local_nodes: - module_types = [local_node.data["type"]] - if "node" in local_node.data: - module_types += local_node.data["node"]["base_classes"] - if module_type in module_types: - children.append(local_node) + node_children = graph.get_children_by_module_type( + local_node, module_type + ) + children.extend(node_children) if value["required"] and not children: raise ValueError(f"No child with type {module_type} found") diff --git 
a/tests/data/basic_example.json b/tests/data/basic_example.json index de3893b00..369272a6f 100644 --- a/tests/data/basic_example.json +++ b/tests/data/basic_example.json @@ -1,15 +1,16 @@ { - "name": "New Flow 9", - "id": "1", + "name": "New Flow ", + "id": "0", "data": { - "nodes": [{ + "nodes": [ + { "width": 384, "height": 391, - "id": "dndnode_61", + "id": "dndnode_1", "type": "genericNode", "position": { - "x": 764, - "y": 382 + "x": 644, + "y": 348 }, "data": { "type": "LLMChain", @@ -17,7 +18,7 @@ "template": { "_type": "llm_chain", "memory": { - "type": "Memory", + "type": "BaseMemory", "required": false, "placeholder": "", "list": false, @@ -34,7 +35,7 @@ "show": false, "password": false, "multiline": false, - "value": true + "value": false }, "prompt": { "type": "BasePromptTemplate", @@ -46,7 +47,7 @@ "multiline": false }, "llm": { - "type": "BaseLLM", + "type": "BaseLanguageModel", "required": true, "placeholder": "", "list": false, @@ -70,24 +71,22 @@ "Chain" ] }, - "id": "dndnode_61", + "id": "dndnode_1", "value": null }, - "selected": false, "positionAbsolute": { - "x": 764, - "y": 382 - }, - "dragging": false + "x": 644, + "y": 348 + } }, { "width": 384, "height": 351, - "id": "dndnode_62", + "id": "dndnode_4", "type": "genericNode", "position": { - "x": 1488, - "y": 550 + "x": 1236, + "y": 39.59999999999957 }, "data": { "type": "ZeroShotAgent", @@ -131,22 +130,22 @@ "Agent" ] }, - "id": "dndnode_62", + "id": "dndnode_4", "value": null }, "positionAbsolute": { - "x": 1488, - "y": 550 + "x": 1236, + "y": 39.59999999999957 } }, { "width": 384, "height": 529, - "id": "dndnode_63", + "id": "dndnode_5", "type": "genericNode", "position": { - "x": 206, - "y": 210 + "x": 96, + "y": -299.2000000000003 }, "data": { "type": "ZeroShotPrompt", @@ -186,24 +185,24 @@ "BasePromptTemplate" ] }, - "id": "dndnode_63", + "id": "dndnode_5", "value": null }, "selected": false, "positionAbsolute": { - "x": 206, - "y": 210 + "x": 96, + "y": -299.2000000000003 }, 
"dragging": false }, { "width": 384, - "height": 501, - "id": "dndnode_64", + "height": 477, + "id": "dndnode_8", "type": "genericNode", "position": { - "x": 210, - "y": 818 + "x": 75, + "y": 328 }, "data": { "type": "OpenAI", @@ -228,7 +227,7 @@ "show": false, "password": false, "multiline": false, - "value": true + "value": null }, "client": { "type": "Any", @@ -250,7 +249,6 @@ "multiline": false, "value": "text-davinci-003", "options": [ - "gpt-3.5-turbo", "text-davinci-003", "text-davinci-002" ] @@ -343,7 +341,7 @@ "show": true, "password": true, "multiline": false, - "value": "---" + "value": "sk-" }, "batch_size": { "type": "int", @@ -399,27 +397,28 @@ "description": "Generic OpenAI class that uses model name.", "base_classes": [ "BaseOpenAI", - "BaseLLM" + "BaseLLM", + "BaseLanguageModel" ] }, - "id": "dndnode_64", + "id": "dndnode_8", "value": null }, "selected": false, "positionAbsolute": { - "x": 210, - "y": 818 + "x": 75, + "y": 328 }, "dragging": false }, { "width": 384, "height": 397, - "id": "dndnode_65", + "id": "dndnode_9", "type": "genericNode", "position": { - "x": 776, - "y": 922 + "x": 643, + "y": 824 }, "data": { "type": "Serper Search", @@ -427,7 +426,7 @@ "template": { "serper_api_key": { "type": "str", - "required": false, + "required": true, "list": false, "show": true, "placeholder": "", @@ -441,101 +440,66 @@ "Tool" ] }, - "id": "dndnode_65", + "id": "dndnode_9", "value": null }, - "selected": false, + "selected": true, "positionAbsolute": { - "x": 776, - "y": 922 + "x": 643, + "y": 824 }, "dragging": false } ], - "edges": [{ - "source": "dndnode_63", - "sourceHandle": "ZeroShotPrompt|dndnode_63|BasePromptTemplate", - "target": "dndnode_61", - "targetHandle": "BasePromptTemplate|prompt|dndnode_61", + "edges": [ + { + "source": "dndnode_5", + "sourceHandle": "ZeroShotPrompt|dndnode_5|BasePromptTemplate", + "target": "dndnode_1", + "targetHandle": "BasePromptTemplate|prompt|dndnode_1", "className": "animate-pulse", - "id": 
"reactflow__edge-dndnode_63ZeroShotPrompt|dndnode_63|BasePromptTemplate-dndnode_61BasePromptTemplate|prompt|dndnode_61" + "id": "reactflow__edge-dndnode_5ZeroShotPrompt|dndnode_5|BasePromptTemplate-dndnode_1BasePromptTemplate|prompt|dndnode_1" }, { - "source": "dndnode_64", - "sourceHandle": "OpenAI|dndnode_64|BaseOpenAI,|BaseLLM", - "target": "dndnode_61", - "targetHandle": "BaseLLM|llm|dndnode_61", + "source": "dndnode_1", + "sourceHandle": "LLMChain|dndnode_1|Chain", + "target": "dndnode_4", + "targetHandle": "LLMChain|llm_chain|dndnode_4", "className": "animate-pulse", - "id": "reactflow__edge-dndnode_64OpenAI|dndnode_64|BaseOpenAI,|BaseLLM-dndnode_61BaseLLM|llm|dndnode_61" + "id": "reactflow__edge-dndnode_1LLMChain|dndnode_1|Chain-dndnode_4LLMChain|llm_chain|dndnode_4" }, { - "source": "dndnode_65", - "sourceHandle": "Serper Search|dndnode_65|Tool", - "target": "dndnode_62", - "targetHandle": "Tool|allowed_tools|dndnode_62", + "source": "dndnode_8", + "sourceHandle": "OpenAI|dndnode_8|BaseOpenAI|BaseLLM|BaseLanguageModel", + "target": "dndnode_1", + "targetHandle": "BaseLanguageModel|llm|dndnode_1", "className": "animate-pulse", - "id": "reactflow__edge-dndnode_65Serper Search|dndnode_65|Tool-dndnode_62Tool|allowed_tools|dndnode_62" + "id": "reactflow__edge-dndnode_8OpenAI|dndnode_8|BaseOpenAI|BaseLLM|BaseLanguageModel-dndnode_1BaseLanguageModel|llm|dndnode_1" }, { - "source": "dndnode_61", - "sourceHandle": "LLMChain|dndnode_61|Chain", - "target": "dndnode_62", - "targetHandle": "LLMChain|llm_chain|dndnode_62", + "source": "dndnode_9", + "sourceHandle": "Serper Search|dndnode_9|Tool", + "target": "dndnode_4", + "targetHandle": "Tool|allowed_tools|dndnode_4", "className": "animate-pulse", - "id": "reactflow__edge-dndnode_61LLMChain|dndnode_61|Chain-dndnode_62LLMChain|llm_chain|dndnode_62" + "id": "reactflow__edge-dndnode_9Serper Search|dndnode_9|Tool-dndnode_4Tool|allowed_tools|dndnode_4" } ], "viewport": { - "x": -103, - "y": -135, - "zoom": 1 + "x": 22.5, + 
"y": 42, + "zoom": 0.5 } }, - "chat": [{ - "message": "What is the new llm Prismer?", + "chat": [ + { + "message": "Langflow. What is it?", "isSend": true }, { - "message": "What is the prismer llm?", - "isSend": true - }, - { - "message": "What is the prismer llm?", - "isSend": true - }, - { - "message": "What is the prismer llm?", - "isSend": true - }, - { - "message": "What is the prismer llm?", - "isSend": true - }, - { - "message": "What is the prismer llm?", - "isSend": true - }, - { - "message": "What is the prismer llm?", - "isSend": true - }, - { - "message": "What is the prismer llm?", - "isSend": true - }, - { - "message": "Prismer LLM is a vision-language model with an ensemble of experts.", + "message": "Langflow is a GUI for LangChain, designed with react-flow to provide an effortless way to experiment and prototype flows with drag-and-drop components and a chrome extension powered by GPT-3.", "isSend": false, - "thought": "Thought: I should research what this is\nAction: Serper Search\nAction Input: \"prismer llm\"\nObservation: Announcing Prismer, my team's latest vision-language AI, empowered by domain-expert models in depth, surface normal, segmentation, etc. My team's work, Prismer, is a representative example. We use a textual LM as the backbone, and plug in many visual domain experts through a neural adapter ... “Prismer is able to achieve superior data efficiency compared to ... New OpenAI post about future of ChatGPT's (and its other LLM) alignment. Prismer: A Vision-Language Model with An Ensemble of Experts ... UDAPDR: Unsupervised Domain Adaptation via LLM Prompting and Distillation of Rerankers. Emory Law's LLM program offers graduates of US and foreign law schools the opportunity to pursue the master of laws degree from one of the top nationally ranked ... Missing: prismer | Must include:prismer. Prismer: A Vision-Language Model with An Ensemble of Experts ... github: github.com/NVlabs/prismer ... 
into LLM embedding space viously to their being sworn, the prisoner, Michael. Solomon, standing at the bar, challenged two ... a« wa« brought before- them.\" The prisoner ?llM'eow*. RWKV is a RNN with transformer-level LLM performance. ... The implementation of \"Prismer: A Vision-Language Model with An Ensemble of Experts\". Announcing Prismer, our NVIDIA team's latest vision-language #ai empowered by domain-expert models in depth, surface normal, segmentation, etc. No paywall.\nThought:\n\n> Finished chain." - }, - { - "message": "What is the prismer llm?", - "isSend": true - }, - { - "message": "Prismer is an AI vision-language model with an ensemble of experts developed by a NVIDIA team. It is capable of achieving superior data efficiency compared to other models and can be directly trained like a GPT.", - "isSend": false, - "thought": "Thought: I need to search for the term\nAction: Serper Search\nAction Input: \"prismer llm\"\nObservation: Announcing Prismer, my team's latest vision-language AI, empowered by domain-expert models in depth, surface normal, segmentation, etc. My team's work, Prismer, is a representative example. We use a textual LM as the backbone, and plug in many visual domain experts through a neural adapter ... “Prismer is able to achieve superior data efficiency compared to ... New OpenAI post about future of ChatGPT's (and its other LLM) alignment. Prismer: A Vision-Language Model with An Ensemble of Experts ... UDAPDR: Unsupervised Domain Adaptation via LLM Prompting and Distillation of Rerankers. A very insightful post about the potential issues with LLM chatbots for general ... Announcing Prismer, our NVIDIA team's latest vision-language #ai ... As technology continues to transform our economy and culture, businesses need a new breed of lawyers who understand the legal and commercial aspects of new ... Missing: prismer | Must include:prismer. RWKV is a RNN with transformer-level LLM performance. ... 
The implementation of \"Prismer: A Vision-Language Model with An Ensemble of Experts\". ... Bendix vorad collision warning system, Vegglampe med prismer, Ok magazine ... Alabama llm, Colt 25 caliber magazine, Poppie clinch, Mcds upper school, ... RWKV is an RNN with transformer-level LLM performance. It can be directly trained like a GPT (parallelizable). So it's combining the best of RNN ...\nThought:\n\n> Finished chain." + "thought": "> Entering new AgentExecutor chain...\n I need to research what Langflow is.\nAction: Serper Search\nAction Input: \"What is Langflow?\"\nObservation: Researchers introduce LangFlow, a graphical user interface (GUI) for LangChain that simplifies testing and creation of smart applications. The drag-and-drop feature provides a quick and effortless way to experiment and prototype, and the built-in chat interface enables real-time ... LangFlow is a GUI for LangChain, designed with react-flow to provide an effortless way to experiment and prototype flows with drag-and-drop ... LangFlow is a GUI for LangChain, designed with react-flow to provide an effortless way to experiment and prototype flows with drag-and-drop components and a ... an open-source chrome extension powered by GPT-3 to get the explanation of a piece of code on any site (Stackoverflow, Github, etc). ChatGPT correctly identified Mona Lisa but failed to guess the cartoon character Michael was thinking of, suggesting Bugs Bunny holding a ... Introducing LangFlow! An open-source UI for. @LangChainAI. that enables seamless combination of multiple deep learning models to perform ... See student reviews, school photos, Chinese Mandarin courses, and housing options available at Langflow Education Centre (Macau, Macau) - Reviews - Language ... Longfellow Elementary students play on the playground during recess. 
Longfellow Elementary students pause for a photo during class.\nThought: Langflow is a GUI for LangChain, designed with react-flow to provide an effortless way to experiment and prototype flows with drag-and-drop components and a chrome extension powered by GPT-3.\nFinal Answer: Langflow is a GUI for LangChain, designed with react-flow to provide an effortless way to experiment and prototype flows with drag-and-drop components and a chrome extension powered by GPT-3.\n\n> Finished chain." } ] } \ No newline at end of file diff --git a/tests/test_graph.py b/tests/test_graph.py index 4580809a9..c33d387da 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -28,7 +28,7 @@ def test_get_nodes_with_target(): assert connected_nodes is not None -def test_get_node_neighbors(): +def test_get_node_neighbors_basic(): """Test getting node neighbors""" graph = get_graph(basic=True) @@ -72,6 +72,46 @@ def test_get_node_neighbors(): ) +def test_get_node_neighbors_complex(): + """Test getting node neighbors""" + + graph = get_graph(basic=False) + assert isinstance(graph, Graph) + # Get root node + root = get_root_node(graph) + assert root is not None + neighbors = graph.get_node_neighbors(root) + assert neighbors is not None + assert isinstance(neighbors, dict) + # Root Node is an Agent, it requires an LLMChain and tools + # We need to check if there is a Chain in the one of the neighbors' + # data attribute in the type key + assert any( + "Chain" in neighbor.data["type"] for neighbor, val in neighbors.items() if val + ) + # assert BaseTool is in the neighbors + assert any( + "BaseTool" in neighbor.data["type"] + for neighbor, val in neighbors.items() + if val + ) + # Now on to the BaseTool's neighbors + base_tool = next( + neighbor + for neighbor, val in neighbors.items() + if "BaseTool" in neighbor.data["type"] and val + ) + base_tool_neighbors = graph.get_node_neighbors(base_tool) + assert base_tool_neighbors is not None + assert isinstance(base_tool_neighbors, dict) + 
# Check if there is a ZeroShotAgent in the base_tool's neighbors + assert any( + "ZeroShotAgent" in neighbor.data["type"] + for neighbor, val in base_tool_neighbors.items() + if val + ) + + def test_get_node(): """Test getting a single node""" graph = get_graph() @@ -127,7 +167,7 @@ def test_build_json(): assert isinstance(json_data["llm_chain"], dict) assert json_data["llm_chain"]["_type"] == "llm_chain" assert json_data["llm_chain"]["memory"] is None - assert json_data["llm_chain"]["verbose"] is True + assert json_data["llm_chain"]["verbose"] is False assert isinstance(json_data["llm_chain"]["prompt"], dict) assert isinstance(json_data["llm_chain"]["llm"], dict) assert json_data["llm_chain"]["output_key"] == "text" @@ -135,3 +175,11 @@ assert all(isinstance(tool, dict) for tool in json_data["allowed_tools"]) assert isinstance(json_data["return_values"], list) assert all(isinstance(val, str) for val in json_data["return_values"]) + + +def test_validate_edges(): + """Test validating edges""" + graph = get_graph() + assert isinstance(graph, Graph) + # all edges should be valid + assert all(edge.valid for edge in graph.edges)