Fix bugs, add tests, and refactor code (#138)

This commit is contained in:
Ibis Prevedello 2023-04-08 14:09:26 -03:00 committed by GitHub
commit d3c1e25e8d
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
18 changed files with 1623 additions and 93 deletions

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.0.54"
version = "0.0.55"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [

View file

@ -1,6 +1,7 @@
from langflow.graph.utils import extract_input_variables_from_prompt
from pydantic import BaseModel, validator
from langflow.graph.utils import extract_input_variables_from_prompt
class Code(BaseModel):
    """Pydantic model carrying a raw source-code string (used by the validation API)."""

    # The code snippet as plain text; validated elsewhere.
    code: str

View file

@ -7,7 +7,6 @@ from langflow.api.base import (
PromptValidationResponse,
validate_prompt,
)
from langflow.graph.utils import extract_input_variables_from_prompt
from langflow.utils.logger import logger
from langflow.utils.validate import validate_code

View file

@ -24,15 +24,15 @@ prompts:
llms:
- OpenAI
- AzureOpenAI
# - AzureOpenAI
- ChatOpenAI
- HuggingFaceHub
tools:
# - Search
- Search
- PAL-MATH
- Calculator
# - Serper Search
- Serper Search
- Tool
- PythonFunction
- JsonSpec

View file

@ -4,6 +4,7 @@
# - Build each inner agent first, then build the outer agent
import types
import warnings
from copy import deepcopy
from typing import Any, Dict, List, Optional
@ -119,7 +120,13 @@ class Node:
params[key] = edges[0].source
elif value["required"] or value.get("value"):
params[key] = value["value"]
# If value does not have value this still passes
# but then gives a keyError
# so we need to check if value has value
new_value = value.get("value")
if new_value is None:
warnings.warn(f"Value for {key} in {self.node_type} is None. ")
params[key] = new_value
# Add _type to params
self.params = params

View file

@ -1 +1 @@
DIRECT_TYPES = ["str", "bool", "code", "int", "float", "Any"]
DIRECT_TYPES = ["str", "bool", "code", "int", "float", "Any", "prompt"]

View file

@ -1,4 +1,4 @@
from typing import Dict, List, Union
from typing import Dict, List, Type, Union
from langflow.graph.base import Edge, Node
from langflow.graph.nodes import (
@ -15,13 +15,12 @@ from langflow.graph.nodes import (
from langflow.interface.agents.base import agent_creator
from langflow.interface.chains.base import chain_creator
from langflow.interface.llms.base import llm_creator
from langflow.interface.memories.base import memory_creator
from langflow.interface.prompts.base import prompt_creator
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.tools.base import tool_creator
from langflow.interface.tools.constants import FILE_TOOLS
from langflow.interface.tools.util import get_tools_dict
from langflow.interface.wrappers.base import wrapper_creator
from langflow.interface.memories.base import memory_creator
from langflow.utils import payload
@ -108,6 +107,26 @@ class Graph:
edges.append(Edge(source, target))
return edges
def _get_node_class(self, node_type: str, node_lc_type: str) -> Type[Node]:
    """Resolve which ``Node`` subclass should wrap a node of the given type.

    Lookup order: file tools first, then the exact node type, then the
    underlying langchain type, finally the generic ``Node`` fallback.
    """
    if node_type in FILE_TOOLS:
        return FileToolNode

    # Build the type -> class table creator by creator. Later creators win
    # on a name collision, matching the original dict-merge ordering.
    registry: Dict[str, Type[Node]] = {}
    for creator, node_class in (
        (prompt_creator, PromptNode),
        (agent_creator, AgentNode),
        (chain_creator, ChainNode),
        (tool_creator, ToolNode),
        (toolkits_creator, ToolkitNode),
        (wrapper_creator, WrapperNode),
        (llm_creator, LLMNode),
        (memory_creator, MemoryNode),
    ):
        for type_name in creator.to_list():
            registry[type_name] = node_class

    # Node classes are always truthy, so `or` chains the fallbacks safely.
    return registry.get(node_type) or registry.get(node_lc_type) or Node
def _build_nodes(self) -> List[Node]:
nodes: List[Node] = []
for node in self._nodes:
@ -115,38 +134,9 @@ class Graph:
node_type: str = node_data["type"] # type: ignore
node_lc_type: str = node_data["node"]["template"]["_type"] # type: ignore
if node_type in prompt_creator.to_list():
nodes.append(PromptNode(node))
elif (
node_type in agent_creator.to_list()
or node_lc_type in agent_creator.to_list()
):
nodes.append(AgentNode(node))
elif node_type in chain_creator.to_list():
nodes.append(ChainNode(node))
elif (
node_type in tool_creator.to_list()
or node_lc_type in get_tools_dict().keys()
):
if node_type in FILE_TOOLS:
nodes.append(FileToolNode(node))
nodes.append(ToolNode(node))
elif node_type in toolkits_creator.to_list():
nodes.append(ToolkitNode(node))
elif node_type in wrapper_creator.to_list():
nodes.append(WrapperNode(node))
elif (
node_type in llm_creator.to_list()
or node_lc_type in llm_creator.to_list()
):
nodes.append(LLMNode(node))
elif (
node_type in memory_creator.to_list()
or node_lc_type in memory_creator.to_list()
):
nodes.append(MemoryNode(node))
else:
nodes.append(Node(node))
NodeClass = self._get_node_class(node_type, node_lc_type)
nodes.append(NodeClass(node))
return nodes
def get_children_by_node_type(self, node: Node, node_type: str) -> List[Node]:

View file

@ -55,7 +55,10 @@ class PromptNode(Node):
tools: Optional[Union[List[Node], List[ToolNode]]] = None,
) -> Any:
if not self._built or force:
if "input_variables" not in self.params:
if (
"input_variables" not in self.params
or self.params["input_variables"] is None
):
self.params["input_variables"] = []
# Check if it is a ZeroShotPrompt and needs a tool
if "ShotPrompt" in self.node_type:
@ -75,7 +78,6 @@ class PromptNode(Node):
for param in prompt_params:
prompt_text = self.params[param]
variables = extract_input_variables_from_prompt(prompt_text)
self.params["input_variables"].extend(variables)
self.params["input_variables"] = list(set(self.params["input_variables"]))

View file

@ -1,9 +1,10 @@
from typing import Dict, List, Optional
from typing import Dict, List, Optional, Type
from langflow.custom.customs import get_custom_nodes
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.custom_lists import chain_type_to_cls_dict
from langflow.settings import settings
from langflow.template.nodes import ChainFrontendNode
from langflow.utils.util import build_template_from_class
# Assuming necessary imports for Field, Template, and FrontendNode classes
@ -12,6 +13,10 @@ from langflow.utils.util import build_template_from_class
class ChainCreator(LangChainTypeCreator):
type_name: str = "chains"
@property
def frontend_node_class(self) -> Type[ChainFrontendNode]:
    # Chains use a specialized frontend node (hides "*_key" template fields).
    return ChainFrontendNode
@property
def type_to_loader_dict(self) -> Dict:
if self.type_dict is None:

View file

@ -19,7 +19,7 @@ class BaseCustomChain(ConversationChain):
template: Optional[str]
ai_prefix_key: Optional[str]
ai_prefix_value: Optional[str]
"""Field to use as the ai_prefix. It needs to be set and has to be in the template"""
@root_validator(pre=False)
@ -27,13 +27,13 @@ class BaseCustomChain(ConversationChain):
format_dict = {}
input_variables = extract_input_variables_from_prompt(values["template"])
if values.get("ai_prefix_key", None) is None:
values["ai_prefix_key"] = values["memory"].ai_prefix
if values.get("ai_prefix_value", None) is None:
values["ai_prefix_value"] = values["memory"].ai_prefix
for key in input_variables:
new_value = values.get(key, f"{{{key}}}")
format_dict[key] = new_value
if key == values.get("ai_prefix_key", None):
if key == values.get("ai_prefix_value", None):
values["memory"].ai_prefix = new_value
values["template"] = values["template"].format(**format_dict)
@ -62,7 +62,7 @@ Current conversation:
Human: {input}
{character}:"""
memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
ai_prefix_key: Optional[str] = "character"
ai_prefix_value: Optional[str] = "character"
"""Default memory store."""

View file

@ -73,9 +73,14 @@ class ToolCreator(LangChainTypeCreator):
base_classes = ["Tool"]
all_tools = {}
for tool in self.type_to_loader_dict.keys():
if tool_params := get_tool_params(get_tool_by_name(tool)):
tool_fcn = get_tool_by_name(tool)
if tool_params := get_tool_params(tool_fcn):
tool_name = tool_params.get("name") or str(tool)
all_tools[tool_name] = {"type": tool, "params": tool_params}
all_tools[tool_name] = {
"type": tool,
"params": tool_params,
"fcn": tool_fcn,
}
# Raise error if name is not in tools
if name not in all_tools.keys():
@ -83,15 +88,21 @@ class ToolCreator(LangChainTypeCreator):
tool_type: str = all_tools[name]["type"] # type: ignore
if tool_type in _BASE_TOOLS:
if all_tools[tool_type]["fcn"] in _BASE_TOOLS.values():
params = []
elif tool_type in _LLM_TOOLS:
elif all_tools[tool_type]["fcn"] in _LLM_TOOLS.values():
params = ["llm"]
elif tool_type in _EXTRA_LLM_TOOLS:
_, extra_keys = _EXTRA_LLM_TOOLS[tool_type]
elif all_tools[tool_type]["fcn"] in [
val[0] for val in _EXTRA_LLM_TOOLS.values()
]:
n_dict = {val[0]: val[1] for val in _EXTRA_LLM_TOOLS.values()}
extra_keys = n_dict[all_tools[tool_type]["fcn"]]
params = ["llm"] + extra_keys
elif tool_type in _EXTRA_OPTIONAL_TOOLS:
_, extra_keys = _EXTRA_OPTIONAL_TOOLS[tool_type]
elif all_tools[tool_type]["fcn"] in [
val[0] for val in _EXTRA_OPTIONAL_TOOLS.values()
]:
n_dict = {val[0]: val[1] for val in _EXTRA_OPTIONAL_TOOLS.values()} # type: ignore
extra_keys = n_dict[all_tools[tool_type]["fcn"]]
params = extra_keys
elif tool_type == "Tool":
params = ["name", "description", "func"]
@ -104,7 +115,6 @@ class ToolCreator(LangChainTypeCreator):
elif tool_type in FILE_TOOLS:
params = all_tools[name]["params"] # type: ignore
base_classes += [name]
else:
params = []

View file

@ -179,12 +179,13 @@ class FrontendNode(BaseModel):
(field.required and key not in ["input_variables"])
or key in FORCE_SHOW_FIELDS
or "api" in key
or "key" in key
or ("key" in key and "input" not in key and "output" not in key)
)
# Add password field
field.password = any(
text in key.lower() for text in {"password", "token", "api", "key"}
field.password = (
any(text in key.lower() for text in {"password", "token", "api", "key"})
and field.show
)
# Add multline

View file

@ -289,3 +289,13 @@ class MemoryFrontendNode(FrontendNode):
field.field_type = "int"
field.value = 10
field.display_name = "Memory Size"
class ChainFrontendNode(FrontendNode):
    """Frontend node for chains: keeps internal ``*_key`` fields out of the UI."""

    @staticmethod
    def format_field(field: TemplateField, name: Optional[str] = None) -> None:
        # Apply the generic formatting rules first.
        FrontendNode.format_field(field, name)
        if "key" not in field.name:
            return
        # Fields like input_key/output_key are internal wiring, not secrets:
        # never mask them as passwords and hide them from the frontend.
        field.password = False
        field.show = False

View file

@ -14,10 +14,11 @@ import {
} from "react";
import { sendAll } from "../../controllers/API";
import { alertContext } from "../../contexts/alertContext";
import { classNames, nodeColors } from "../../utils";
import { classNames, nodeColors, snakeToNormalCase } from "../../utils";
import { TabsContext } from "../../contexts/tabsContext";
import { ChatType } from "../../types/chat";
import ChatMessage from "./chatMessage";
import { NodeType } from "../../types/flow";
const _ = require("lodash");
@ -28,7 +29,7 @@ export default function Chat({ flow, reactFlowInstance }: ChatType) {
const [open, setOpen] = useState(true);
const [chatValue, setChatValue] = useState("");
const [chatHistory, setChatHistory] = useState(flow.chat);
const { setErrorData } = useContext(alertContext);
const { setErrorData, setNoticeData } = useContext(alertContext);
const addChatHistory = (
message: string,
isSend: boolean,
@ -73,36 +74,58 @@ export default function Chat({ flow, reactFlowInstance }: ChatType) {
useEffect(() => {
if (ref.current) ref.current.scrollIntoView({ behavior: "smooth" });
}, [chatHistory]);
function validateNodes() {
if (
reactFlowInstance.getNodes().some(
(n) =>
n.data.node &&
Object.keys(n.data.node.template).some((t: any) => {
return (
n.data.node.template[t].required &&
(!n.data.node.template[t].value ||
n.data.node.template[t].value === "") &&
!reactFlowInstance
.getEdges()
.some(
(e) =>
e.targetHandle.split("|")[1] === t &&
e.targetHandle.split("|")[2] === n.id
)
);
})
)
) {
return false;
function validateNode(n: NodeType): Array<string> {
if (!n.data?.node?.template || !Object.keys(n.data.node.template)) {
setNoticeData({
title:
"We've noticed a potential issue with a node in the flow. Please review it and, if necessary, submit a bug report with your exported flow file. Thank you for your help!",
});
return [];
}
return true;
const {
type,
node: { template },
} = n.data;
return Object.keys(template).reduce(
(errors: Array<string>, t) =>
errors.concat(
(template[t].required && template[t].show) &&
(!template[t].value || template[t].value === "") &&
!reactFlowInstance
.getEdges()
.some(
(e) =>
e.targetHandle.split("|")[1] === t &&
e.targetHandle.split("|")[2] === n.id
)
? [
`${type} is missing ${
template.display_name
? template.display_name
: snakeToNormalCase(template[t].name)
}.`,
]
: []
),
[] as string[]
);
}
function validateNodes() {
return reactFlowInstance
.getNodes()
.flatMap((n: NodeType) => validateNode(n));
}
const ref = useRef(null);
function sendMessage() {
if (chatValue !== "") {
if (validateNodes()) {
let nodeValidationErrors = validateNodes();
if (nodeValidationErrors.length === 0) {
setLockChat(true);
let message = chatValue;
setChatValue("");
@ -136,10 +159,8 @@ export default function Chat({ flow, reactFlowInstance }: ChatType) {
});
} else {
setErrorData({
title: "Error sending message",
list: [
"Oops! Looks like you missed some required information. Please fill in all the required fields before continuing.",
],
title: "Oops! Looks like you missed some required information:",
list: nodeValidationErrors,
});
}
} else {

View file

@ -0,0 +1,176 @@
from fastapi.testclient import TestClient
from langflow.settings import settings
def test_agents_settings(client: TestClient):
    """The /all endpoint must expose exactly the agents enabled in settings."""
    response = client.get("/all")
    assert response.status_code == 200
    agents = response.json()["agents"]
    assert set(agents.keys()) == set(settings.agents)
def test_zero_shot_agent(client: TestClient):
    """Check base classes and template fields of the ZeroShotAgent entry."""
    response = client.get("/all")
    assert response.status_code == 200
    zero_shot_agent = response.json()["agents"]["ZeroShotAgent"]

    assert set(zero_shot_agent["base_classes"]) == {
        "ZeroShotAgent",
        "BaseSingleActionAgent",
        "Agent",
        "function",
    }

    def expected_field(**overrides):
        # Baseline shape shared by the template fields in the /all payload.
        field = {
            "required": False,
            "placeholder": "",
            "show": True,
            "multiline": False,
            "password": False,
            "list": False,
        }
        field.update(overrides)
        return field

    template = zero_shot_agent["template"]
    assert template["llm_chain"] == expected_field(
        required=True, name="llm_chain", type="LLMChain"
    )
    assert template["allowed_tools"] == expected_field(
        name="allowed_tools", type="Tool", list=True
    )
def test_json_agent(client: TestClient):
    """Check base classes and template fields of the JsonAgent entry."""
    response = client.get("/all")
    assert response.status_code == 200
    json_agent = response.json()["agents"]["JsonAgent"]
    assert json_agent["base_classes"] == ["AgentExecutor"]

    def expected_field(**overrides):
        # Baseline shape shared by the template fields in the /all payload.
        field = {
            "required": False,
            "placeholder": "",
            "show": True,
            "multiline": False,
            "password": False,
            "list": False,
        }
        field.update(overrides)
        return field

    template = json_agent["template"]
    assert template["toolkit"] == expected_field(
        required=True, name="toolkit", type="BaseToolkit"
    )
    assert template["llm"] == expected_field(
        required=True, name="llm", type="BaseLanguageModel"
    )
def test_csv_agent(client: TestClient):
    """Check base classes and template fields of the CSVAgent entry."""
    response = client.get("/all")
    assert response.status_code == 200
    csv_agent = response.json()["agents"]["CSVAgent"]
    assert csv_agent["base_classes"] == ["AgentExecutor"]

    def expected_field(**overrides):
        # Baseline shape shared by the template fields in the /all payload.
        field = {
            "required": False,
            "placeholder": "",
            "show": True,
            "multiline": False,
            "password": False,
            "list": False,
        }
        field.update(overrides)
        return field

    template = csv_agent["template"]
    # The file field carries extra keys describing accepted file types.
    assert template["path"] == expected_field(
        required=True,
        value="",
        suffixes=[".csv"],
        fileTypes=["csv"],
        name="path",
        type="file",
        content=None,
    )
    assert template["llm"] == expected_field(
        required=True, name="llm", type="BaseLanguageModel"
    )
def test_initialize_agent(client: TestClient):
    """Check base classes and template fields of the initialize_agent entry."""
    response = client.get("/all")
    assert response.status_code == 200
    initialize_agent = response.json()["agents"]["initialize_agent"]
    assert initialize_agent["base_classes"] == ["AgentExecutor"]

    def expected_field(**overrides):
        # Baseline shape shared by the template fields in the /all payload.
        field = {
            "required": False,
            "placeholder": "",
            "show": True,
            "multiline": False,
            "password": False,
            "list": False,
        }
        field.update(overrides)
        return field

    template = initialize_agent["template"]
    assert template["agent"] == expected_field(
        required=True,
        value="zero-shot-react-description",
        options=[
            "zero-shot-react-description",
            "react-docstore",
            "self-ask-with-search",
            "conversational-react-description",
        ],
        name="agent",
        type="str",
        list=True,
    )
    assert template["memory"] == expected_field(name="memory", type="BaseChatMemory")
    assert template["tools"] == expected_field(name="tools", type="Tool", list=True)
    assert template["llm"] == expected_field(
        required=True, name="llm", type="BaseLanguageModel"
    )

View file

@ -0,0 +1,664 @@
from fastapi.testclient import TestClient
from langflow.settings import settings
def test_chains_settings(client: TestClient):
    """The /all endpoint must expose exactly the chains enabled in settings."""
    response = client.get("/all")
    assert response.status_code == 200
    chains = response.json()["chains"]
    assert set(chains.keys()) == set(settings.chains)
def test_conversation_chain(client: TestClient):
    """Validate the ConversationChain signature exposed by /all."""
    response = client.get("/all")
    assert response.status_code == 200
    chain = response.json()["chains"]["ConversationChain"]

    assert set(chain["base_classes"]) == {"LLMChain", "ConversationChain", "Chain"}

    def expected_field(**overrides):
        # Baseline shape shared by the template fields in the /all payload.
        field = {
            "required": False,
            "placeholder": "",
            "show": True,
            "multiline": False,
            "password": False,
            "list": False,
        }
        field.update(overrides)
        return field

    template = chain["template"]
    expected = {
        "memory": expected_field(name="memory", type="BaseMemory"),
        "verbose": expected_field(show=False, name="verbose", type="bool"),
        "llm": expected_field(required=True, name="llm", type="BaseLanguageModel"),
        "input_key": expected_field(
            show=False, value="input", name="input_key", type="str"
        ),
        "output_key": expected_field(
            show=False, value="response", name="output_key", type="str"
        ),
    }
    for field_name, expected_value in expected.items():
        assert template[field_name] == expected_value

    assert template["_type"] == "ConversationChain"
    assert (
        chain["description"]
        == "Chain to have a conversation and load context from memory."
    )
def test_llm_chain(client: TestClient):
    """Validate the LLMChain signature exposed by /all."""
    response = client.get("/all")
    assert response.status_code == 200
    chain = response.json()["chains"]["LLMChain"]

    assert set(chain["base_classes"]) == {"LLMChain", "Chain"}

    def expected_field(**overrides):
        # Baseline shape shared by the template fields in the /all payload.
        field = {
            "required": False,
            "placeholder": "",
            "show": True,
            "multiline": False,
            "password": False,
            "list": False,
        }
        field.update(overrides)
        return field

    template = chain["template"]
    expected = {
        "memory": expected_field(name="memory", type="BaseMemory"),
        "verbose": expected_field(
            show=False, value=False, name="verbose", type="bool"
        ),
        "llm": expected_field(required=True, name="llm", type="BaseLanguageModel"),
        "output_key": expected_field(
            show=False, value="text", name="output_key", type="str"
        ),
    }
    for field_name, expected_value in expected.items():
        assert template[field_name] == expected_value
def test_llm_checker_chain(client: TestClient):
    """Validate the LLMCheckerChain signature exposed by /all."""
    response = client.get("/all")
    assert response.status_code == 200
    chain = response.json()["chains"]["LLMCheckerChain"]

    assert set(chain["base_classes"]) == {"LLMCheckerChain", "Chain"}

    def expected_field(**overrides):
        # Baseline shape shared by the template fields in the /all payload.
        field = {
            "required": False,
            "placeholder": "",
            "show": True,
            "multiline": False,
            "password": False,
            "list": False,
        }
        field.update(overrides)
        return field

    template = chain["template"]
    expected = {
        "memory": expected_field(name="memory", type="BaseMemory"),
        "verbose": expected_field(
            show=False, value=False, name="verbose", type="bool"
        ),
        # Note: this chain declares BaseLLM, not BaseLanguageModel.
        "llm": expected_field(required=True, name="llm", type="BaseLLM"),
        "input_key": expected_field(
            show=False, value="query", name="input_key", type="str"
        ),
        "output_key": expected_field(
            show=False, value="result", name="output_key", type="str"
        ),
    }
    for field_name, expected_value in expected.items():
        assert template[field_name] == expected_value

    assert template["_type"] == "LLMCheckerChain"
    assert (
        chain["description"] == "Chain for question-answering with self-verification."
    )
def test_llm_math_chain(client: TestClient):
    """Validate the LLMMathChain signature exposed by /all."""
    response = client.get("/all")
    assert response.status_code == 200
    chain = response.json()["chains"]["LLMMathChain"]

    assert set(chain["base_classes"]) == {"LLMMathChain", "Chain"}

    def expected_field(**overrides):
        # Baseline shape shared by the template fields in the /all payload.
        field = {
            "required": False,
            "placeholder": "",
            "show": True,
            "multiline": False,
            "password": False,
            "list": False,
        }
        field.update(overrides)
        return field

    template = chain["template"]
    expected = {
        "memory": expected_field(name="memory", type="BaseMemory"),
        "verbose": expected_field(
            show=False, value=False, name="verbose", type="bool"
        ),
        "llm": expected_field(required=True, name="llm", type="BaseLanguageModel"),
        "input_key": expected_field(
            show=False, value="question", name="input_key", type="str"
        ),
        "output_key": expected_field(
            show=False, value="answer", name="output_key", type="str"
        ),
    }
    for field_name, expected_value in expected.items():
        assert template[field_name] == expected_value

    assert template["_type"] == "LLMMathChain"
    assert (
        chain["description"]
        == "Chain that interprets a prompt and executes python code to do math."
    )
def test_series_character_chain(client: TestClient):
    """Validate the SeriesCharacterChain signature exposed by /all."""
    response = client.get("/all")
    assert response.status_code == 200
    chain = response.json()["chains"]["SeriesCharacterChain"]

    assert set(chain["base_classes"]) == {
        "LLMChain",
        "BaseCustomChain",
        "Chain",
        "ConversationChain",
        "SeriesCharacterChain",
    }

    def expected_field(**overrides):
        # Baseline shape shared by the template fields in the /all payload.
        field = {
            "required": False,
            "placeholder": "",
            "show": True,
            "multiline": False,
            "password": False,
            "list": False,
        }
        field.update(overrides)
        return field

    # Serialized default ConversationBufferMemory.
    buffer_memory_value = {
        "chat_memory": {"messages": []},
        "output_key": None,
        "input_key": None,
        "return_messages": False,
        "human_prefix": "Human",
        "ai_prefix": "AI",
        "memory_key": "history",
    }
    character_template = "I want you to act like {character} from {series}.\nI want you to respond and answer like {character}. do not write any explanations. only answer like {character}.\nYou must know all of the knowledge of {character}.\nCurrent conversation:\n{history}\nHuman: {input}\n{character}:"  # noqa: E501

    template = chain["template"]
    expected = {
        "memory": expected_field(
            value=buffer_memory_value, name="memory", type="BaseMemory"
        ),
        "verbose": expected_field(show=False, name="verbose", type="bool"),
        "llm": expected_field(required=True, name="llm", type="BaseLanguageModel"),
        "input_key": expected_field(
            show=False, value="input", name="input_key", type="str"
        ),
        "output_key": expected_field(
            show=False, value="response", name="output_key", type="str"
        ),
        "template": expected_field(
            show=False,
            multiline=True,
            value=character_template,
            name="template",
            type="str",
        ),
        "ai_prefix_value": expected_field(
            show=False, value="character", name="ai_prefix_value", type="str"
        ),
        "character": expected_field(required=True, name="character", type="str"),
        "series": expected_field(required=True, name="series", type="str"),
    }
    for field_name, expected_value in expected.items():
        assert template[field_name] == expected_value

    assert template["_type"] == "SeriesCharacterChain"
    assert (
        chain["description"]
        == "SeriesCharacterChain is a chain you can use to have a conversation with a character from a series."
    )
def test_mid_journey_prompt_chain(client: TestClient):
    """Validate the MidJourneyPromptChain signature exposed by /all."""
    response = client.get("/all")
    assert response.status_code == 200
    chain = response.json()["chains"]["MidJourneyPromptChain"]
    assert isinstance(chain, dict)

    assert set(chain["base_classes"]) == {
        "LLMChain",
        "BaseCustomChain",
        "Chain",
        "ConversationChain",
        "MidJourneyPromptChain",
    }

    def expected_field(**overrides):
        # Baseline shape shared by the template fields in the /all payload.
        field = {
            "required": False,
            "placeholder": "",
            "show": True,
            "multiline": False,
            "password": False,
            "list": False,
        }
        field.update(overrides)
        return field

    # Serialized default ConversationBufferMemory.
    buffer_memory_value = {
        "chat_memory": {"messages": []},
        "output_key": None,
        "input_key": None,
        "return_messages": False,
        "human_prefix": "Human",
        "ai_prefix": "AI",
        "memory_key": "history",
    }
    # Serialized default conversation prompt.
    default_prompt_value = {
        "input_variables": ["history", "input"],
        "output_parser": None,
        "partial_variables": {},
        "template": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:",  # noqa: E501
        "template_format": "f-string",
        "validate_template": True,
        "_type": "prompt",
    }
    midjourney_template = 'I want you to act as a prompt generator for Midjourney\'s artificial intelligence program.\n    Your job is to provide detailed and creative descriptions that will inspire unique and interesting images from the AI.\n    Keep in mind that the AI is capable of understanding a wide range of language and can interpret abstract concepts, so feel free to be as imaginative and descriptive as possible.\n    For example, you could describe a scene from a futuristic city, or a surreal landscape filled with strange creatures.\n    The more detailed and imaginative your description, the more interesting the resulting image will be. Here is your first prompt:\n    "A field of wildflowers stretches out as far as the eye can see, each one a different color and shape. In the distance, a massive tree towers over the landscape, its branches reaching up to the sky like tentacles."\n\n    Current conversation:\n    {history}\n    Human: {input}\n    AI:'  # noqa: E501

    template = chain["template"]
    expected = {
        "memory": expected_field(
            value=buffer_memory_value, name="memory", type="BaseMemory"
        ),
        "verbose": expected_field(show=False, name="verbose", type="bool"),
        "prompt": expected_field(
            show=False,
            value=default_prompt_value,
            name="prompt",
            type="BasePromptTemplate",
        ),
        "llm": expected_field(required=True, name="llm", type="BaseLanguageModel"),
        "output_key": expected_field(
            show=False, value="response", name="output_key", type="str"
        ),
        "input_key": expected_field(
            show=False, value="input", name="input_key", type="str"
        ),
        "template": expected_field(
            show=False,
            multiline=True,
            value=midjourney_template,
            name="template",
            type="str",
        ),
        "ai_prefix_value": expected_field(
            show=False, name="ai_prefix_value", type="str"
        ),
    }
    for field_name, expected_value in expected.items():
        assert template[field_name] == expected_value

    assert (
        chain["description"]
        == "MidJourneyPromptChain is a chain you can use to generate new MidJourney prompts."
    )
def test_time_travel_guide_chain(client: TestClient):
    """Validate the TimeTravelGuideChain signature exposed by /all."""
    response = client.get("/all")
    assert response.status_code == 200
    chain = response.json()["chains"]["TimeTravelGuideChain"]
    assert isinstance(chain, dict)

    assert set(chain["base_classes"]) == {
        "LLMChain",
        "BaseCustomChain",
        "TimeTravelGuideChain",
        "Chain",
        "ConversationChain",
    }

    def expected_field(**overrides):
        # Baseline shape shared by the template fields in the /all payload.
        field = {
            "required": False,
            "placeholder": "",
            "show": True,
            "multiline": False,
            "password": False,
            "list": False,
        }
        field.update(overrides)
        return field

    # Serialized default ConversationBufferMemory.
    buffer_memory_value = {
        "chat_memory": {"messages": []},
        "output_key": None,
        "input_key": None,
        "return_messages": False,
        "human_prefix": "Human",
        "ai_prefix": "AI",
        "memory_key": "history",
    }
    # Serialized default conversation prompt.
    default_prompt_value = {
        "input_variables": ["history", "input"],
        "output_parser": None,
        "partial_variables": {},
        "template": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:",  # noqa: E501
        "template_format": "f-string",
        "validate_template": True,
        "_type": "prompt",
    }
    time_travel_template = "I want you to act as my time travel guide. You are helpful and creative. I will provide you with the historical period or future time I want to visit and you will suggest the best events, sights, or people to experience. Provide the suggestions and any necessary information.\n    Current conversation:\n    {history}\n    Human: {input}\n    AI:"  # noqa: E501

    template = chain["template"]
    expected = {
        "memory": expected_field(
            value=buffer_memory_value, name="memory", type="BaseMemory"
        ),
        "verbose": expected_field(show=False, name="verbose", type="bool"),
        "prompt": expected_field(
            show=False,
            value=default_prompt_value,
            name="prompt",
            type="BasePromptTemplate",
        ),
        "llm": expected_field(required=True, name="llm", type="BaseLanguageModel"),
        "output_key": expected_field(
            show=False, value="response", name="output_key", type="str"
        ),
        "input_key": expected_field(
            show=False, value="input", name="input_key", type="str"
        ),
        "template": expected_field(
            show=False,
            multiline=True,
            value=time_travel_template,
            name="template",
            type="str",
        ),
        "ai_prefix_value": expected_field(
            show=False, name="ai_prefix_value", type="str"
        ),
    }
    for field_name, expected_value in expected.items():
        assert template[field_name] == expected_value

    assert chain["description"] == ""

447
tests/test_llms_template.py Normal file
View file

@ -0,0 +1,447 @@
from fastapi.testclient import TestClient
from langflow.settings import settings
def test_llms_settings(client: TestClient):
    """The /all endpoint must expose exactly the LLMs enabled in settings."""
    response = client.get("/all")
    assert response.status_code == 200
    llm_names = response.json()["llms"].keys()
    assert set(llm_names) == set(settings.llms)
def test_hugging_face_hub(client: TestClient):
    """Check every template field exposed for the HuggingFaceHub LLM."""

    def field(name, type_, **overrides):
        # Default shape of a frontend template field; per-field flags override it.
        spec = {
            "required": False,
            "placeholder": "",
            "show": False,
            "multiline": False,
            "password": False,
            "name": name,
            "type": type_,
            "list": False,
        }
        spec.update(overrides)
        return spec

    response = client.get("/all")
    assert response.status_code == 200
    template = response.json()["llms"]["HuggingFaceHub"]["template"]

    assert template["cache"] == field("cache", "bool")
    assert template["verbose"] == field("verbose", "bool", value=False)
    assert template["client"] == field("client", "Any")
    assert template["repo_id"] == field("repo_id", "str", value="gpt2")
    assert template["task"] == field("task", "str")
    assert template["model_kwargs"] == field("model_kwargs", "code")
    assert template["huggingfacehub_api_token"] == field(
        "huggingfacehub_api_token", "str", show=True, password=True
    )
def test_openai(client: TestClient):
    """Check every template field exposed for the OpenAI LLM."""

    def field(name, type_, **overrides):
        # Default shape of a frontend template field; per-field flags override it.
        spec = {
            "required": False,
            "placeholder": "",
            "show": False,
            "multiline": False,
            "password": False,
            "name": name,
            "type": type_,
            "list": False,
        }
        spec.update(overrides)
        return spec

    response = client.get("/all")
    assert response.status_code == 200
    template = response.json()["llms"]["OpenAI"]["template"]

    assert template["cache"] == field("cache", "bool")
    assert template["verbose"] == field("verbose", "bool")
    assert template["client"] == field("client", "Any")
    assert template["model_name"] == field(
        "model_name",
        "str",
        show=True,
        value="text-davinci-003",
        options=[
            "text-davinci-003",
            "text-davinci-002",
            "text-curie-001",
            "text-babbage-001",
            "text-ada-001",
        ],
        list=True,
    )
    assert template["temperature"] == field(
        "temperature", "float", show=True, value=0.7
    )
    assert template["max_tokens"] == field(
        "max_tokens", "int", show=True, value=256, password=True
    )
    assert template["top_p"] == field("top_p", "float", value=1)
    assert template["frequency_penalty"] == field(
        "frequency_penalty", "float", value=0
    )
    assert template["presence_penalty"] == field(
        "presence_penalty", "float", value=0
    )
    assert template["n"] == field("n", "int", value=1)
    assert template["best_of"] == field("best_of", "int", value=1)
    assert template["model_kwargs"] == field("model_kwargs", "code")
    assert template["openai_api_key"] == field(
        "openai_api_key",
        "str",
        required=True,
        show=True,
        value="",
        password=True,
        display_name="OpenAI API Key",
    )
    assert template["batch_size"] == field("batch_size", "int", value=20)
    assert template["request_timeout"] == field(
        "request_timeout", "Union[float, Tuple[float, float], NoneType]"
    )
    assert template["logit_bias"] == field("logit_bias", "code")
    assert template["max_retries"] == field("max_retries", "int", value=6)
    assert template["streaming"] == field("streaming", "bool", value=False)
def test_chat_open_ai(client: TestClient):
    """Check the template fields and metadata exposed for ChatOpenAI."""

    def field(name, type_, **overrides):
        # Default shape of a frontend template field; per-field flags override it.
        spec = {
            "required": False,
            "placeholder": "",
            "show": False,
            "multiline": False,
            "password": False,
            "name": name,
            "type": type_,
            "list": False,
        }
        spec.update(overrides)
        return spec

    response = client.get("/all")
    assert response.status_code == 200
    model = response.json()["llms"]["ChatOpenAI"]
    template = model["template"]

    assert template["verbose"] == field("verbose", "bool", value=False)
    assert template["client"] == field("client", "Any")
    assert template["model_name"] == field(
        "model_name",
        "str",
        show=True,
        value="gpt-3.5-turbo",
        options=["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"],
        list=True,
    )
    assert template["temperature"] == field(
        "temperature", "float", show=True, value=0.7
    )
    assert template["model_kwargs"] == field("model_kwargs", "code")
    assert template["openai_api_key"] == field(
        "openai_api_key",
        "str",
        required=True,
        show=True,
        value="",
        password=True,
        display_name="OpenAI API Key",
    )
    assert template["request_timeout"] == field("request_timeout", "int", value=60)
    assert template["max_retries"] == field("max_retries", "int", value=6)
    assert template["streaming"] == field("streaming", "bool", value=False)
    assert template["n"] == field("n", "int", value=1)
    assert template["max_tokens"] == field(
        "max_tokens", "int", show=True, password=True
    )

    # Node-level metadata checks.
    assert template["_type"] == "ChatOpenAI"
    assert (
        model["description"]
        == "Wrapper around OpenAI Chat large language models.To use, you should have the ``openai`` python package installed, and theenvironment variable ``OPENAI_API_KEY`` set with your API key.Any parameters that are valid to be passed to the openai.create call can be passedin, even if not explicitly saved on this class."  # noqa E501
    )
    assert set(model["base_classes"]) == {
        "BaseChatModel",
        "ChatOpenAI",
        "BaseLanguageModel",
    }

View file

@ -0,0 +1,197 @@
from fastapi.testclient import TestClient
from langflow.settings import settings
def test_prompts_settings(client: TestClient):
    """The /all endpoint must expose exactly the prompts enabled in settings."""
    response = client.get("/all")
    assert response.status_code == 200
    prompt_names = response.json()["prompts"].keys()
    assert set(prompt_names) == set(settings.prompts)
def test_prompt_template(client: TestClient):
    """Check every template field exposed for the PromptTemplate node."""

    def field(name, type_, **overrides):
        # Default shape of a frontend template field; per-field flags override it.
        spec = {
            "required": False,
            "placeholder": "",
            "show": False,
            "multiline": False,
            "password": False,
            "name": name,
            "type": type_,
            "list": False,
        }
        spec.update(overrides)
        return spec

    response = client.get("/all")
    assert response.status_code == 200
    template = response.json()["prompts"]["PromptTemplate"]["template"]

    assert template["input_variables"] == field(
        "input_variables", "str", required=True, list=True
    )
    assert template["output_parser"] == field("output_parser", "BaseOutputParser")
    assert template["partial_variables"] == field("partial_variables", "code")
    assert template["template"] == field(
        "template", "prompt", required=True, show=True, multiline=True
    )
    assert template["template_format"] == field(
        "template_format", "str", value="f-string"
    )
    assert template["validate_template"] == field(
        "validate_template", "bool", value=True
    )
def test_few_shot_prompt_template(client: TestClient):
    """Check every template field exposed for the FewShotPromptTemplate node."""

    def field(name, type_, **overrides):
        # Default shape of a frontend template field; per-field flags override it.
        spec = {
            "required": False,
            "placeholder": "",
            "show": False,
            "multiline": False,
            "password": False,
            "name": name,
            "type": type_,
            "list": False,
        }
        spec.update(overrides)
        return spec

    response = client.get("/all")
    assert response.status_code == 200
    template = response.json()["prompts"]["FewShotPromptTemplate"]["template"]

    assert template["examples"] == field(
        "examples", "prompt", show=True, multiline=True, list=True
    )
    assert template["example_selector"] == field(
        "example_selector", "BaseExampleSelector"
    )
    assert template["example_prompt"] == field(
        "example_prompt", "PromptTemplate", required=True, show=True
    )
    assert template["suffix"] == field(
        "suffix", "prompt", required=True, show=True, multiline=True
    )
    assert template["example_separator"] == field(
        "example_separator", "str", value="\n\n"
    )
    assert template["prefix"] == field(
        "prefix", "prompt", show=True, multiline=True, value=""
    )
def test_zero_shot_prompt(client: TestClient):
    """Check the template fields exposed for the ZeroShotPrompt node."""
    response = client.get("/all")
    assert response.status_code == 200
    json_response = response.json()
    prompts = json_response["prompts"]
    prompt = prompts["ZeroShotPrompt"]
    template = prompt["template"]
    # Prefix: optional multiline text, pre-filled with the default agent preamble.
    assert template["prefix"] == {
        "required": False,
        "placeholder": "",
        "show": True,
        "multiline": True,
        "value": "Answer the following questions as best you can. You have access to the following tools:",  # noqa: E501
        "password": False,
        "name": "prefix",
        "type": "str",
        "list": False,
    }
    # Suffix: required multiline text holding the {input}/{agent_scratchpad} slots.
    assert template["suffix"] == {
        "required": True,
        "placeholder": "",
        "show": True,
        "multiline": True,
        "value": "Begin!\n\nQuestion: {input}\nThought:{agent_scratchpad}",
        "password": False,
        "name": "suffix",
        "type": "str",
        "list": False,
    }
    # Format instructions: hidden field carrying the ReAct-style scaffold text.
    assert template["format_instructions"] == {
        "required": False,
        "placeholder": "",
        "show": False,
        "multiline": False,
        "value": "Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question",  # noqa: E501
        "password": False,
        "name": "format_instructions",
        "type": "str",
        "list": False,
    }