Merge branch 'form_io' of github.com:logspace-ai/langflow into form_io

This commit is contained in:
Lucas Oliveira 2023-07-06 15:51:51 -03:00
commit 4616ca68bd
9 changed files with 133 additions and 41 deletions

View file

@@ -77,7 +77,7 @@ def validate_prompt(template: str):
def check_input_variables(input_variables: list):
invalid_chars = []
fixed_variables = []
wrong_variables = set()
wrong_variables = []
empty_variables = []
for variable in input_variables:
new_var = variable
@@ -92,17 +92,14 @@ def check_input_variables(input_variables: list):
if variable[0].isdigit():
invalid_chars.append(variable[0])
new_var = new_var.replace(variable[0], "")
wrong_variables.add(variable)
for char in INVALID_CHARACTERS:
if char in variable:
invalid_chars.append(char)
new_var = new_var.replace(char, "")
wrong_variables.add(variable)
wrong_variables.append(variable)
else:
for char in INVALID_CHARACTERS:
if char in variable:
invalid_chars.append(char)
new_var = new_var.replace(char, "")
wrong_variables.append(variable)
fixed_variables.append(new_var)
# if new_var != variable and new_var not in input_variables:
# input_variables.remove(variable)
# input_variables.append(new_var)
# If any of the input_variables is not in the fixed_variables, then it means that
# there are invalid characters in the input_variables
@@ -122,17 +119,20 @@ def build_error_message(
input_variables, invalid_chars, wrong_variables, fixed_variables, empty_variables
):
input_variables_str = ", ".join([f"'{var}'" for var in input_variables])
error_string = f"Invalid input variables: {input_variables_str}."
error_string = f"Invalid input variables: {input_variables_str}. "
if wrong_variables and invalid_chars:
", ".join([f"'{var}'" for var in wrong_variables])
invalid_chars_str = ", ".join([f"'{char}'" for char in invalid_chars])
error_string += (
f" Please, remove the invalid characters: {invalid_chars_str}"
f" from the variables: {wrong_variables_str}."
)
# fix the wrong variables by replacing invalid chars, then find them in the fixed variables
error_string_vars = "You can fix them by replacing the invalid characters: "
wvars = wrong_variables.copy()
for i, wrong_var in enumerate(wvars):
for char in invalid_chars:
wrong_var = wrong_var.replace(char, "")
if wrong_var in fixed_variables:
error_string_vars += f"'{wrong_variables[i]}' -> '{wrong_var}'"
error_string += error_string_vars
elif empty_variables:
error_string += f" There are {len(empty_variables)} empty variable{'s' if len(empty_variables) > 1 else ''}."
elif len(set(fixed_variables)) != len(fixed_variables):
error_string += " There are duplicate variables."
error_string += "There are duplicate variables."
return error_string
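
Reviewer note: switching wrong_variables from a set to a list matters because build_error_message pairs wrong_variables[i] with its cleaned counterpart, which requires insertion order. A minimal standalone sketch of that invariant — INVALID_CHARACTERS here is an illustrative subset, not langflow's actual constant:

INVALID_CHARACTERS = [" ", "#", "!"]  # illustrative subset, not langflow's real constant

def collect_wrong_variables(input_variables):
    # Mirrors the loop above: strip invalid chars, remember the originals in order.
    wrong_variables, fixed_variables = [], []
    for variable in input_variables:
        new_var = variable
        for char in INVALID_CHARACTERS:
            if char in variable:
                new_var = new_var.replace(char, "")
        if new_var != variable:
            wrong_variables.append(variable)  # a list keeps order aligned with fixed_variables
        fixed_variables.append(new_var)
    return wrong_variables, fixed_variables

assert collect_wrong_variables(["ok", "bad var"]) == (["bad var"], ["ok", "badvar"])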

View file

@@ -1,22 +1,107 @@
import asyncio
from typing import Any
from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langflow.api.v1.schemas import ChatResponse
from typing import Any, Dict, List, Union
from fastapi import WebSocket
from langchain.schema import AgentAction, LLMResult, AgentFinish
# https://github.com/hwchase17/chat-langchain/blob/master/callback.py
class AsyncStreamingLLMCallbackHandler(AsyncCallbackHandler):
"""Callback handler for streaming LLM responses."""
def __init__(self, websocket):
def __init__(self, websocket: WebSocket):
self.websocket = websocket
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
resp = ChatResponse(message=token, type="stream", intermediate_steps="")
await self.websocket.send_json(resp.dict())
async def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> Any:
"""Run when LLM starts running."""
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
"""Run when LLM ends running."""
async def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when LLM errors."""
async def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> Any:
"""Run when chain starts running."""
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
"""Run when chain ends running."""
async def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when chain errors."""
async def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> Any:
"""Run when tool starts running."""
resp = ChatResponse(
message="",
type="stream",
intermediate_steps=f"Tool input: {input_str}",
)
await self.websocket.send_json(resp.dict())
async def on_tool_end(self, output: str, **kwargs: Any) -> Any:
"""Run when tool ends running."""
resp = ChatResponse(
message="",
type="stream",
intermediate_steps=f"Tool output: {output}",
)
await self.websocket.send_json(resp.dict())
async def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when tool errors."""
async def on_text(self, text: str, **kwargs: Any) -> Any:
"""Run on arbitrary text."""
# This runs when the prompt is first sent
# to the LLM; implementing it here would send
# the final prompt to the frontend
async def on_agent_action(self, action: AgentAction, **kwargs: Any):
log = f"Thought: {action.log}"
# if there are line breaks, split them and send them
# as separate messages
if "\n" in log:
logs = log.split("\n")
for log in logs:
resp = ChatResponse(message="", type="stream", intermediate_steps=log)
await self.websocket.send_json(resp.dict())
else:
resp = ChatResponse(message="", type="stream", intermediate_steps=log)
await self.websocket.send_json(resp.dict())
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run on agent end."""
resp = ChatResponse(
message="",
type="stream",
intermediate_steps=finish.log,
)
await self.websocket.send_json(resp.dict())
class StreamingLLMCallbackHandler(BaseCallbackHandler):
"""Callback handler for streaming LLM responses."""

View file

@@ -23,7 +23,7 @@ async def process_graph(
try:
logger.debug("Generating result and thought")
result, intermediate_steps = await get_result_and_steps(
langchain_object, chat_inputs.message or "", websocket=websocket
langchain_object, chat_inputs.message, websocket=websocket
)
logger.debug("Generated result and intermediate_steps")
return result, intermediate_steps

View file

@@ -226,4 +226,5 @@ class Vertex:
return id(self)
def _built_object_repr(self):
return "Built sucessfully" if self._built_object else "Not built yet"
# Add a message with an emoji, stars for sucess,
return "Built sucessfully ✨" if self._built_object else "Failed to build 😵‍💫"

View file

@@ -201,6 +201,15 @@ class PromptVertex(Vertex):
self._build()
return self._built_object
def _built_object_repr(self):
if self.artifacts and hasattr(self._built_object, "format"):
# We'll build the prompt with the artifacts
# to show the user what the prompt looks like
# with the variables filled in
return self._built_object.format(**self.artifacts)
else:
return super()._built_object_repr()
class OutputParserVertex(Vertex):
def __init__(self, data: Dict):
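
Reviewer note: a sketch of what the new _built_object_repr produces, assuming self.artifacts maps the prompt's variables to the values collected at build time (the template and artifacts below are hypothetical):

from langchain.prompts import PromptTemplate

prompt = PromptTemplate.from_template("Answer {question} using {context}.")
artifacts = {"question": "what is Langflow?", "context": "the docs"}  # hypothetical build-time values
print(prompt.format(**artifacts))
# Answer what is Langflow? using the docs.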

View file

@@ -111,6 +111,12 @@ def instantiate_llm(node_type, class_object, params: Dict):
def instantiate_memory(node_type, class_object, params):
# process input_key and output_key to remove them if
# they are empty strings
for key in ["input_key", "output_key"]:
if key in params and not params[key]:
params.pop(key)
try:
if "retriever" in params and hasattr(params["retriever"], "as_retriever"):
params["retriever"] = params["retriever"].as_retriever()

View file

@@ -66,7 +66,6 @@ export default function GenericNode({
deleteNode(data.id);
return;
}
console.log(data);
useEffect(() => {}, [closePopUp, data.node.template]);
return (
<>
@@ -121,10 +120,11 @@ export default function GenericNode({
"Validating..."
) : (
<div className="max-h-96 overflow-auto">
{validationStatus.params ||
""
.split("\n")
.map((line, index) => <div key={index}>{line}</div>)}
{validationStatus.params
? validationStatus.params
.split("\n")
.map((line, index) => <div key={index}>{line}</div>)
: ""}
</div>
)
}
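
Reviewer note: the old JSX parsed as validationStatus.params || "".split("\n").map(...), so operator precedence applied split/map to the empty string literal and validationStatus.params was rendered as one unsplit string; the explicit ternary applies split and map to validationStatus.params itself.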

View file

@@ -8,7 +8,6 @@ import { useSSE } from "../../../contexts/SSEContext";
import { typesContext } from "../../../contexts/typesContext";
import { alertContext } from "../../../contexts/alertContext";
import { postBuildInit } from "../../../controllers/API";
import ShadTooltip from "../../ShadTooltipComponent";
import RadialProgressComponent from "../../RadialProgress";
import { TabsContext } from "../../../contexts/tabsContext";
@@ -76,7 +75,6 @@ export default function BuildTrigger({
const eventSource = new EventSource(apiUrl);
eventSource.onmessage = (event) => {
console.log(event);
// If the event has no data, there is nothing to parse; return
if (!event.data) {
return;

View file

@@ -30,14 +30,7 @@ import {
varHighlightHTML,
} from "../../utils";
import { Badge } from "../../components/ui/badge";
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "../../components/ui/tooltip";
import ShadTooltip from "../../components/ShadTooltipComponent";
import { set } from "lodash";
import DOMPurify from "dompurify";
export default function GenericModal({
@@ -68,7 +61,8 @@ export default function GenericModal({
const [wordsHighlightInvalid, setWordsHighlightInvalid] = useState([]);
const [wordsHighlight, setWordsHighlight] = useState([]);
const { dark } = useContext(darkContext);
const { setErrorData, setSuccessData } = useContext(alertContext);
const { setErrorData, setSuccessData, setNoticeData } =
useContext(alertContext);
const { closePopUp, setCloseEdit } = useContext(PopUpContext);
const ref = useRef();
function setModalOpen(x: boolean) {
@@ -149,9 +143,8 @@ export default function GenericModal({
let inputVariables = apiReturn.data.input_variables;
if (inputVariables.length === 0) {
setIsEdit(true);
setErrorData({
title:
"The template you are attempting to use does not contain any variables for data entry.",
setNoticeData({
title: "Your template does not have any variables.",
});
} else {
setIsEdit(false);