Merge remote-tracking branch 'origin/main' into dev

This commit is contained in:
Gabriel Luiz Freitas Almeida 2023-06-19 17:17:07 -03:00
commit f511ddc20f
11 changed files with 233 additions and 102 deletions

View file

@ -44,13 +44,18 @@ install_backend:
backend:
make install_backend
poetry run uvicorn langflow.main:app --port 7860 --reload --log-level debug
poetry run uvicorn src.backend.langflow.main:app --port 7860 --reload --log-level debug
build_and_run:
echo 'Removing dist folder'
rm -rf dist
make build && poetry run pip install dist/*.tar.gz && poetry run langflow
build_and_install:
echo 'Removing dist folder'
rm -rf dist
make build && poetry run pip install dist/*.tar.gz
build_frontend:
cd src/frontend && CI='' npm run build
cp -r src/frontend/build src/backend/langflow/frontend

View file

@ -56,11 +56,11 @@ Alternatively, click the **"Open in Cloud Shell"** button below to launch Google
Langflow integrates with langchain-serve to provide a one-command deployment to Jina AI Cloud.
Start by installing `langchain-serve` with
Start by installing `langchain-serve` with
```bash
pip install -U langchain-serve
```
```
Then, run:
@ -115,24 +115,38 @@ You can use Langflow directly on your browser, or use the API endpoints on Jina
<summary>Show API usage (with python)</summary>
```python
import json
import requests
import requests
FLOW_PATH = "Time_traveller.json"
BASE_API_URL = "https://langflow-e3dd8820ec.wolf.jina.ai/api/v1/predict"
FLOW_ID = "864c4f98-2e59-468b-8e13-79cd8da07468"
# You can tweak the flow by adding a tweaks dictionary
# e.g {"OpenAI-XXXXX": {"model_name": "gpt-4"}}
TWEAKS = {
"ChatOpenAI-g4jEr": {},
"ConversationChain-UidfJ": {}
}
# HOST = 'http://localhost:7860'
HOST = 'https://langflow-f1ed20e309.wolf.jina.ai'
API_URL = f'{HOST}/predict'
def run_flow(message: str, flow_id: str, tweaks: dict = None) -> dict:
"""
Run a flow with a given message and optional tweaks.
def predict(message):
with open(FLOW_PATH, "r") as f:
json_data = json.load(f)
payload = {'exported_flow': json_data, 'message': message}
response = requests.post(API_URL, json=payload)
return response.json()
:param message: The message to send to the flow
:param flow_id: The ID of the flow to run
:param tweaks: Optional tweaks to customize the flow
:return: The JSON response from the flow
"""
api_url = f"{BASE_API_URL}/{flow_id}"
payload = {"message": message}
predict('Take me to 1920s Bangalore')
if tweaks:
payload["tweaks"] = tweaks
response = requests.post(api_url, json=payload)
return response.json()
# Setup any tweaks you want to apply to the flow
print(run_flow("Your message", flow_id=FLOW_ID, tweaks=TWEAKS))
```
```json

24
poetry.lock generated
View file

@ -909,14 +909,14 @@ test-randomorder = ["pytest-randomly"]
[[package]]
name = "ctransformers"
version = "0.2.8"
version = "0.2.9"
description = "Python bindings for the Transformer models implemented in C/C++ using GGML library."
category = "main"
optional = false
python-versions = "*"
files = [
{file = "ctransformers-0.2.8-py3-none-any.whl", hash = "sha256:9804640364c13d93d58bfb6a9a1fa90d34b6438955d842c68ab05e5f8f15e023"},
{file = "ctransformers-0.2.8.tar.gz", hash = "sha256:81c0436d8b5315211496566294d51e7bbd07cf6e4305608262eab04603b74b65"},
{file = "ctransformers-0.2.9-py3-none-any.whl", hash = "sha256:ff0183ccf2bf157102cffacea13476cb78b8a2ffc2e1fdd46b57f8682a8da8ac"},
{file = "ctransformers-0.2.9.tar.gz", hash = "sha256:2165c512ee153f763c3d4ab133d666f86460010330d6bc75c0a6db6310ec9fc8"},
]
[package.dependencies]
@ -2444,14 +2444,14 @@ test = ["psutil", "pytest", "pytest-asyncio"]
[[package]]
name = "langchainplus-sdk"
version = "0.0.10"
version = "0.0.11"
description = "Client library to connect to the LangChainPlus LLM Tracing and Evaluation Platform."
category = "main"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "langchainplus_sdk-0.0.10-py3-none-any.whl", hash = "sha256:6ea4013a92a4c33a61d22deb49620577c592a79ee44038b2c751032a71cbc7b6"},
{file = "langchainplus_sdk-0.0.10.tar.gz", hash = "sha256:4f810b38df74a99d01e5723e653da02f05df3ee922971cccabc365d00c33dbf6"},
{file = "langchainplus_sdk-0.0.11-py3-none-any.whl", hash = "sha256:fbe3482ffe253e439ec8386a2904594a875b590e29e4adcbd938452a69a6c7c6"},
{file = "langchainplus_sdk-0.0.11.tar.gz", hash = "sha256:e50679309a31d9526f467aa13d4dbcfba0dc00a295cea72ffcc9972865ecac1b"},
]
[package.dependencies]
@ -4265,14 +4265,14 @@ files = [
[[package]]
name = "pyparsing"
version = "3.0.9"
version = "3.1.0"
description = "pyparsing module - Classes and methods to define and execute parsing grammars"
category = "main"
optional = false
python-versions = ">=3.6.8"
files = [
{file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"},
{file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"},
{file = "pyparsing-3.1.0-py3-none-any.whl", hash = "sha256:d554a96d1a7d3ddaf7183104485bc19fd80543ad6ac5bdb6426719d766fb06c1"},
{file = "pyparsing-3.1.0.tar.gz", hash = "sha256:edb662d6fe322d6e990b1594b5feaeadf806803359e3d4d42f11e295e588f0ea"},
]
[package.extras]
@ -5036,14 +5036,14 @@ files = [
[[package]]
name = "setuptools"
version = "67.8.0"
version = "68.0.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
category = "main"
optional = false
python-versions = ">=3.7"
files = [
{file = "setuptools-67.8.0-py3-none-any.whl", hash = "sha256:5df61bf30bb10c6f756eb19e7c9f3b473051f48db77fddbe06ff2ca307df9a6f"},
{file = "setuptools-67.8.0.tar.gz", hash = "sha256:62642358adc77ffa87233bc4d2354c4b2682d214048f500964dbe760ccedf102"},
{file = "setuptools-68.0.0-py3-none-any.whl", hash = "sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f"},
{file = "setuptools-68.0.0.tar.gz", hash = "sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235"},
]
[package.extras]

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "langflow"
version = "0.1.2"
version = "0.1.4"
description = "A Python package with a built-in web application"
authors = ["Logspace <contact@logspace.ai>"]
maintainers = [

View file

@ -152,6 +152,17 @@ def serve(
"timeout": timeout,
}
if platform.system() in ["Windows"]:
# Run using uvicorn on MacOS and Windows
# Windows doesn't support gunicorn
# MacOS requires an env variable to be set to use gunicorn
run_on_windows(host, port, log_level, options, app)
else:
# Run using gunicorn on Linux
run_on_mac_or_linux(host, port, log_level, options, app, open_browser)
def run_on_mac_or_linux(host, port, log_level, options, app, open_browser=True):
webapp_process = Process(
target=run_langflow, args=(host, port, log_level, options, app)
)
@ -169,6 +180,14 @@ def serve(
webbrowser.open(f"http://{host}:{port}")
def run_on_windows(host, port, log_level, options, app):
    """
    Run the Langflow server on Windows.

    Windows gets its own code path because gunicorn is not supported there
    (per the comment at the call site in `serve`); unlike
    `run_on_mac_or_linux`, which forks a `Process`, this runs the server
    in-process after printing the startup banner.

    :param host: hostname/IP the server binds to
    :param port: TCP port to listen on
    :param log_level: log level forwarded to run_langflow
    :param options: server options dict built by the caller (includes e.g. "timeout")
    :param app: application object/path handed to run_langflow
    """
    print_banner(host, port)
    run_langflow(host, port, log_level, options, app)
def setup_static_files(app: FastAPI, static_files_dir: Path):
"""
Setup the static files directory.

View file

@ -11,6 +11,7 @@ from langflow.graph.vertex.types import (
from langflow.interface.tools.constants import FILE_TOOLS
from langflow.utils import payload
from langflow.utils.logger import logger
from langchain.chains.base import Chain
class Graph:
@ -99,7 +100,7 @@ class Graph:
]
return connected_nodes
def build(self) -> List[Vertex]:
def build(self) -> Chain:
"""Builds the graph."""
# Get root node
root_node = payload.get_root_node(self)

View file

@ -1,5 +1,6 @@
import contextlib
import io
from pathlib import Path
from langchain.schema import AgentAction
import json
from langflow.interface.run import (
@ -10,8 +11,7 @@ from langflow.interface.run import (
from langflow.utils.logger import logger
from langflow.graph import Graph
from typing import Any, Dict, List, Tuple
from typing import Any, Dict, List, Optional, Tuple, Union
def fix_memory_inputs(langchain_object):
@ -20,22 +20,23 @@ def fix_memory_inputs(langchain_object):
object's input variables. If so, it does nothing. Otherwise, it gets a possible new memory key using the
get_memory_key function and updates the memory keys using the update_memory_keys function.
"""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
try:
if langchain_object.memory.memory_key in langchain_object.input_variables:
return
except AttributeError:
input_variables = (
langchain_object.prompt.input_variables
if hasattr(langchain_object, "prompt")
else langchain_object.input_keys
)
if langchain_object.memory.memory_key in input_variables:
return
if not hasattr(langchain_object, "memory") or langchain_object.memory is None:
return
try:
if langchain_object.memory.memory_key in langchain_object.input_variables:
return
except AttributeError:
input_variables = (
langchain_object.prompt.input_variables
if hasattr(langchain_object, "prompt")
else langchain_object.input_keys
)
if langchain_object.memory.memory_key in input_variables:
return
possible_new_mem_key = get_memory_key(langchain_object)
if possible_new_mem_key is not None:
update_memory_keys(langchain_object, possible_new_mem_key)
possible_new_mem_key = get_memory_key(langchain_object)
if possible_new_mem_key is not None:
update_memory_keys(langchain_object, possible_new_mem_key)
def format_actions(actions: List[Tuple[AgentAction, str]]) -> str:
@ -131,57 +132,108 @@ def process_graph_cached(data_graph: Dict[str, Any], message: str):
return {"result": str(result), "thought": thought.strip()}
def load_flow_from_json(
    input: Union[Path, str, dict], tweaks: Optional[dict] = None, build=True
):
    """
    Load flow from a JSON file or a JSON object.

    :param input: JSON file path or JSON object
    :param tweaks: Optional tweaks to be processed
    :param build: If True, build the graph, otherwise return the graph object
    :return: Langchain object or Graph object depending on the build parameter
    :raises TypeError: if input is neither a path nor a dict
    """
    # If input is a file path, load JSON from the file
    if isinstance(input, (str, Path)):
        with open(input, "r", encoding="utf-8") as f:
            flow_graph = json.load(f)
    # If input is a dictionary, assume it's a JSON object
    elif isinstance(input, dict):
        flow_graph = input
    else:
        raise TypeError(
            "Input must be either a file path (str) or a JSON object (dict)"
        )

    graph_data = flow_graph["data"]
    # Apply user-supplied per-node overrides before building the graph
    if tweaks is not None:
        graph_data = process_tweaks(graph_data, tweaks)
    nodes = graph_data["nodes"]
    edges = graph_data["edges"]
    graph = Graph(nodes, edges)
    if build:
        langchain_object = graph.build()
        if hasattr(langchain_object, "verbose"):
            langchain_object.verbose = True
        if hasattr(langchain_object, "return_intermediate_steps"):
            # https://github.com/hwchase17/langchain/issues/2068
            # Deactivating until we have a frontend solution
            # to display intermediate steps
            langchain_object.return_intermediate_steps = False
        fix_memory_inputs(langchain_object)
        return langchain_object
    return graph
def process_tweaks(graph_data: Dict, tweaks: Dict):
"""This function is used to tweak the graph data using the node id and the tweaks dict"""
# the tweaks dict is a dict of dicts
# the key is the node id and the value is a dict of the tweaks
# the dict of tweaks contains the name of a certain parameter and the value to be tweaked
def validate_input(
    graph_data: Dict[str, Any], tweaks: Dict[str, Dict[str, Any]]
) -> List[Dict[str, Any]]:
    """
    Validate the tweak inputs and extract the node list from the graph data.

    Nodes are looked up first under graph_data["data"]["nodes"]; if that is
    missing (or falsy), the top-level graph_data["nodes"] is used instead.

    :param graph_data: dictionary describing the flow graph
    :param tweaks: mapping of node id -> per-node tweak dictionary
    :return: the list of node dictionaries found in graph_data
    :raises ValueError: if either argument is not a dict or no node list exists
    """
    if not (isinstance(graph_data, dict) and isinstance(tweaks, dict)):
        raise ValueError("graph_data and tweaks should be dictionaries")

    candidates = graph_data.get("data", {}).get("nodes")
    if not candidates:
        candidates = graph_data.get("nodes")
    if not isinstance(candidates, list):
        raise ValueError(
            "graph_data should contain a list of nodes under 'data' key or directly under 'nodes' key"
        )
    return candidates
def apply_tweaks(node: Dict[str, Any], node_tweaks: Dict[str, Any]) -> None:
    """
    Apply per-node tweaks to a node's template, in place.

    Each entry in node_tweaks maps a template parameter name to its new value;
    only parameters already present in the node's template are updated.

    :param node: node dictionary expected to carry data.node.template
    :param node_tweaks: mapping of template parameter name -> new value
    """
    template_data = node.get("data", {}).get("node", {}).get("template")
    if not isinstance(template_data, dict):
        logger.warning(
            f"Template data for node {node.get('id')} should be a dictionary"
        )
        return
    for tweak_name, tweak_value in node_tweaks.items():
        # Compare against None instead of relying on truthiness so that
        # falsy-but-valid values (0, False, "") can still be applied.
        if tweak_name and tweak_value is not None and tweak_name in template_data:
            template_data[tweak_name]["value"] = tweak_value
def process_tweaks(
    graph_data: Dict[str, Any], tweaks: Dict[str, Dict[str, Any]]
) -> Dict[str, Any]:
    """
    This function is used to tweak the graph data using the node id and the tweaks dict.

    :param graph_data: The dictionary containing the graph data. It must contain a 'data' key with
    'nodes' as its child or directly contain 'nodes' key. Each node should have an 'id' and 'data'.
    :param tweaks: A dictionary where the key is the node id and the value is a dictionary of the tweaks.
    The inner dictionary contains the name of a certain parameter as the key and the value to be tweaked.

    :return: The modified graph_data dictionary (nodes are mutated in place).

    :raises ValueError: If the input is not in the expected format.
    """
    nodes = validate_input(graph_data, tweaks)
    for node in nodes:
        if isinstance(node, dict) and isinstance(node.get("id"), str):
            node_id = node["id"]
            # Only touch nodes for which a non-empty tweak dict was supplied
            if node_tweaks := tweaks.get(node_id):
                apply_tweaks(node, node_tweaks)
        else:
            logger.warning(
                "Each node should be a dictionary with an 'id' key of type str"
            )
    return graph_data

View file

@ -1,6 +1,6 @@
import { BellIcon, Home, Users2 } from "lucide-react";
import { useContext } from "react";
import { FaGithub } from "react-icons/fa";
import { useContext, useEffect, useState } from "react";
import { FaDiscord, FaGithub, FaTwitter } from "react-icons/fa";
import { Button } from "../ui/button";
import { TabsContext } from "../../contexts/tabsContext";
import AlertDropdown from "../../alerts/alertDropDown";
@ -11,6 +11,7 @@ import { typesContext } from "../../contexts/typesContext";
import MenuBar from "./components/menuBar";
import { Link, useLocation, useParams } from "react-router-dom";
import { USER_PROJECTS_HEADER } from "../../constants";
import { getRepoStars } from "../../controllers/API";
export default function Header() {
const { flows, addFlow, tabId } = useContext(TabsContext);
@ -22,6 +23,16 @@ export default function Header() {
const { notificationCenter, setNotificationCenter, setErrorData } =
useContext(alertContext);
const location = useLocation();
const [stars, setStars] = useState(null);
useEffect(() => {
async function fetchStars() {
const starsCount = await getRepoStars("logspace-ai", "langflow");
setStars(starsCount);
}
fetchStars();
}, []);
return (
<div className="w-full h-12 flex justify-between items-center border-b bg-muted">
<div className="flex gap-2 justify-start items-center w-96">
@ -57,22 +68,35 @@ export default function Header() {
</Link>
</div>
<div className="flex justify-end px-2 w-96">
<div className="ml-auto mr-2 flex gap-5">
<Button
asChild
variant="outline"
className="text-gray-600 dark:text-gray-300 "
>
<div className="ml-auto mr-2 flex gap-5 items-center">
<a
href="https://github.com/logspace-ai/langflow"
target="_blank"
rel="noreferrer"
className="flex"
className="inline-flex items-center justify-center text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:opacity-50 disabled:pointer-events-none ring-offset-background text-gray-600 dark:text-gray-300 border border-input hover:bg-accent hover:text-accent-foreground h-9 px-3 pr-0 rounded-md"
>
<FaGithub className="h-5 w-5 mr-2" />
Join The Community
Star
<div className="ml-2 flex text-sm bg-background rounded-md rounded-l-none border px-2 h-9 -mr-px items-center justify-center">
{stars}
</div>
</a>
<a
href="https://twitter.com/logspace_ai"
target="_blank"
rel="noreferrer"
className="text-muted-foreground"
>
<FaTwitter className="h-5 w-5" />
</a>
<a
href="https://discord.gg/EqksyE2EX9"
target="_blank"
rel="noreferrer"
className="text-muted-foreground"
>
<FaDiscord className="h-5 w-5" />
</a>
</Button>
{/* <button
className="text-gray-600 hover:text-gray-500 dark:text-gray-300 dark:hover:text-gray-200"
onClick={() => {
@ -110,12 +134,6 @@ export default function Header() {
)}
<BellIcon className="h-5 w-5" aria-hidden="true" />
</button>
{/* <button>
<img
src="https://github.com/shadcn.png"
className="rounded-full w-8"
/>
</button> */}
</div>
</div>
</div>

View file

@ -121,11 +121,12 @@ export const getCurlCode = (flow: FlowType): string => {
*/
export const getPythonCode = (flow: FlowType): string => {
const flowName = flow.name;
const tweaks = buildTweaks(flow);
return `from langflow import load_flow_from_json
flow = load_flow_from_json("${flowName}.json")
# Now you can use it like any chain
flow("Hey, have you heard of LangFlow?")`;
TWEAKS = ${JSON.stringify(tweaks, null, 2)}
flow = load_flow_from_json("${flowName}.json", tweaks=TWEAKS)
# Now you can use it like any chain
flow("Hey, have you heard of LangFlow?")`;
};
/**

View file

@ -18,6 +18,18 @@ export async function getAll(): Promise<AxiosResponse<APIObjectType>> {
return await axios.get(`/api/v1/all`);
}
const GITHUB_API_URL = "https://api.github.com";

/**
 * Fetches the GitHub star count for a repository.
 *
 * @param owner - repository owner (user or organization name)
 * @param repo - repository name
 * @returns the repository's stargazers count, or null if the request fails
 */
export async function getRepoStars(
  owner: string,
  repo: string
): Promise<number | null> {
  try {
    const response = await axios.get(
      `${GITHUB_API_URL}/repos/${owner}/${repo}`
    );
    return response.data.stargazers_count;
  } catch (error) {
    // Degrade gracefully: the header simply shows no count on failure.
    console.error("Error fetching repository data:", error);
    return null;
  }
}
/**
* Sends data to the API for prediction.
*

View file

@ -14,6 +14,15 @@ def test_load_flow_from_json():
assert isinstance(loaded, Chain)
def test_load_flow_from_json_with_tweaks():
    """Test loading a flow from a json file and applying tweaks"""
    # Override the model name of a specific node via the tweaks mechanism.
    model_tweaks = {"dndnode_82": {"model_name": "test model"}}
    flow = load_flow_from_json(pytest.BASIC_EXAMPLE_PATH, tweaks=model_tweaks)
    # The built flow is a usable langchain Chain with the tweak applied.
    assert flow is not None
    assert isinstance(flow, Chain)
    assert flow.llm.model_name == "test model"
def test_get_root_node():
with open(pytest.BASIC_EXAMPLE_PATH, "r") as f:
flow_graph = json.load(f)