fix: update message reload and update langchain-core (#3054)
* refactor: update langchain-core to version 0.2.24
* refactor: convert inner messages to BaseMessage in load_lc_prompt method
* refactor: update ChatPromptTemplate instantiation in message.py
* refactor: update langflow-base dependency to use local path for development
* [autofix.ci] apply automated fixes
* refactor: update ChatPromptTemplate instantiation in message.py
* refactor: add async_from_template_and_variables and sync from_template_and_variables
* feat(tests): add unit test for Message schema serialization and prompt loading
* refactor: update langchain-core dependency to version 0.2.24
* chore: new lock
* mypy
* chore: format pyproject
* refactor: rename async_from_template_and_variables to from_template_and_variables in Message class
* refactor: Rename async_from_template_and_variables to from_template_and_variables in Message class

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Nicolò Boschi <boschi1997@gmail.com>
This commit is contained in:
parent 3e6d3dc326
commit 4382e42d77

7 changed files with 565 additions and 590 deletions
poetry.lock (generated, 1063 changes)

File diff suppressed because it is too large
pyproject.toml

@@ -27,7 +27,7 @@ langflow = "langflow.__main__:main"
 [tool.poetry.dependencies]
 python = ">=3.10,<3.13"
-langflow-base = { path = "./src/backend/base", develop = true }
+langflow-base ={ path = "./src/backend/base", develop = true }
 beautifulsoup4 = "^4.12.2"
 google-search-results = "^2.4.1"
 google-api-python-client = "^2.130.0"
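Both sides of this hunk point langflow-base at the local checkout. For reference, a Poetry path dependency with develop = true is installed in editable mode, so changes under ./src/backend/base take effect without reinstalling the package.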
@@ -1014,7 +1014,7 @@ class Graph:
         else:
             self.run_manager.add_to_vertices_being_run(next_v_id)
         if cache and self.flow_id:
-            set_cache_coro = partial(get_chat_service().set_cache, key=self.flow_id)
+            set_cache_coro = partial(get_chat_service().set_cache, self.flow_id)
             await set_cache_coro(self, lock)
         return next_runnable_vertices
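The dropped key= keyword is what makes this hunk a fix: binding the key by keyword and then calling the partial with positional arguments makes the first positional argument collide with key. A minimal self-contained sketch, assuming a signature along the lines of set_cache(key, data, lock=None) (the real service method may differ):

import asyncio
from functools import partial

# Assumed shape of the chat service's set_cache coroutine; the real
# method's signature may differ, but its first parameter is the cache key.
async def set_cache(key, data, lock=None):
    return True

async def main():
    graph, lock = object(), asyncio.Lock()

    # Old form: key was bound by keyword, so the later positional
    # argument also lands on key.
    broken = partial(set_cache, key="flow-id")
    try:
        await broken(graph, lock)
    except TypeError as e:
        print(e)  # set_cache() got multiple values for argument 'key'

    # New form: the key is bound positionally and the call lines up
    # as set_cache("flow-id", graph, lock).
    fixed = partial(set_cache, "flow-id")
    assert await fixed(graph, lock)

asyncio.run(main())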
src/backend/base/langflow/schema/message.py

@@ -1,3 +1,4 @@
+import asyncio
 from datetime import datetime, timezone
 from typing import Annotated, Any, AsyncIterator, Iterator, List, Optional
 from uuid import UUID

@@ -15,10 +16,10 @@ from langflow.base.prompts.utils import dict_values_to_string
 from langflow.schema.data import Data
 from langflow.schema.image import Image, get_file_paths, is_image_file
 from langflow.utils.constants import (
-    MESSAGE_SENDER_USER,
-    MESSAGE_SENDER_NAME_USER,
-    MESSAGE_SENDER_NAME_AI,
     MESSAGE_SENDER_AI,
+    MESSAGE_SENDER_NAME_AI,
+    MESSAGE_SENDER_NAME_USER,
+    MESSAGE_SENDER_USER,
 )
@@ -171,20 +172,23 @@ class Message(Data):
     def load_lc_prompt(self):
         if "prompt" not in self:
             raise ValueError("Prompt is required.")
-        loaded_prompt = load(self.prompt)
-        # Rebuild HumanMessages if they are instance of BaseMessage
-        if isinstance(loaded_prompt, ChatPromptTemplate):
-            messages = []
-            for message in loaded_prompt.messages:
-                if isinstance(message, HumanMessage):
-                    messages.append(message)
-                elif message.type == "human":
-                    messages.append(HumanMessage(content=message.content))
-                elif message.type == "system":
-                    messages.append(SystemMessage(content=message.content))
-                elif message.type == "ai":
-                    messages.append(AIMessage(content=message.content))
-            loaded_prompt.messages = messages
+        # self.prompt was passed through jsonable_encoder
+        # so inner messages are not BaseMessage
+        # we need to convert them to BaseMessage
+        messages = []
+        for message in self.prompt.get("kwargs", {}).get("messages", []):
+            match message:
+                case HumanMessage():
+                    messages.append(message)
+                case _ if message.get("type") == "human":
+                    messages.append(HumanMessage(content=message.get("content")))
+                case _ if message.get("type") == "system":
+                    messages.append(SystemMessage(content=message.get("content")))
+                case _ if message.get("type") == "ai":
+                    messages.append(AIMessage(content=message.get("content")))
+
+        self.prompt["kwargs"]["messages"] = messages
+        loaded_prompt = load(self.prompt)
         return loaded_prompt

     @classmethod

@@ -216,7 +220,17 @@ class Message(Data):
         if contents:
             message = HumanMessage(content=[{"type": "text", "text": text}] + contents)

-        prompt_template = ChatPromptTemplate.from_messages([message])  # type: ignore
+        prompt_template = ChatPromptTemplate(messages=[message])  # type: ignore
         instance.prompt = jsonable_encoder(prompt_template.to_json())
         instance.messages = instance.prompt.get("kwargs", {}).get("messages", [])
         return instance
+
+    @classmethod
+    def sync_from_template_and_variables(cls, template: str, **variables):
+        # Run the async version in a sync way
+        try:
+            loop = asyncio.get_running_loop()
+        except RuntimeError:
+            return asyncio.run(cls.from_template_and_variables(template, **variables))
+        else:
+            return loop.run_until_complete(cls.from_template_and_variables(template, **variables))
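For context, here is a minimal sketch of the round trip the reworked load_lc_prompt handles, outside of Langflow. It assumes fastapi and langchain-core 0.2.24 are installed and mirrors the logic in the diff rather than Langflow's exact code path: jsonable_encoder flattens the BaseMessage objects inside the serialized prompt into plain dicts, so they must be rebuilt before load() reconstructs the ChatPromptTemplate.

# Illustrative sketch of the serialize/deserialize round trip fixed above.
from fastapi.encoders import jsonable_encoder
from langchain_core.load import load
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.prompts.chat import ChatPromptTemplate

prompt = ChatPromptTemplate(messages=[HumanMessage(content="Hello, Langflow!")])

# jsonable_encoder recursively turns the BaseMessage objects inside the
# serialized prompt into plain dicts (this is the form Message.prompt stores).
encoded = jsonable_encoder(prompt.to_json())

# Rebuild the inner messages as BaseMessage instances, as load_lc_prompt does,
# so that load() can reconstruct a working ChatPromptTemplate.
messages = []
for message in encoded.get("kwargs", {}).get("messages", []):
    match message:
        case HumanMessage():
            messages.append(message)
        case _ if message.get("type") == "human":
            messages.append(HumanMessage(content=message.get("content")))
        case _ if message.get("type") == "system":
            messages.append(SystemMessage(content=message.get("content")))
        case _ if message.get("type") == "ai":
            messages.append(AIMessage(content=message.get("content")))

encoded["kwargs"]["messages"] = messages
loaded = load(encoded)
assert isinstance(loaded, ChatPromptTemplate)
assert loaded.messages[0].content == "Hello, Langflow!"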
src/backend/base/poetry.lock (generated, 8 changes)
@@ -1314,13 +1314,13 @@ tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0"

 [[package]]
 name = "langchain-core"
-version = "0.2.23"
+version = "0.2.24"
 description = "Building applications with LLMs through composability"
 optional = false
 python-versions = "<4.0,>=3.8.1"
 files = [
-    {file = "langchain_core-0.2.23-py3-none-any.whl", hash = "sha256:ef0b4184b37e356a27182514aedcc8c41ffacbd6348a801bc775c1ce1f608637"},
-    {file = "langchain_core-0.2.23.tar.gz", hash = "sha256:ac8165f283d8f5214576ffc38387106ef0de7eb8d2c52576d06e8dd3285294b0"},
+    {file = "langchain_core-0.2.24-py3-none-any.whl", hash = "sha256:9444fc082d21ef075d925590a684a73fe1f9688a3d90087580ec929751be55e7"},
+    {file = "langchain_core-0.2.24.tar.gz", hash = "sha256:f2e3fa200b124e8c45d270da9bf836bed9c09532612c96ff3225e59b9a232f5a"},
 ]

 [package.dependencies]

@@ -3597,4 +3597,4 @@ local = []
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.10,<3.13"
-content-hash = "ec75a891f635b67266af56a85c699abe72a73dad69a05a9d5750d8dce56b4bf8"
+content-hash = "824e91c3465ec9eb51bcd5f26297c45232f1fce0298fe2b5673fd5cbb2503734"
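The content-hash in [metadata] is Poetry's fingerprint of the dependency-relevant parts of pyproject.toml; it changes here because the langchain-core constraint changed (next diff), and Poetry compares it against pyproject.toml to detect a stale lock file.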
src/backend/base/pyproject.toml

@@ -32,7 +32,7 @@ httpx = "*"
 uvicorn = "^0.30.0"
 gunicorn = "^22.0.0"
 langchain = "~0.2.0"
-langchain-core = "0.2.23"
+langchain-core = "^0.2.24"
 langchainhub = "~0.1.15"
 sqlmodel = "^0.0.18"
 loguru = "^0.7.1"
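With Poetry's caret operator, "^0.2.24" is equivalent to ">=0.2.24,<0.3.0", so compatible 0.2.x releases of langchain-core can now be resolved automatically, whereas the previous bare "0.2.23" pinned one exact version.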
src/backend/tests/unit/schema/test_schema_message.py (new file, 30 lines)
@@ -0,0 +1,30 @@
+import pytest
+from langchain_core.prompts.chat import ChatPromptTemplate
+
+from langflow.schema.message import Message
+
+
+@pytest.fixture
+def client():
+    pass
+
+
+@pytest.mark.asyncio
+async def test_message_async_prompt_serialization():
+    template = "Hello, {name}!"
+    message = await Message.from_template_and_variables(template, name="Langflow")
+    assert message.text == "Hello, Langflow!"
+
+    prompt = message.load_lc_prompt()
+    assert isinstance(prompt, ChatPromptTemplate)
+    assert prompt.messages[0].content == "Hello, Langflow!"
+
+
+def test_message_prompt_serialization():
+    template = "Hello, {name}!"
+    message = Message.sync_from_template_and_variables(template, name="Langflow")
+    assert message.text == "Hello, Langflow!"
+
+    prompt = message.load_lc_prompt()
+    assert isinstance(prompt, ChatPromptTemplate)
+    assert prompt.messages[0].content == "Hello, Langflow!"
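These tests cover both entry points added in message.py: the async test awaits from_template_and_variables directly, while the sync test goes through sync_from_template_and_variables. One caveat worth noting: when a loop is already running, the wrapper's else branch calls loop.run_until_complete on that same loop, which CPython rejects with "RuntimeError: This event loop is already running", so the wrapper is effectively for call sites without a running event loop, which is exactly what the sync test exercises.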