Refactor utils.py and message.py modules

This commit refactors the `utils.py` module in the `base/prompts` directory and the `message.py` module in the `schema` directory. The changes include:
- Importing the `Message` class from `langflow.schema.message` in `utils.py`
- Importing various modules and classes from `langchain_core` in `message.py`
- Adding a new method `load_lc_prompt` to the `Message` class in `message.py`
- Adding a new class method `from_lc_prompt` to the `Message` class in `message.py`
- Adding a new method `format_text` to the `Message` class in `message.py`
- Adding a new class method `from_template_and_variables` to the `Message` class in `message.py`

These changes improve the organization and functionality of the code in the mentioned modules.
This commit is contained in:
ogabrielluiz 2024-06-17 10:08:59 -03:00
commit d1ab180040
2 changed files with 39 additions and 1 deletions

View file

@@ -3,7 +3,6 @@ from copy import deepcopy
from langchain_core.documents import Document
from langflow.schema import Data
from langflow.schema.message import Message
def data_to_string(record: Data) -> str:
@@ -29,6 +28,8 @@ def dict_values_to_string(d: dict) -> dict:
Returns:
dict: The dictionary with values converted to strings.
"""
from langflow.schema.message import Message
# Do something similar to the above
d_copy = deepcopy(d)
for key, value in d_copy.items():

View file

@@ -1,11 +1,14 @@
from datetime import datetime, timezone
from typing import Annotated, Any, AsyncIterator, Iterator, Optional
from langchain_core.load import load
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.prompt_values import ImagePromptValue
from langchain_core.prompts import BaseChatPromptTemplate, ChatPromptTemplate, PromptTemplate
from langchain_core.prompts.image import ImagePromptTemplate
from pydantic import BeforeValidator, ConfigDict, Field, field_serializer
from langflow.base.prompts.utils import dict_values_to_string
from langflow.schema.data import Data
from langflow.schema.image import Image, get_file_paths, is_image_file
@@ -110,3 +113,37 @@ class Message(Data):
image_prompt_value: ImagePromptValue = image_template.invoke(input={"path": file})
content_dicts.append({"type": "image_url", "image_url": image_prompt_value.image_url})
return content_dicts
def load_lc_prompt(self):
if "prompt" not in self:
raise ValueError("Prompt is required.")
return load(self.prompt)
@classmethod
def from_lc_prompt(
cls,
prompt: BaseChatPromptTemplate,
):
prompt_json = prompt.to_json()
return cls(prompt=prompt_json)
def format_text(self):
prompt_template = PromptTemplate.from_template(self.template)
variables_with_str_values = dict_values_to_string(self.variables)
formatted_prompt = prompt_template.format(**variables_with_str_values)
self.text = formatted_prompt
return formatted_prompt
@classmethod
async def from_template_and_variables(cls, template: str, variables: dict):
instance = cls(template=template, variables=variables)
contents = [{"type": "text", "text": instance.format_text()}]
# Get all Message instances from the kwargs
for value in variables.values():
if isinstance(value, cls):
content_dicts = await value.get_file_content_dicts()
contents.extend(content_dicts)
prompt_template = ChatPromptTemplate.from_messages([HumanMessage(content=contents)])
instance.messages = prompt_template.messages
instance.prompt = prompt_template.to_json()
return instance