AnthropicModel: add prefill field for structured outputs, AmazonBedrock: New component format

This commit is contained in:
namastex888 2024-06-14 22:18:59 +00:00
commit d9eb3decf1
2 changed files with 151 additions and 157 deletions

View file

@ -1,88 +1,78 @@
from typing import Optional
from langchain_community.chat_models.bedrock import BedrockChat
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
from langflow.field_typing import BaseLanguageModel, Text
from langflow.inputs import BoolInput, DictInput, DropdownInput, StrInput
from langflow.template import Output
class AmazonBedrockComponent(LCModelComponent):
display_name: str = "Amazon Bedrock"
description: str = "Generate text using Amazon Bedrock LLMs."
icon = "Amazon"
field_order = [
"model_id",
"credentials_profile_name",
"region_name",
"model_kwargs",
"endpoint_url",
"cache",
"stream",
"input_value",
"system_message",
# UI inputs for the Amazon Bedrock component (new declarative component format).
inputs = [
    # Free-text (or connected) input passed to the model as the human message.
    StrInput(name="input_value", display_name="Input", input_types=["Text", "Data", "Prompt"]),
    # Bedrock model identifier; defaults to Claude Instant.
    DropdownInput(
        name="model_id",
        display_name="Model Id",
        options=[
            "amazon.titan-text-express-v1",
            "amazon.titan-text-lite-v1",
            "amazon.titan-embed-text-v1",
            "amazon.titan-embed-image-v1",
            "amazon.titan-image-generator-v1",
            "anthropic.claude-v2",
            "anthropic.claude-v2:1",
            "anthropic.claude-3-sonnet-20240229-v1:0",
            "anthropic.claude-3-haiku-20240307-v1:0",
            "anthropic.claude-instant-v1",
            "ai21.j2-mid-v1",
            "ai21.j2-ultra-v1",
            "cohere.command-text-v14",
            "cohere.command-light-text-v14",
            "cohere.embed-english-v3",
            "cohere.embed-multilingual-v3",
            "meta.llama2-13b-chat-v1",
            "meta.llama2-70b-chat-v1",
            "mistral.mistral-7b-instruct-v0:2",
            "mistral.mixtral-8x7b-instruct-v0:1",
        ],
        value="anthropic.claude-instant-v1",
    ),
    # AWS connection settings, forwarded verbatim to BedrockChat in build_model.
    StrInput(name="credentials_profile_name", display_name="Credentials Profile Name"),
    StrInput(name="region_name", display_name="Region Name"),
    DictInput(name="model_kwargs", display_name="Model Kwargs", advanced=True),
    StrInput(name="endpoint_url", display_name="Endpoint URL"),
    BoolInput(name="cache", display_name="Cache"),
    StrInput(
        name="system_message",
        display_name="System Message",
        info="System message to pass to the model.",
        advanced=True,
    ),
    BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True),
]
# Component outputs: plain text (via text_response) or the LLM object itself
# (via build_model) for wiring into other components.
outputs = [
    Output(display_name="Text", name="text_output", method="text_response"),
    Output(display_name="Language Model", name="model_output", method="build_model"),
]
def build_config(self):
    """Return the legacy field-config dict for this component.

    NOTE(review): this is the pre-refactor configuration style; the new
    component format declares the same fields via the `inputs` list.
    """
    return {
        "model_id": {
            "display_name": "Model Id",
            "options": [
                "amazon.titan-text-express-v1",
                "amazon.titan-text-lite-v1",
                "amazon.titan-embed-text-v1",
                "amazon.titan-embed-image-v1",
                "amazon.titan-image-generator-v1",
                "anthropic.claude-v2",
                "anthropic.claude-v2:1",
                "anthropic.claude-3-sonnet-20240229-v1:0",
                "anthropic.claude-3-haiku-20240307-v1:0",
                "anthropic.claude-instant-v1",
                "ai21.j2-mid-v1",
                "ai21.j2-ultra-v1",
                "cohere.command-text-v14",
                "cohere.command-light-text-v14",
                "cohere.embed-english-v3",
                "cohere.embed-multilingual-v3",
                "meta.llama2-13b-chat-v1",
                "meta.llama2-70b-chat-v1",
                "mistral.mistral-7b-instruct-v0:2",
                "mistral.mixtral-8x7b-instruct-v0:1",
            ],
        },
        "credentials_profile_name": {"display_name": "Credentials Profile Name"},
        "endpoint_url": {"display_name": "Endpoint URL"},
        "region_name": {"display_name": "Region Name"},
        "model_kwargs": {
            "display_name": "Model Kwargs",
            "advanced": True,
        },
        "cache": {"display_name": "Cache"},
        "input_value": {"display_name": "Input", "input_types": ["Text", "Data", "Prompt"]},
        "system_message": {
            "display_name": "System Message",
            "info": "System message to pass to the model.",
            "advanced": True,
        },
        "stream": {
            "display_name": "Stream",
            "info": STREAM_INFO_TEXT,
            "advanced": True,
        },
    }
def text_response(self) -> Text:
    """Build the Bedrock chat model and return its text reply to the input.

    Delegates message assembly and (optional) streaming to the base class's
    get_chat_result helper, and mirrors the result into self.status for the UI.
    """
    llm = self.build_model()
    reply = self.get_chat_result(llm, self.stream, self.input_value, self.system_message)
    self.status = reply
    return reply
def build(
self,
input_value: Text,
system_message: Optional[str] = None,
model_id: str = "anthropic.claude-instant-v1",
credentials_profile_name: Optional[str] = None,
region_name: Optional[str] = None,
model_kwargs: Optional[dict] = None,
endpoint_url: Optional[str] = None,
cache: Optional[bool] = None,
stream: bool = False,
) -> Text:
def build_model(self) -> BaseLanguageModel:
model_id = self.model_id
credentials_profile_name = self.credentials_profile_name
region_name = self.region_name
model_kwargs = self.model_kwargs
endpoint_url = self.endpoint_url
cache = self.cache
stream = self.stream
try:
output = BedrockChat(
credentials_profile_name=credentials_profile_name,
@ -92,8 +82,8 @@ class AmazonBedrockComponent(LCModelComponent):
endpoint_url=endpoint_url,
streaming=stream,
cache=cache,
) # type: ignore
)
except Exception as e:
raise ValueError("Could not connect to AmazonBedrock API.") from e
return self.get_chat_result(output, stream, input_value, system_message)
return output

View file

@ -5,95 +5,98 @@ from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
from langflow.field_typing import BaseLanguageModel, Text
from langflow.inputs import BoolInput, DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput
from langflow.template import Output
class AnthropicLLM(LCModelComponent):
display_name: str = "Anthropic"
description: str = "Generate text using Anthropic Chat&Completion LLMs."
class AnthropicModelComponent(LCModelComponent):
display_name = "Anthropic"
description = "Generate text using Anthropic Chat&Completion LLMs with prefill support."
icon = "Anthropic"
field_order = [
"model",
"anthropic_api_key",
"max_tokens",
"temperature",
"anthropic_api_url",
"input_value",
"system_message",
"stream",
# UI inputs for the Anthropic component (new declarative component format).
inputs = [
    # Free-text (or connected) input sent as the human message.
    StrInput(
        name="input_value",
        display_name="Input",
        input_types=["Text", "Data", "Prompt", "Message"]),
    IntInput(
        name="max_tokens",
        display_name="Max Tokens",
        advanced=True,
        info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
    ),
    # Anthropic model name; defaults to Claude 3 Opus.
    DropdownInput(
        name="model",
        display_name="Model Name",
        options=[
            "claude-3-opus-20240229",
            "claude-3-sonnet-20240229",
            "claude-3-haiku-20240307",
            "claude-2.1",
            "claude-2.0",
            "claude-instant-1.2",
            "claude-instant-1",
        ],
        info="https://python.langchain.com/docs/integrations/chat/anthropic",
        value="claude-3-opus-20240229",
    ),
    SecretStrInput(
        name="anthropic_api_key",
        display_name="Anthropic API Key",
        info="Your Anthropic API key.",
    ),
    FloatInput(name="temperature", display_name="Temperature", value=0.1),
    StrInput(
        name="anthropic_api_url",
        display_name="Anthropic API URL",
        advanced=True,
        info="Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
    ),
    BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True),
    StrInput(
        name="system_message",
        display_name="System Message",
        info="System message to pass to the model.",
        advanced=True,
    ),
    # New in this commit: assistant-prefill text used by text_response to
    # steer structured outputs (the model continues from this partial turn).
    StrInput(
        name="prefill",
        display_name="Prefill",
        info="Prefill text to guide the model's response.",
        advanced=True,
    ),
]
# Component outputs: plain text (via text_response) or the LLM object itself
# (via build_model) for wiring into other components.
outputs = [
    Output(display_name="Text", name="text_output", method="text_response"),
    Output(display_name="Language Model", name="model_output", method="build_model"),
]
def build_config(self):
    """Return the legacy field-config dict for this component.

    NOTE(review): this is the pre-refactor configuration style; the new
    component format declares the same fields via the `inputs` list.
    """
    return {
        "model": {
            "display_name": "Model Name",
            "options": [
                "claude-3-opus-20240229",
                "claude-3-sonnet-20240229",
                "claude-3-haiku-20240307",
                "claude-2.1",
                "claude-2.0",
                "claude-instant-1.2",
                "claude-instant-1",
            ],
            "info": "https://python.langchain.com/docs/integrations/chat/anthropic",
            "required": True,
            "value": "claude-3-opus-20240229",
        },
        "anthropic_api_key": {
            "display_name": "Anthropic API Key",
            "required": True,
            "password": True,
            "info": "Your Anthropic API key.",
        },
        "max_tokens": {
            "display_name": "Max Tokens",
            "advanced": True,
            "info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
        },
        "temperature": {
            "display_name": "Temperature",
            "field_type": "float",
            "value": 0.1,
        },
        "anthropic_api_url": {
            "display_name": "Anthropic API URL",
            "advanced": True,
            "info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
        },
        # Hide the code editor field in the UI for this component.
        "code": {"show": False},
        "input_value": {"display_name": "Input", "input_types": ["Text", "Data", "Prompt"]},
        "stream": {
            "display_name": "Stream",
            "advanced": True,
            "info": STREAM_INFO_TEXT,
        },
        "system_message": {
            "display_name": "System Message",
            "advanced": True,
            "info": "System message to pass to the model.",
        },
    }
def text_response(self) -> Text:
    """Invoke the Anthropic chat model with a system / human / assistant-prefill
    message triple and return the prefill concatenated with the completion.

    The assistant prefill is a partial assistant turn that the model continues,
    which is how this component steers structured outputs. The prefill is
    prepended to the model's completion so the caller sees the full text.

    NOTE(review): the component's `stream` input is not used by this method —
    the response is always produced by a single blocking `invoke` call.
    """
    output = self.build_model()
    # Tolerate an unset prefill (None) instead of failing on concatenation below.
    prefill = self.prefill or ""
    messages = [
        ("system", self.system_message),
        ("human", self.input_value),
        # Partial assistant turn; the model generates its continuation.
        ("assistant", prefill),
    ]
    result = output.invoke(messages)
    # Compute the full text once: prefill + the model's continuation.
    text = prefill + result.content
    self.status = text
    return text
def build(
self,
model: str,
input_value: Text,
system_message: Optional[str] = None,
anthropic_api_key: Optional[str] = None,
max_tokens: Optional[int] = 1000,
temperature: Optional[float] = None,
anthropic_api_url: Optional[str] = None,
stream: bool = False,
) -> Text:
# Set default API endpoint if not provided
if not anthropic_api_url:
anthropic_api_url = "https://api.anthropic.com"
def build_model(self) -> BaseLanguageModel:
model = self.model
anthropic_api_key = self.anthropic_api_key
max_tokens = self.max_tokens
temperature = self.temperature
anthropic_api_url = self.anthropic_api_url or "https://api.anthropic.com"
try:
output = ChatAnthropic(
model_name=model,
model=model,
anthropic_api_key=(SecretStr(anthropic_api_key) if anthropic_api_key else None),
max_tokens_to_sample=max_tokens, # type: ignore
temperature=temperature,
@ -102,4 +105,5 @@ class AnthropicLLM(LCModelComponent):
except Exception as e:
raise ValueError("Could not connect to Anthropic API.") from e
return self.get_chat_result(output, stream, input_value, system_message)
return output