fix: llm components

This commit is contained in:
namastex888 2024-06-14 23:34:39 +00:00
commit aff0784280
4 changed files with 38 additions and 26 deletions

View file

@@ -9,6 +9,7 @@ class AmazonBedrockComponent(LCModelComponent):
display_name: str = "Amazon Bedrock"
description: str = "Generate text using Amazon Bedrock LLMs."
icon = "Amazon"
inputs = [
StrInput(name="input_value", display_name="Input", input_types=["Text", "Data", "Prompt"]),
DropdownInput(
@@ -17,24 +18,35 @@ class AmazonBedrockComponent(LCModelComponent):
options=[
"amazon.titan-text-express-v1",
"amazon.titan-text-lite-v1",
"amazon.titan-text-premier-v1:0",
"amazon.titan-embed-text-v1",
"amazon.titan-embed-text-v2:0",
"amazon.titan-embed-image-v1",
"amazon.titan-image-generator-v1",
"anthropic.claude-v2",
"anthropic.claude-v2:1",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-opus-20240229-v1:0",
"anthropic.claude-instant-v1",
"ai21.j2-mid-v1",
"ai21.j2-ultra-v1",
"cohere.command-text-v14",
"cohere.command-light-text-v14",
"cohere.command-r-v1:0",
"cohere.command-r-plus-v1:0",
"cohere.embed-english-v3",
"cohere.embed-multilingual-v3",
"meta.llama2-13b-chat-v1",
"meta.llama2-70b-chat-v1",
"meta.llama3-8b-instruct-v1:0",
"meta.llama3-70b-instruct-v1:0",
"mistral.mistral-7b-instruct-v0:2",
"mistral.mixtral-8x7b-instruct-v0:1",
"mistral.mistral-large-2402-v1:0",
"mistral.mistral-small-2402-v1:0",
"stability.stable-diffusion-xl-v0",
"stability.stable-diffusion-xl-v1",
],
value="anthropic.claude-instant-v1",
),
@@ -83,7 +95,6 @@ class AmazonBedrockComponent(LCModelComponent):
streaming=stream,
cache=cache,
)
except Exception as e:
raise ValueError("Could not connect to AmazonBedrock API.") from e
return output

View file

@@ -81,10 +81,11 @@ class AnthropicModelComponent(LCModelComponent):
messages = [
("system", system_message),
("human", input_value),
("assistant", prefill),
]
if prefill:
messages.append(("assistant", prefill))
result = output.invoke(messages)
self.status = prefill + result.content
self.status = result.content
return prefill + result.content
def build_model(self) -> BaseLanguageModel:

View file

@@ -1,13 +1,11 @@
from typing import Optional
from langchain_openai import AzureChatOpenAI
from pydantic.v1 import SecretStr
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import BaseLanguageModel, Text
from langflow.template import Input, Output
from langflow.inputs import BoolInput, DropdownInput, FloatInput, IntInput, StrInput
from langflow.template import Output
class AzureChatOpenAIComponent(LCModelComponent):
display_name: str = "Azure OpenAI"
@@ -22,7 +20,8 @@ class AzureChatOpenAIComponent(LCModelComponent):
"gpt-35-turbo-instruct",
"gpt-4",
"gpt-4-32k",
"gpt-4-vision",
"gpt-4o",
"gpt-4-turbo",
]
AZURE_OPENAI_API_VERSIONS = [
@@ -33,41 +32,42 @@ class AzureChatOpenAIComponent(LCModelComponent):
"2023-08-01-preview",
"2023-09-01-preview",
"2023-12-01-preview",
"2024-04-09",
"2024-05-13",
]
inputs = [
Input(
name="model", type=str, display_name="Model Name", options=AZURE_OPENAI_MODELS, value=AZURE_OPENAI_MODELS[0]
DropdownInput(
name="model",
display_name="Model Name",
options=AZURE_OPENAI_MODELS,
value=AZURE_OPENAI_MODELS[0],
),
Input(
StrInput(
name="azure_endpoint",
type=str,
display_name="Azure Endpoint",
info="Your Azure endpoint, including the resource.. Example: `https://example-resource.azure.openai.com/`",
info="Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/`",
),
Input(name="azure_deployment", type=str, display_name="Deployment Name"),
Input(
StrInput(name="azure_deployment", display_name="Deployment Name"),
DropdownInput(
name="api_version",
type=str,
display_name="API Version",
options=AZURE_OPENAI_API_VERSIONS,
value=AZURE_OPENAI_API_VERSIONS[-1],
advanced=True,
),
Input(name="api_key", type=str, display_name="API Key", password=True),
Input(name="temperature", type=float, display_name="Temperature", default=0.7),
Input(
StrInput(name="api_key", display_name="API Key", password=True),
FloatInput(name="temperature", display_name="Temperature", value=0.7),
IntInput(
name="max_tokens",
type=Optional[int],
display_name="Max Tokens",
advanced=True,
info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
),
Input(name="input_value", type=str, display_name="Input", input_types=["Text", "Data", "Prompt"]),
Input(name="stream", type=bool, display_name="Stream", info=STREAM_INFO_TEXT, advanced=True),
Input(
StrInput(name="input_value", display_name="Input", input_types=["Text", "Data", "Prompt"]),
BoolInput(name="stream", display_name="Stream", info=STREAM_INFO_TEXT, advanced=True),
StrInput(
name="system_message",
type=Optional[str],
display_name="System Message",
advanced=True,
info="System message to pass to the model.",
@@ -116,4 +116,4 @@ class AzureChatOpenAIComponent(LCModelComponent):
except Exception as e:
raise ValueError("Could not connect to AzureOpenAI API.") from e
return output
return output

View file

@@ -91,7 +91,7 @@ class OpenAIModelComponent(LCModelComponent):
api_key = None
response_format = None
if json_mode:
response_format = {"type": "json_object"}
response_format = {"type": "json_object"}
output = ChatOpenAI(
max_tokens=max_tokens or None,
model_kwargs=model_kwargs or {},