fix: azure openai model component: resource not found (#2426)
* fix azure openai model component: resource not found

* type

* type

* [autofix.ci] apply automated fixes

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
This commit is contained in:
parent
6ef7776004
commit
0494bc2122
9 changed files with 23 additions and 74 deletions
|
|
@@ -1,5 +1,6 @@
|
|||
import json
|
||||
import warnings
|
||||
from abc import abstractmethod
|
||||
from typing import Optional, Union
|
||||
|
||||
from langchain_core.language_models.llms import LLM
|
||||
|
|
@@ -164,3 +165,9 @@ class LCModelComponent(Component):
|
|||
if message := self._get_exception_message(e):
|
||||
raise ValueError(message) from e
|
||||
raise e
|
||||
|
||||
@abstractmethod
|
||||
def build_model(self) -> LanguageModel:
|
||||
"""
|
||||
Implement this method to build the model.
|
||||
"""
|
||||
|
|
|
|||
|
|
@@ -4,7 +4,7 @@ from pydantic.v1 import SecretStr
|
|||
from langflow.base.constants import STREAM_INFO_TEXT
|
||||
from langflow.base.models.model import LCModelComponent
|
||||
from langflow.field_typing import LanguageModel
|
||||
from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageTextInput, Output, SecretStrInput
|
||||
from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput
|
||||
|
||||
|
||||
class AnthropicModelComponent(LCModelComponent):
|
||||
|
|
@@ -59,10 +59,6 @@ class AnthropicModelComponent(LCModelComponent):
|
|||
advanced=True,
|
||||
),
|
||||
]
|
||||
outputs = [
|
||||
Output(display_name="Text", name="text_output", method="text_response"),
|
||||
Output(display_name="Language Model", name="model_output", method="build_model"),
|
||||
]
|
||||
|
||||
def build_model(self) -> LanguageModel: # type: ignore[type-var]
|
||||
model = self.model
|
||||
|
|
|
|||
|
|
@@ -1,10 +1,9 @@
|
|||
from langchain_openai import AzureChatOpenAI
|
||||
from pydantic.v1 import SecretStr
|
||||
|
||||
from langflow.base.constants import STREAM_INFO_TEXT
|
||||
from langflow.base.models.model import LCModelComponent
|
||||
from langflow.field_typing import LanguageModel
|
||||
from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, SecretStrInput, StrInput
|
||||
from langflow.inputs import MessageTextInput
|
||||
from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, SecretStrInput, StrInput
|
||||
|
||||
|
||||
class AzureChatOpenAIComponent(LCModelComponent):
|
||||
|
|
@@ -14,16 +13,6 @@ class AzureChatOpenAIComponent(LCModelComponent):
|
|||
beta = False
|
||||
icon = "Azure"
|
||||
|
||||
AZURE_OPENAI_MODELS = [
|
||||
"gpt-35-turbo",
|
||||
"gpt-35-turbo-16k",
|
||||
"gpt-35-turbo-instruct",
|
||||
"gpt-4",
|
||||
"gpt-4-32k",
|
||||
"gpt-4o",
|
||||
"gpt-4-turbo",
|
||||
]
|
||||
|
||||
AZURE_OPENAI_API_VERSIONS = [
|
||||
"2023-03-15-preview",
|
||||
"2023-05-15",
|
||||
|
|
@@ -37,26 +26,20 @@ class AzureChatOpenAIComponent(LCModelComponent):
|
|||
]
|
||||
|
||||
inputs = [
|
||||
DropdownInput(
|
||||
name="model",
|
||||
display_name="Model Name",
|
||||
options=AZURE_OPENAI_MODELS,
|
||||
value=AZURE_OPENAI_MODELS[0],
|
||||
),
|
||||
StrInput(
|
||||
MessageTextInput(
|
||||
name="azure_endpoint",
|
||||
display_name="Azure Endpoint",
|
||||
info="Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/`",
|
||||
required=True,
|
||||
),
|
||||
StrInput(name="azure_deployment", display_name="Deployment Name"),
|
||||
MessageTextInput(name="azure_deployment", display_name="Deployment Name", required=True),
|
||||
SecretStrInput(name="api_key", display_name="API Key"),
|
||||
DropdownInput(
|
||||
name="api_version",
|
||||
display_name="API Version",
|
||||
options=AZURE_OPENAI_API_VERSIONS,
|
||||
value=AZURE_OPENAI_API_VERSIONS[-1],
|
||||
advanced=True,
|
||||
),
|
||||
SecretStrInput(name="api_key", display_name="API Key", password=True),
|
||||
FloatInput(name="temperature", display_name="Temperature", value=0.7),
|
||||
IntInput(
|
||||
name="max_tokens",
|
||||
|
|
@@ -73,13 +56,8 @@ class AzureChatOpenAIComponent(LCModelComponent):
|
|||
info="System message to pass to the model.",
|
||||
),
|
||||
]
|
||||
outputs = [
|
||||
Output(display_name="Text", name="text_output", method="text_response"),
|
||||
Output(display_name="Language Model", name="model_output", method="model_response"),
|
||||
]
|
||||
|
||||
def model_response(self) -> LanguageModel: # type: ignore[type-var]
|
||||
model = self.model
|
||||
def build_model(self) -> LanguageModel: # type: ignore[type-var]
|
||||
azure_endpoint = self.azure_endpoint
|
||||
azure_deployment = self.azure_deployment
|
||||
api_version = self.api_version
|
||||
|
|
@@ -88,23 +66,17 @@ class AzureChatOpenAIComponent(LCModelComponent):
|
|||
max_tokens = self.max_tokens
|
||||
stream = self.stream
|
||||
|
||||
if api_key:
|
||||
secret_api_key = SecretStr(api_key)
|
||||
else:
|
||||
secret_api_key = None
|
||||
|
||||
try:
|
||||
output = AzureChatOpenAI(
|
||||
model=model,
|
||||
azure_endpoint=azure_endpoint,
|
||||
azure_deployment=azure_deployment,
|
||||
api_version=api_version,
|
||||
api_key=secret_api_key,
|
||||
api_key=api_key,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens or None,
|
||||
streaming=stream,
|
||||
)
|
||||
except Exception as e:
|
||||
raise ValueError("Could not connect to AzureOpenAI API.") from e
|
||||
raise ValueError(f"Could not connect to AzureOpenAI API: {str(e)}") from e
|
||||
|
||||
return output # type: ignore
|
||||
|
|
|
|||
|
|
@@ -4,7 +4,7 @@ from pydantic.v1 import SecretStr
|
|||
from langflow.base.constants import STREAM_INFO_TEXT
|
||||
from langflow.base.models.model import LCModelComponent
|
||||
from langflow.field_typing.constants import LanguageModel
|
||||
from langflow.io import BoolInput, DropdownInput, FloatInput, MessageTextInput, Output, SecretStrInput
|
||||
from langflow.io import BoolInput, DropdownInput, FloatInput, MessageTextInput, SecretStrInput
|
||||
|
||||
|
||||
class QianfanChatEndpointComponent(LCModelComponent):
|
||||
|
|
@@ -84,10 +84,6 @@ class QianfanChatEndpointComponent(LCModelComponent):
|
|||
advanced=True,
|
||||
),
|
||||
]
|
||||
outputs = [
|
||||
Output(display_name="Text", name="text_output", method="text_response"),
|
||||
Output(display_name="Language Model", name="model_output", method="build_model"),
|
||||
]
|
||||
|
||||
def build_model(self) -> LanguageModel: # type: ignore[type-var]
|
||||
model = self.model
|
||||
|
|
|
|||
|
|
@@ -5,7 +5,7 @@ from pydantic.v1 import SecretStr
|
|||
from langflow.base.constants import STREAM_INFO_TEXT
|
||||
from langflow.base.models.model import LCModelComponent
|
||||
from langflow.field_typing import LanguageModel
|
||||
from langflow.io import BoolInput, FloatInput, MessageInput, Output, SecretStrInput, StrInput
|
||||
from langflow.io import BoolInput, FloatInput, MessageInput, SecretStrInput, StrInput
|
||||
|
||||
|
||||
class CohereComponent(LCModelComponent):
|
||||
|
|
@@ -32,10 +32,6 @@ class CohereComponent(LCModelComponent):
|
|||
advanced=True,
|
||||
),
|
||||
]
|
||||
outputs = [
|
||||
Output(display_name="Text", name="text_output", method="text_response"),
|
||||
Output(display_name="Language Model", name="model_output", method="build_model"),
|
||||
]
|
||||
|
||||
def build_model(self) -> LanguageModel | BaseChatModel:
|
||||
cohere_api_key = self.cohere_api_key
|
||||
|
|
|
|||
|
|
@@ -4,7 +4,7 @@ from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
|
|||
from langflow.base.constants import STREAM_INFO_TEXT
|
||||
from langflow.base.models.model import LCModelComponent
|
||||
from langflow.field_typing import LanguageModel
|
||||
from langflow.io import BoolInput, DictInput, DropdownInput, MessageInput, Output, SecretStrInput, StrInput
|
||||
from langflow.io import BoolInput, DictInput, DropdownInput, MessageInput, SecretStrInput, StrInput
|
||||
|
||||
|
||||
class HuggingFaceEndpointsComponent(LCModelComponent):
|
||||
|
|
@@ -31,11 +31,6 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
|
|||
),
|
||||
]
|
||||
|
||||
outputs = [
|
||||
Output(display_name="Text", name="text_output", method="text_response"),
|
||||
Output(display_name="Language Model", name="model_output", method="build_model"),
|
||||
]
|
||||
|
||||
def build_model(self) -> LanguageModel: # type: ignore[type-var]
|
||||
endpoint_url = self.endpoint_url
|
||||
task = self.task
|
||||
|
|
|
|||
|
|
@@ -4,7 +4,7 @@ from pydantic.v1 import SecretStr
|
|||
from langflow.base.constants import STREAM_INFO_TEXT
|
||||
from langflow.base.models.model import LCModelComponent
|
||||
from langflow.field_typing import LanguageModel
|
||||
from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, SecretStrInput, StrInput
|
||||
from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, SecretStrInput, StrInput
|
||||
|
||||
|
||||
class MistralAIModelComponent(LCModelComponent):
|
||||
|
|
@@ -65,11 +65,6 @@ class MistralAIModelComponent(LCModelComponent):
|
|||
BoolInput(name="safe_mode", display_name="Safe Mode", advanced=True),
|
||||
]
|
||||
|
||||
outputs = [
|
||||
Output(display_name="Text", name="text_output", method="text_response"),
|
||||
Output(display_name="Language Model", name="model_output", method="build_model"),
|
||||
]
|
||||
|
||||
def build_model(self) -> LanguageModel: # type: ignore[type-var]
|
||||
mistral_api_key = self.mistral_api_key
|
||||
temperature = self.temperature
|
||||
|
|
|
|||
|
|
@@ -6,7 +6,7 @@ from langchain_community.chat_models import ChatOllama
|
|||
from langflow.base.constants import STREAM_INFO_TEXT
|
||||
from langflow.base.models.model import LCModelComponent
|
||||
from langflow.field_typing import LanguageModel
|
||||
from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, StrInput
|
||||
from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageInput, StrInput
|
||||
|
||||
|
||||
class ChatOllamaComponent(LCModelComponent):
|
||||
|
|
@@ -218,10 +218,6 @@ class ChatOllamaComponent(LCModelComponent):
|
|||
advanced=True,
|
||||
),
|
||||
]
|
||||
outputs = [
|
||||
Output(display_name="Text", name="text_output", method="text_response"),
|
||||
Output(display_name="Language Model", name="model_output", method="build_model"),
|
||||
]
|
||||
|
||||
def build_model(self) -> LanguageModel: # type: ignore[type-var]
|
||||
# Mapping mirostat settings to their corresponding values
|
||||
|
|
|
|||
|
|
@@ -3,7 +3,7 @@ from langchain_google_vertexai import ChatVertexAI
|
|||
from langflow.base.constants import STREAM_INFO_TEXT
|
||||
from langflow.base.models.model import LCModelComponent
|
||||
from langflow.field_typing import LanguageModel
|
||||
from langflow.io import BoolInput, FileInput, FloatInput, IntInput, MessageInput, MultilineInput, Output, StrInput
|
||||
from langflow.io import BoolInput, FileInput, FloatInput, IntInput, MessageInput, MultilineInput, StrInput
|
||||
|
||||
|
||||
class ChatVertexAIComponent(LCModelComponent):
|
||||
|
|
@@ -47,10 +47,6 @@ class ChatVertexAIComponent(LCModelComponent):
|
|||
advanced=True,
|
||||
),
|
||||
]
|
||||
outputs = [
|
||||
Output(display_name="Text", name="text_output", method="text_response"),
|
||||
Output(display_name="Language Model", name="model_output", method="build_model"),
|
||||
]
|
||||
|
||||
def build_model(self) -> LanguageModel: # type: ignore[type-var]
|
||||
credentials = self.credentials
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue