feat: Add function to validate models with tool calling function and related fixes in agent component (#5720)
* Update nvidia.py
* Improve the agent experience by improving model selection and making only the tool-calling models available
* Variable clean up
* [autofix.ci] apply automated fixes
* Update src/backend/base/langflow/base/models/model_input_constants.py (Co-authored-by: Gabriel Luiz Freitas Almeida <gabriel@langflow.org>)
* Update src/backend/base/langflow/base/models/model_input_constants.py (Co-authored-by: Gabriel Luiz Freitas Almeida <gabriel@langflow.org>)
* Added default models
* [autofix.ci] apply automated fixes
* [autofix.ci] apply automated fixes (attempt 2/3)
* Format errors solved
* [autofix.ci] apply automated fixes
* Update model.py

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Gabriel Luiz Freitas Almeida <gabriel@langflow.org>
This commit is contained in:
parent 39ef9ba1f9
commit 778b74dfa8

14 changed files with 270 additions and 106 deletions
@@ -3,6 +3,7 @@ import json
import warnings
from abc import abstractmethod

from langchain_core.language_models import BaseChatModel
from langchain_core.language_models.llms import LLM
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseOutputParser
@@ -43,6 +44,20 @@ class LCModelComponent(Component):
    def _get_exception_message(self, e: Exception):
        return str(e)

    def supports_tool_calling(self, model: LanguageModel) -> bool:
        try:
            # Check if the bind_tools method is the same as the base class's method
            if model.bind_tools is BaseChatModel.bind_tools:
                return False

            def test_tool(x: int) -> int:
                return x

            model_with_tool = model.bind_tools([test_tool])
            return hasattr(model_with_tool, "tools") and len(model_with_tool.tools) > 0
        except (AttributeError, TypeError, ValueError):
            return False

    def _validate_outputs(self) -> None:
        # At least these two outputs must be defined
        required_output_methods = ["text_response", "build_model"]
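To make the new helper concrete, here is a hedged, minimal sketch of the bind_tools probe run against one provider model; it assumes langchain-anthropic is installed, and the model name and API key are placeholders only:

from langchain_anthropic import ChatAnthropic


def test_tool(x: int) -> int:
    return x


# Placeholder model name and key; constructing the model makes no network call.
model = ChatAnthropic(model="claude-3-5-sonnet-latest", anthropic_api_key="sk-ant-placeholder")

# Binding a plain function yields a runnable that carries the converted tool schema
# in its kwargs, which is the kind of signal supports_tool_calling probes for.
bound = model.bind_tools([test_tool])
print(bound.kwargs.get("tools"))
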
@@ -28,7 +28,7 @@ def process_inputs(component_data):
    if isinstance(component_data, SecretStrInput):
        component_data.value = ""
        component_data.load_from_db = False
    elif component_data.name == "temperature":
    elif component_data.name in {"temperature", "tool_model_enabled", "base_url"}:
        component_data = set_advanced_true(component_data)
    return component_data
@@ -180,3 +180,11 @@ except ImportError:

MODEL_PROVIDERS = list(MODEL_PROVIDERS_DICT.keys())
ALL_PROVIDER_FIELDS: list[str] = [field for provider in MODEL_PROVIDERS_DICT.values() for field in provider["fields"]]

MODEL_DYNAMIC_UPDATE_FIELDS = [
    "api_key",
    "model",
    "tool_model_enabled",
    "base_url",
    "model_name",
]
@@ -3,6 +3,7 @@ from langchain_core.tools import StructuredTool
from langflow.base.agents.agent import LCToolsAgentComponent
from langflow.base.models.model_input_constants import (
    ALL_PROVIDER_FIELDS,
    MODEL_DYNAMIC_UPDATE_FIELDS,
    MODEL_PROVIDERS_DICT,
)
from langflow.base.models.model_utils import get_model_name
@@ -144,6 +145,16 @@ class AgentComponent(ToolCallingAgentComponent):
            model_kwargs = {input_.name: getattr(self, f"{prefix}{input_.name}") for input_ in inputs}
            return component.set(**model_kwargs).build_model()

    def set_component_params(self, component):
        provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)
        if provider_info:
            inputs = provider_info.get("inputs")
            prefix = provider_info.get("prefix")
            model_kwargs = {input_.name: getattr(self, f"{prefix}{input_.name}") for input_ in inputs}

            return component.set(**model_kwargs)
        return component

    def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:
        """Delete specified fields from build_config."""
        for field in fields:
@@ -164,7 +175,7 @@ class AgentComponent(ToolCallingAgentComponent):
    ) -> dotdict:
        # Iterate over all providers in the MODEL_PROVIDERS_DICT
        # Existing logic for updating build_config
        if field_name == "agent_llm":
        if field_name in ("agent_llm",):
            provider_info = MODEL_PROVIDERS_DICT.get(field_value)
            if provider_info:
                component_class = provider_info.get("component_class")
@@ -233,10 +244,15 @@ class AgentComponent(ToolCallingAgentComponent):
        if missing_keys:
            msg = f"Missing required keys in build_config: {missing_keys}"
            raise ValueError(msg)
        if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:
        if (
            isinstance(self.agent_llm, str)
            and self.agent_llm in MODEL_PROVIDERS_DICT
            and field_name in MODEL_DYNAMIC_UPDATE_FIELDS
        ):
            provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)
            if provider_info:
                component_class = provider_info.get("component_class")
                component_class = self.set_component_params(component_class)
                prefix = provider_info.get("prefix")
                if component_class and hasattr(component_class, "update_build_config"):
                    # Call each component class's update_build_config method
@@ -246,5 +262,4 @@ class AgentComponent(ToolCallingAgentComponent):
                    build_config = await update_component_build_config(
                        component_class, build_config, field_value, field_name
                    )

        return build_config
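As an aside, a hedged, self-contained illustration of the gate introduced above; should_refresh_provider is a hypothetical name and the providers dict is a stand-in for MODEL_PROVIDERS_DICT:

# Hypothetical illustration of the new dynamic-update gate; not part of the diff itself.
MODEL_DYNAMIC_UPDATE_FIELDS = ["api_key", "model", "tool_model_enabled", "base_url", "model_name"]


def should_refresh_provider(agent_llm: object, field_name: str, providers: dict) -> bool:
    # Refresh the provider component only when a known provider is selected
    # and the edited field is one that can change which models are available.
    return isinstance(agent_llm, str) and agent_llm in providers and field_name in MODEL_DYNAMIC_UPDATE_FIELDS


providers = {"Anthropic": {}, "Groq": {}, "NVIDIA": {}}
print(should_refresh_provider("Anthropic", "api_key", providers))      # True
print(should_refresh_provider("Anthropic", "temperature", providers))  # False
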
@@ -1,9 +1,14 @@
from pydantic.v1 import SecretStr
from typing import Any

import requests
from loguru import logger

from langflow.base.models.anthropic_constants import ANTHROPIC_MODELS
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.io import DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput
from langflow.field_typing.range_spec import RangeSpec
from langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput, SliderInput
from langflow.schema.dotdict import dotdict


class AnthropicModelComponent(LCModelComponent):
@@ -22,19 +27,42 @@ class AnthropicModelComponent(LCModelComponent):
            info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
        ),
        DropdownInput(
            name="model",
            name="model_name",
            display_name="Model Name",
            options=ANTHROPIC_MODELS,
            info="https://python.langchain.com/docs/integrations/chat/anthropic",
            value="claude-3-5-sonnet-latest",
            options=[],
            refresh_button=True,
            real_time_refresh=True,
        ),
        SecretStrInput(
            name="api_key",
            display_name="Anthropic API Key",
            info="Your Anthropic API key.",
            value=None,
            real_time_refresh=True,
        ),
        SliderInput(
            name="temperature",
            display_name="Temperature",
            value=0.1,
            info="Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
            range_spec=RangeSpec(min=0, max=1, step=0.01),
        ),
        SecretStrInput(name="anthropic_api_key", display_name="Anthropic API Key", info="Your Anthropic API key."),
        FloatInput(name="temperature", display_name="Temperature", value=0.1),
        MessageTextInput(
            name="anthropic_api_url",
            name="base_url",
            display_name="Anthropic API URL",
            advanced=True,
            info="Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
            value="https://api.anthropic.com",
            real_time_refresh=True,
        ),
        BoolInput(
            name="tool_model_enabled",
            display_name="Enable Tool Models",
            info=(
                "Select if you want to use models that can work with tools. If yes, only those models will be shown."
            ),
            advanced=False,
            value=True,
            real_time_refresh=True,
        ),
        MessageTextInput(
            name="prefill", display_name="Prefill", info="Prefill text to guide the model's response.", advanced=True
@@ -47,19 +75,13 @@ class AnthropicModelComponent(LCModelComponent):
        except ImportError as e:
            msg = "langchain_anthropic is not installed. Please install it with `pip install langchain_anthropic`."
            raise ImportError(msg) from e
        model = self.model
        anthropic_api_key = self.anthropic_api_key
        max_tokens = self.max_tokens
        temperature = self.temperature
        anthropic_api_url = self.anthropic_api_url or "https://api.anthropic.com"

        try:
            output = ChatAnthropic(
                model=model,
                anthropic_api_key=(SecretStr(anthropic_api_key).get_secret_value() if anthropic_api_key else None),
                max_tokens_to_sample=max_tokens,
                temperature=temperature,
                anthropic_api_url=anthropic_api_url,
                model=self.model_name,
                anthropic_api_key=self.api_key,
                max_tokens_to_sample=self.max_tokens,
                temperature=self.temperature,
                anthropic_api_url=self.base_url,
                streaming=self.stream,
            )
        except Exception as e:
@@ -68,6 +90,32 @@ class AnthropicModelComponent(LCModelComponent):

        return output

    def get_models(self, tool_model_enabled: bool | None = None) -> list[str]:
        try:
            import anthropic

            client = anthropic.Anthropic(api_key=self.api_key)
            models = client.models.list(limit=20).data
            model_ids = [model.id for model in models]
        except (ImportError, ValueError, requests.exceptions.RequestException) as e:
            logger.exception(f"Error getting model names: {e}")
            model_ids = ANTHROPIC_MODELS
        if tool_model_enabled:
            try:
                from langchain_anthropic.chat_models import ChatAnthropic
            except ImportError as e:
                msg = "langchain_anthropic is not installed. Please install it with `pip install langchain_anthropic`."
                raise ImportError(msg) from e
            for model in model_ids:
                model_with_tool = ChatAnthropic(
                    model=self.model_name,
                    anthropic_api_key=self.api_key,
                    anthropic_api_url=self.base_url,
                )
                if not self.supports_tool_calling(model_with_tool):
                    model_ids.remove(model)
        return model_ids

    def _get_exception_message(self, exception: Exception) -> str | None:
        """Get a message from an Anthropic exception.
@@ -86,3 +134,19 @@ class AnthropicModelComponent(LCModelComponent):
        if message:
            return message
        return None

    def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):
        if field_name in ("base_url", "model_name", "tool_model_enabled", "api_key") and field_value:
            try:
                if len(self.api_key) != 0:
                    try:
                        ids = self.get_models(tool_model_enabled=self.tool_model_enabled)
                    except (ImportError, ValueError, requests.exceptions.RequestException) as e:
                        logger.exception(f"Error getting model names: {e}")
                        ids = ANTHROPIC_MODELS
                    build_config["model_name"]["options"] = ids
                    build_config["model_name"]["value"] = ids[0]
            except Exception as e:
                msg = f"Error getting model names: {e}"
                raise ValueError(msg) from e
        return build_config
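For orientation, a hedged sketch of the SDK call the new Anthropic get_models leans on, assuming the anthropic package is installed; the API key is a placeholder:

import anthropic

# Placeholder key; the listing request happens in client.models.list.
client = anthropic.Anthropic(api_key="sk-ant-placeholder")
page = client.models.list(limit=20)
print([model.id for model in page.data])
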
@@ -1,11 +1,12 @@
import requests
from loguru import logger
from pydantic.v1 import SecretStr
from typing_extensions import override

from langflow.base.models.groq_constants import GROQ_MODELS
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.io import DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput
from langflow.field_typing.range_spec import RangeSpec
from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput, SliderInput


class GroqModel(LCModelComponent):
@@ -16,13 +17,16 @@ class GroqModel(LCModelComponent):

    inputs = [
        *LCModelComponent._base_inputs,
        SecretStrInput(name="groq_api_key", display_name="Groq API Key", info="API key for the Groq API."),
        SecretStrInput(
            name="api_key", display_name="Groq API Key", info="API key for the Groq API.", real_time_refresh=True
        ),
        MessageTextInput(
            name="groq_api_base",
            name="base_url",
            display_name="Groq API Base",
            info="Base URL path for API requests, leave blank if not using a proxy or service emulator.",
            advanced=True,
            value="https://api.groq.com",
            real_time_refresh=True,
        ),
        IntInput(
            name="max_tokens",
@@ -36,6 +40,13 @@ class GroqModel(LCModelComponent):
            info="Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
            value=0.1,
        ),
        SliderInput(
            name="temperature",
            display_name="Temperature",
            value=0.1,
            info="Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
            range_spec=RangeSpec(min=0, max=1, step=0.01),
        ),
        IntInput(
            name="n",
            display_name="N",
@@ -47,33 +58,65 @@ class GroqModel(LCModelComponent):
            name="model_name",
            display_name="Model",
            info="The name of the model to use.",
            options=GROQ_MODELS,
            value="llama-3.1-8b-instant",
            options=[],
            refresh_button=True,
            real_time_refresh=True,
        ),
        BoolInput(
            name="tool_model_enabled",
            display_name="Enable Tool Models",
            info=(
                "Select if you want to use models that can work with tools. If yes, only those models will be shown."
            ),
            advanced=False,
            value=True,
            real_time_refresh=True,
        ),
    ]

    def get_models(self) -> list[str]:
        api_key = self.groq_api_key
        base_url = self.groq_api_base or "https://api.groq.com"
        url = f"{base_url}/openai/v1/models"

        headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}

    def get_models(self, tool_model_enabled: bool | None = None) -> list[str]:
        try:
            url = f"{self.base_url}/openai/v1/models"
            headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}

            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            model_list = response.json()
            return [model["id"] for model in model_list.get("data", [])]
        except requests.RequestException as e:
            self.status = f"Error fetching models: {e}"
            return GROQ_MODELS
            model_ids = [model["id"] for model in model_list.get("data", [])]
        except (ImportError, ValueError, requests.exceptions.RequestException) as e:
            logger.exception(f"Error getting model names: {e}")
            model_ids = GROQ_MODELS
        if tool_model_enabled:
            try:
                from langchain_groq import ChatGroq
            except ImportError as e:
                msg = "langchain_groq is not installed. Please install it with `pip install langchain_groq`."
                raise ImportError(msg) from e
            for model in model_ids:
                model_with_tool = ChatGroq(
                    model=model,
                    api_key=self.api_key,
                    base_url=self.base_url,
                )
                if not self.supports_tool_calling(model_with_tool):
                    model_ids.remove(model)
            return model_ids
        return model_ids

    @override
    def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):
        if field_name in {"groq_api_key", "groq_api_base", "model_name"}:
            models = self.get_models()
            build_config["model_name"]["options"] = models
        if field_name in ("base_url", "model_name", "tool_model_enabled", "api_key") and field_value:
            try:
                if len(self.api_key) != 0:
                    try:
                        ids = self.get_models(tool_model_enabled=self.tool_model_enabled)
                    except (ImportError, ValueError, requests.exceptions.RequestException) as e:
                        logger.exception(f"Error getting model names: {e}")
                        ids = GROQ_MODELS
                    build_config["model_name"]["options"] = ids
                    build_config["model_name"]["value"] = ids[0]
            except Exception as e:
                msg = f"Error getting model names: {e}"
                raise ValueError(msg) from e
        return build_config

    def build_model(self) -> LanguageModel:  # type: ignore[type-var]
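For reference, a hedged, standalone version of the request behind the Groq get_models; the API key is a placeholder and requests must be installed:

import requests

base_url = "https://api.groq.com"
headers = {"Authorization": "Bearer gsk_placeholder", "Content-Type": "application/json"}

# Same endpoint the component queries; it returns an OpenAI-style model listing.
response = requests.get(f"{base_url}/openai/v1/models", headers=headers, timeout=10)
response.raise_for_status()
print([model["id"] for model in response.json().get("data", [])])
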
@@ -83,20 +126,12 @@ class GroqModel(LCModelComponent):
            msg = "langchain-groq is not installed. Please install it with `pip install langchain-groq`."
            raise ImportError(msg) from e

        groq_api_key = self.groq_api_key
        model_name = self.model_name
        max_tokens = self.max_tokens
        temperature = self.temperature
        groq_api_base = self.groq_api_base
        n = self.n
        stream = self.stream

        return ChatGroq(
            model=model_name,
            max_tokens=max_tokens or None,
            temperature=temperature,
            base_url=groq_api_base,
            n=n or 1,
            api_key=SecretStr(groq_api_key).get_secret_value(),
            streaming=stream,
            model=self.model_name,
            max_tokens=self.max_tokens or None,
            temperature=self.temperature,
            base_url=self.base_url,
            n=self.n or 1,
            api_key=SecretStr(self.api_key).get_secret_value(),
            streaming=self.stream,
        )
@@ -2,7 +2,8 @@ from typing import Any

from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.inputs import BoolInput, DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput
from langflow.field_typing.range_spec import RangeSpec
from langflow.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput, SliderInput
from langflow.schema.dotdict import dotdict
@@ -23,15 +24,17 @@ class NVIDIAModelComponent(LCModelComponent):
            name="model_name",
            display_name="Model Name",
            advanced=False,
            options=["mistralai/mixtral-8x7b-instruct-v0.1"],
            value="mistralai/mixtral-8x7b-instruct-v0.1",
            options=[],
            real_time_refresh=True,
            refresh_button=True,
        ),
        StrInput(
        MessageTextInput(
            name="base_url",
            display_name="NVIDIA Base URL",
            value="https://integrate.api.nvidia.com/v1",
            refresh_button=True,
            info="The base URL of the NVIDIA API. Defaults to https://integrate.api.nvidia.com/v1.",
            real_time_refresh=True,
        ),
        BoolInput(
            name="tool_model_enabled",
@@ -41,15 +44,23 @@ class NVIDIAModelComponent(LCModelComponent):
            ),
            advanced=False,
            value=True,
            real_time_refresh=True,
        ),
        SecretStrInput(
            name="nvidia_api_key",
            name="api_key",
            display_name="NVIDIA API Key",
            info="The NVIDIA API Key.",
            advanced=False,
            value="NVIDIA_API_KEY",
            real_time_refresh=True,
        ),
        SliderInput(
            name="temperature",
            display_name="Temperature",
            value=0.1,
            info="Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
            range_spec=RangeSpec(min=0, max=1, step=0.01),
        ),
        FloatInput(name="temperature", display_name="Temperature", value=0.1),
        IntInput(
            name="seed",
            display_name="Seed",
@@ -67,7 +78,7 @@ class NVIDIAModelComponent(LCModelComponent):
        return [model.id for model in build_model.available_models]

    def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):
        if field_name == "base_url" and field_value:
        if field_name in ("base_url", "model_name", "tool_model_enabled", "api_key") and field_value:
            try:
                ids = self.get_models(self.tool_model_enabled)
                build_config["model_name"]["options"] = ids
@@ -83,7 +94,7 @@ class NVIDIAModelComponent(LCModelComponent):
        except ImportError as e:
            msg = "Please install langchain-nvidia-ai-endpoints to use the NVIDIA model."
            raise ImportError(msg) from e
        nvidia_api_key = self.nvidia_api_key
        api_key = self.api_key
        temperature = self.temperature
        model_name: str = self.model_name
        max_tokens = self.max_tokens
@@ -92,7 +103,7 @@ class NVIDIAModelComponent(LCModelComponent):
            max_tokens=max_tokens or None,
            model=model_name,
            base_url=self.base_url,
            api_key=nvidia_api_key,
            api_key=api_key,
            temperature=temperature or 0.1,
            seed=seed,
        )
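For context, a hedged sketch of the listing the NVIDIA component's get_models reads from (the available_models property shown as context above); the package install and API key are assumptions and the key is a placeholder:

from langchain_nvidia_ai_endpoints import ChatNVIDIA

# Placeholder key; available_models queries the endpoint and returns entries with .id attributes.
llm = ChatNVIDIA(base_url="https://integrate.api.nvidia.com/v1", api_key="nvapi-placeholder")
print([model.id for model in llm.available_models])
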
File diff suppressed because one or more lines are too long (8 files)