feat: add new OpenAI reasoning models (#8786)

* Add new OpenAI reasoning models

* [autofix.ci] apply automated fixes

* Update language model component; note the frontend doesn't yet send a POST to update the template

* Use ChatOpenAI constants

* [autofix.ci] apply automated fixes

* Add reasoning to language model test

* Remove temperature from all reasoning models

* [autofix.ci] apply automated fixes

* refactor: Update template notes (#8816)

* update templates

* small-changes

* template cleanup

---------

Co-authored-by: Mendon Kissling <59585235+mendonk@users.noreply.github.com>

* ruff

* uv lock

* starter projects update

* [autofix.ci] apply automated fixes

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Mike Fortman <michael.fortman@datastax.com>
Co-authored-by: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Authored by Jordan Frazier on 2025-07-07 07:12:54 -07:00; committed by Gabriel Luiz Freitas Almeida
Commit: be18f6d03a
29 changed files with 3676 additions and 59 deletions

View file

@@ -84,12 +84,9 @@ class LCModelComponent(Component):
raise ValueError(msg)
async def text_response(self) -> Message:
input_value = self.input_value
stream = self.stream
system_message = self.system_message
output = self.build_model()
result = await self.get_chat_result(
runnable=output, stream=stream, input_value=input_value, system_message=system_message
runnable=output, stream=self.stream, input_value=self.input_value, system_message=self.system_message
)
self.status = result
return result
@@ -176,6 +173,7 @@ class LCModelComponent(Component):
input_value: str | Message,
system_message: str | None = None,
) -> Message:
# NVIDIA reasoning models use detailed thinking
if getattr(self, "detailed_thinking", False):
system_message = DETAILED_THINKING_PREFIX + (system_message or "")
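
Note: a minimal sketch, outside this diff, of how the detailed-thinking gate above behaves. The prefix value and the example call are assumptions for illustration, not taken from this change.

DETAILED_THINKING_PREFIX = "detailed thinking on\n"  # assumed value, for illustration only

def apply_detailed_thinking(component, system_message: str | None) -> str | None:
    # Mirrors the branch above: only components that expose detailed_thinking=True
    # (the NVIDIA reasoning models) get the prefix prepended to the system message.
    if getattr(component, "detailed_thinking", False):
        system_message = DETAILED_THINKING_PREFIX + (system_message or "")
    return system_message

# apply_detailed_thinking(nvidia_component, "Be concise.") -> "detailed thinking on\nBe concise."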

View file

@@ -17,6 +17,13 @@ OPENAI_MODELS_DETAILED = [
create_model_metadata(provider="OpenAI", name="gpt-3.5-turbo", icon="OpenAI", tool_calling=True),
# Reasoning Models
create_model_metadata(provider="OpenAI", name="o1", icon="OpenAI", reasoning=True),
create_model_metadata(provider="OpenAI", name="o1-mini", icon="OpenAI", reasoning=True),
create_model_metadata(provider="OpenAI", name="o1-pro", icon="OpenAI", reasoning=True),
create_model_metadata(provider="OpenAI", name="o3-mini", icon="OpenAI", reasoning=True),
create_model_metadata(provider="OpenAI", name="o3", icon="OpenAI", reasoning=True),
create_model_metadata(provider="OpenAI", name="o3-pro", icon="OpenAI", reasoning=True),
create_model_metadata(provider="OpenAI", name="o4-mini", icon="OpenAI", reasoning=True),
create_model_metadata(provider="OpenAI", name="o4-mini-high", icon="OpenAI", reasoning=True),
# Search Models
create_model_metadata(
provider="OpenAI",
@@ -27,7 +34,12 @@ OPENAI_MODELS_DETAILED = [
preview=True,
),
create_model_metadata(
provider="OpenAI", name="gpt-4o-search-preview", icon="OpenAI", tool_calling=True, search=True, preview=True
provider="OpenAI",
name="gpt-4o-search-preview",
icon="OpenAI",
tool_calling=True,
search=True,
preview=True,
),
# Not Supported Models
create_model_metadata(
@@ -45,16 +57,13 @@ OPENAI_MODELS_DETAILED = [
create_model_metadata(
provider="OpenAI", name="gpt-4o-mini-realtime-preview", icon="OpenAI", not_supported=True, preview=True
),
create_model_metadata(provider="OpenAI", name="o3-mini", icon="OpenAI", reasoning=True, not_supported=True),
create_model_metadata(provider="OpenAI", name="o1-mini", icon="OpenAI", reasoning=True, not_supported=True),
]
OPENAI_MODEL_NAMES = [
OPENAI_CHAT_MODEL_NAMES = [
metadata["name"]
for metadata in OPENAI_MODELS_DETAILED
if not metadata.get("reasoning", False)
if not metadata.get("not_supported", False)
and not metadata.get("reasoning", False)
and not metadata.get("search", False)
and not metadata.get("not_supported", False)
]
OPENAI_REASONING_MODEL_NAMES = [
@@ -78,4 +87,4 @@ OPENAI_EMBEDDING_MODEL_NAMES = [
]
# Backwards compatibility
MODEL_NAMES = OPENAI_MODEL_NAMES
MODEL_NAMES = OPENAI_CHAT_MODEL_NAMES
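
Usage sketch for the renamed constants, mirroring how the components later in this commit consume them (the import path is the one shown in this diff; the variable names here are illustrative):

from langflow.base.models.openai_constants import (
    OPENAI_CHAT_MODEL_NAMES,
    OPENAI_REASONING_MODEL_NAMES,
)

# Dropdowns that should offer every usable model combine both lists; the old
# MODEL_NAMES name survives only as a backwards-compatible alias for the chat list.
dropdown_options = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES
default_model = OPENAI_CHAT_MODEL_NAMES[0]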

View file

@@ -1,7 +1,7 @@
import requests
from requests.auth import HTTPBasicAuth
from langflow.base.models.openai_constants import OPENAI_MODEL_NAMES
from langflow.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES
from langflow.custom.custom_component.component import Component
from langflow.inputs.inputs import DropdownInput, SecretStrInput, StrInput
from langflow.io import MessageTextInput, Output
@@ -43,8 +43,8 @@ class CombinatorialReasonerComponent(Component):
name="model_name",
display_name="Model Name",
advanced=False,
options=OPENAI_MODEL_NAMES,
value=OPENAI_MODEL_NAMES[0],
options=OPENAI_CHAT_MODEL_NAMES,
value=OPENAI_CHAT_MODEL_NAMES[0],
),
]

View file

@@ -7,7 +7,7 @@ from langchain_openai import ChatOpenAI
from langflow.base.models.anthropic_constants import ANTHROPIC_MODELS
from langflow.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS
from langflow.base.models.model import LCModelComponent
from langflow.base.models.openai_constants import OPENAI_MODEL_NAMES
from langflow.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
from langflow.inputs.inputs import BoolInput
@@ -36,9 +36,10 @@ class LanguageModelComponent(LCModelComponent):
DropdownInput(
name="model_name",
display_name="Model Name",
options=OPENAI_MODEL_NAMES,
value=OPENAI_MODEL_NAMES[0],
options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,
value=OPENAI_CHAT_MODEL_NAMES[0],
info="Select the model to use",
real_time_refresh=True,
),
SecretStrInput(
name="api_key",
@@ -57,7 +58,7 @@
name="system_message",
display_name="System Message",
info="A system message that helps set the behavior of the assistant",
advanced=True,
advanced=False,
),
BoolInput(
name="stream",
@@ -86,6 +87,11 @@ class LanguageModelComponent(LCModelComponent):
if not self.api_key:
msg = "OpenAI API key is required when using OpenAI provider"
raise ValueError(msg)
if model_name in OPENAI_REASONING_MODEL_NAMES:
# reasoning models do not support temperature (yet)
temperature = None
return ChatOpenAI(
model_name=model_name,
temperature=temperature,
@@ -118,8 +124,8 @@
def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:
if field_name == "provider":
if field_value == "OpenAI":
build_config["model_name"]["options"] = OPENAI_MODEL_NAMES
build_config["model_name"]["value"] = OPENAI_MODEL_NAMES[0]
build_config["model_name"]["options"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES
build_config["model_name"]["value"] = OPENAI_CHAT_MODEL_NAMES[0]
build_config["api_key"]["display_name"] = "OpenAI API Key"
elif field_value == "Anthropic":
build_config["model_name"]["options"] = ANTHROPIC_MODELS
@@ -129,4 +135,10 @@
build_config["model_name"]["options"] = GOOGLE_GENERATIVE_AI_MODELS
build_config["model_name"]["value"] = GOOGLE_GENERATIVE_AI_MODELS[0]
build_config["api_key"]["display_name"] = "Google API Key"
elif field_name == "model_name" and field_value.startswith("o1") and self.provider == "OpenAI":
# Hide system_message for o1 models - currently unsupported
if "system_message" in build_config:
build_config["system_message"]["show"] = False
elif field_name == "model_name" and not field_value.startswith("o1") and "system_message" in build_config:
build_config["system_message"]["show"] = True
return build_config
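
The o1-specific handling above can be exercised the same way the existing tests call update_build_config. A hedged sketch, assuming component inputs can be passed as constructor kwargs (as in the agent test later in this commit) and trimming build_config to the one relevant key:

from langflow.components.models.language_model import LanguageModelComponent

component = LanguageModelComponent(provider="OpenAI")
build_config = {"system_message": {"show": True}}

# Picking an o1 model hides the system message, since o1 does not accept one yet.
build_config = component.update_build_config(build_config, "o1-mini", "model_name")
assert build_config["system_message"]["show"] is False

# Switching back to a non-o1 model restores it.
build_config = component.update_build_config(build_config, "gpt-4o", "model_name")
assert build_config["system_message"]["show"] is True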

View file

@@ -5,7 +5,7 @@ from pydantic.v1 import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.base.models.openai_constants import (
OPENAI_MODEL_NAMES,
OPENAI_CHAT_MODEL_NAMES,
OPENAI_REASONING_MODEL_NAMES,
)
from langflow.field_typing import LanguageModel
@@ -45,8 +45,8 @@ class OpenAIModelComponent(LCModelComponent):
name="model_name",
display_name="Model Name",
advanced=False,
options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,
value=OPENAI_MODEL_NAMES[1],
options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,
value=OPENAI_CHAT_MODEL_NAMES[0],
combobox=True,
real_time_refresh=True,
),
@@ -97,6 +97,7 @@
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
logger.debug(f"Executing request with model: {self.model_name}")
parameters = {
"api_key": SecretStr(self.api_key).get_secret_value() if self.api_key else None,
"model_name": self.model_name,
@@ -107,10 +108,15 @@
"timeout": self.timeout,
}
logger.info(f"Model name: {self.model_name}")
# TODO: Revisit if/once parameters are supported for reasoning models
unsupported_params_for_reasoning_models = ["temperature", "seed"]
if self.model_name not in OPENAI_REASONING_MODEL_NAMES:
parameters["temperature"] = self.temperature if self.temperature is not None else 0.1
parameters["seed"] = self.seed
else:
params_str = ", ".join(unsupported_params_for_reasoning_models)
logger.debug(f"{self.model_name} is a reasoning model, {params_str} are not configurable. Ignoring.")
output = ChatOpenAI(**parameters)
if self.json_mode:
@@ -141,7 +147,12 @@
if field_name in {"base_url", "model_name", "api_key"} and field_value in OPENAI_REASONING_MODEL_NAMES:
build_config["temperature"]["show"] = False
build_config["seed"]["show"] = False
if field_name in {"base_url", "model_name", "api_key"} and field_value in OPENAI_MODEL_NAMES:
# Hide system_message for o1 models - currently unsupported
if field_value.startswith("o1") and "system_message" in build_config:
build_config["system_message"]["show"] = False
if field_name in {"base_url", "model_name", "api_key"} and field_value in OPENAI_CHAT_MODEL_NAMES:
build_config["temperature"]["show"] = True
build_config["seed"]["show"] = True
if "system_message" in build_config:
build_config["system_message"]["show"] = True
return build_config
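
For reference, a minimal sketch of the ChatOpenAI calls that result from the parameter split above. The model names come from this diff; the key and seed values are placeholders, and 0.1 is the fallback temperature used in the component code above.

from langchain_openai import ChatOpenAI

# Chat models: temperature and seed pass through as before.
chat_model = ChatOpenAI(model_name="gpt-4o-mini", temperature=0.1, seed=1, api_key="sk-your-key")

# Reasoning models: temperature and seed are left out of the kwargs entirely,
# since these models do not accept them yet.
reasoning_model = ChatOpenAI(model_name="o3-mini", api_key="sk-your-key")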

File diff suppressed because one or more lines are too long (19 files)

View file

@@ -9,6 +9,7 @@ OPENAI_MODELS = [
]
CHAT_OPENAI_MODELS = [
"gpt-4o",
"gpt-4o-mini",
"gpt-4-turbo-preview",
"gpt-4-0125-preview",
"gpt-4-1106-preview",
@@ -17,6 +18,16 @@ CHAT_OPENAI_MODELS = [
"gpt-3.5-turbo-1106",
]
REASONING_OPENAI_MODELS = [
"o1",
"o1-mini",
"o1-pro",
"o3-mini",
"o3",
"o3-pro",
"o4-mini",
"o4-mini-high",
]
ANTHROPIC_MODELS = [
# largest model, ideal for a wide range of more complex tasks.

View file

@@ -382,6 +382,7 @@ def add_options_to_field(value: dict[str, Any], class_name: str | None, key: str
options_map = {
"OpenAI": constants.OPENAI_MODELS,
"ChatOpenAI": constants.CHAT_OPENAI_MODELS,
"ReasoningOpenAI": constants.REASONING_OPENAI_MODELS,
"Anthropic": constants.ANTHROPIC_MODELS,
"ChatAnthropic": constants.ANTHROPIC_MODELS,
}
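
The new "ReasoningOpenAI" entry only extends the lookup table; the rest of add_options_to_field is not shown in this diff. A heavily hedged sketch of what such a lookup typically does with a template field (the field layout below is assumed, not taken from the code):

options = options_map.get(class_name or "")
if options and key == "model_name":
    value["options"] = options   # assumed field layout
    value["value"] = options[0]  # default to the first model in the list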

View file

@@ -8,7 +8,7 @@ from langflow.base.models.model_input_constants import (
MODEL_PROVIDERS,
)
from langflow.base.models.openai_constants import (
OPENAI_MODEL_NAMES,
OPENAI_CHAT_MODEL_NAMES,
OPENAI_REASONING_MODEL_NAMES,
)
from langflow.components.agents.agent import AgentComponent
@@ -142,7 +142,7 @@ class TestAgentComponentWithClient(ComponentTestBaseWithClient):
# Iterate over all OpenAI models
failed_models = []
for model_name in OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES:
for model_name in OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES:
# Initialize the AgentComponent with mocked inputs
tools = [CalculatorToolComponent().build_tool()] # Use the Calculator component as a tool
agent = AgentComponent(

View file

@@ -6,7 +6,7 @@ from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import ChatOpenAI
from langflow.base.models.anthropic_constants import ANTHROPIC_MODELS
from langflow.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS
from langflow.base.models.openai_constants import OPENAI_MODEL_NAMES
from langflow.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES
from langflow.components.models.language_model import LanguageModelComponent
from tests.base import ComponentTestBaseWithoutClient
@@ -66,8 +66,8 @@ class TestLanguageModelComponent(ComponentTestBaseWithoutClient):
"api_key": {"display_name": "API Key"},
}
updated_config = component.update_build_config(build_config, "OpenAI", "provider")
assert updated_config["model_name"]["options"] == OPENAI_MODEL_NAMES
assert updated_config["model_name"]["value"] == OPENAI_MODEL_NAMES[0]
assert updated_config["model_name"]["options"] == OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES
assert updated_config["model_name"]["value"] == OPENAI_CHAT_MODEL_NAMES[0]
assert updated_config["api_key"]["display_name"] == "OpenAI API Key"
async def test_update_build_config_anthropic(self, component_class, default_kwargs):