feat: enhance openai model component with o1 support (#7107)

* update models

* Delete package-lock.json

* Create package-lock.json

* [autofix.ci] apply automated fixes

* [autofix.ci] apply automated fixes (attempt 2/3)

* [autofix.ci] apply automated fixes

* updated templates

* update

* [autofix.ci] apply automated fixes

* Update Text Sentiment Analysis.json

* [autofix.ci] apply automated fixes

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Gabriel Luiz Freitas Almeida <gabriel@langflow.org>
This commit is contained in:
Edwin Jose 2025-04-14 14:17:27 -04:00 committed by GitHub
commit 56055043aa
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
25 changed files with 235 additions and 129 deletions

View file

@ -7,6 +7,26 @@ OPENAI_MODEL_NAMES = [
"gpt-4",
"gpt-3.5-turbo",
]
# Reasoning models: offered in the model dropdown alongside OPENAI_MODEL_NAMES,
# but they reject the `temperature` and `seed` parameters (the component pops
# both before constructing ChatOpenAI for these names).
OPENAI_REASONING_MODEL_NAMES = [
    "o1",  # High-intelligence reasoning model
]
# Search-preview chat models.
# NOTE(review): no code visible here consumes this list — confirm where it is
# used before relying on it.
OPENAI_SEARCH_MODEL_NAMES = [
    "gpt-4o-mini-search-preview",
    "gpt-4o-search-preview",
]
# Model names excluded from the component (audio/realtime/computer-use
# previews and the mini reasoning variants).
# NOTE(review): the filtering that applies this list is not in this file —
# verify against the component that builds the options.
NOT_SUPPORTED_MODELS = [
    "computer-use-preview",
    "gpt-4o-audio-preview",
    "gpt-4o-realtime-preview",
    "gpt-4o-mini-audio-preview",
    "gpt-4o-mini-realtime-preview",
    "o3-mini",
    "o1-mini",
]
OPENAI_EMBEDDING_MODEL_NAMES = [
"text-embedding-3-small",
"text-embedding-3-large",

View file

@ -1,11 +1,17 @@
from typing import Any
from langchain_openai import ChatOpenAI
from pydantic.v1 import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.base.models.openai_constants import OPENAI_MODEL_NAMES
from langflow.base.models.openai_constants import (
OPENAI_MODEL_NAMES,
OPENAI_REASONING_MODEL_NAMES,
)
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
from langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput
from langflow.logging import logger
class OpenAIModelComponent(LCModelComponent):
@ -39,9 +45,10 @@ class OpenAIModelComponent(LCModelComponent):
name="model_name",
display_name="Model Name",
advanced=False,
options=OPENAI_MODEL_NAMES,
options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,
value=OPENAI_MODEL_NAMES[1],
combobox=True,
real_time_refresh=True,
),
StrInput(
name="openai_api_base",
@ -64,7 +71,7 @@ class OpenAIModelComponent(LCModelComponent):
display_name="Temperature",
value=0.1,
range_spec=RangeSpec(min=0, max=1, step=0.01),
advanced=True,
show=True,
),
IntInput(
name="seed",
@ -90,30 +97,25 @@ class OpenAIModelComponent(LCModelComponent):
]
def build_model(self) -> LanguageModel:  # type: ignore[type-var]
    """Build and return the configured ``ChatOpenAI`` instance.

    Collects the component inputs into a single ``parameters`` dict, strips
    the parameters that reasoning models (``OPENAI_REASONING_MODEL_NAMES``)
    do not accept, and optionally binds JSON-object response formatting.

    Returns:
        LanguageModel: the configured chat model, possibly wrapped by
        ``bind(response_format=...)`` when ``json_mode`` is enabled.
    """
    parameters = {
        # Unwrap the secret only when a key was provided; None lets the
        # client fall back to its own resolution (e.g. env var).
        "api_key": SecretStr(self.api_key).get_secret_value() if self.api_key else None,
        "model_name": self.model_name,
        "max_tokens": self.max_tokens or None,  # 0/empty -> let the API decide
        "model_kwargs": self.model_kwargs or {},
        "base_url": self.openai_api_base or "https://api.openai.com/v1",
        "seed": self.seed,
        "max_retries": self.max_retries,
        "timeout": self.timeout,
        # Preserve an explicit 0.0; only substitute the default when unset.
        "temperature": self.temperature if self.temperature is not None else 0.1,
    }

    logger.info(f"Model name: {self.model_name}")
    if self.model_name in OPENAI_REASONING_MODEL_NAMES:
        logger.info("Getting reasoning model parameters")
        # Reasoning models reject sampling controls. Pop with a default so a
        # missing key can never raise KeyError.
        parameters.pop("temperature", None)
        parameters.pop("seed", None)

    output = ChatOpenAI(**parameters)
    if self.json_mode:
        output = output.bind(response_format={"type": "json_object"})
    return output
@ -136,3 +138,12 @@ class OpenAIModelComponent(LCModelComponent):
if message:
return message
return None
def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:
    """Toggle visibility of the ``temperature`` and ``seed`` inputs.

    Reasoning models reject sampling parameters, so selecting one hides
    both inputs; any other selection — including a custom name typed into
    the combobox, which previously left the inputs stale-hidden — shows
    them again. Only ``model_name`` changes can affect visibility; the
    former ``base_url``/``api_key`` checks were dead code because those
    field values are never model names.

    Args:
        build_config: the component's build configuration to mutate.
        field_value: the new value of the changed field.
        field_name: which field changed; only ``"model_name"`` is relevant.

    Returns:
        dict: the (possibly mutated) ``build_config``.
    """
    if field_name == "model_name":
        supports_sampling = field_value not in OPENAI_REASONING_MODEL_NAMES
        build_config["temperature"]["show"] = supports_sampling
        build_config["seed"]["show"] = supports_sampling
    return build_config

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1268,7 +1268,8 @@
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo"
"gpt-3.5-turbo",
"o1"
],
"options_metadata": [],
"placeholder": "",

View file

@ -1851,7 +1851,8 @@
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo"
"gpt-3.5-turbo",
"o1"
],
"options_metadata": [],
"placeholder": "",

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1003,7 +1003,8 @@
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo"
"gpt-3.5-turbo",
"o1"
],
"placeholder": "",
"required": false,

View file

@ -647,7 +647,8 @@
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo"
"gpt-3.5-turbo",
"o1"
],
"placeholder": "",
"required": false,
@ -1265,7 +1266,8 @@
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo"
"gpt-3.5-turbo",
"o1"
],
"placeholder": "",
"required": false,
@ -2705,7 +2707,8 @@
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo"
"gpt-3.5-turbo",
"o1"
],
"placeholder": "",
"required": false,

View file

@ -1033,7 +1033,8 @@
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo"
"gpt-3.5-turbo",
"o1"
],
"options_metadata": [],
"placeholder": "",

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -3,15 +3,22 @@ from typing import Any
from uuid import uuid4
import pytest
from dotenv import load_dotenv
from langflow.base.models.model_input_constants import MODEL_PROVIDERS_DICT
from langflow.base.models.openai_constants import (
OPENAI_MODEL_NAMES,
OPENAI_REASONING_MODEL_NAMES,
)
from langflow.components.agents.agent import AgentComponent
from langflow.components.tools.calculator import CalculatorToolComponent
from langflow.custom import Component
from langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI
from tests.base import ComponentTestBaseWithoutClient
from tests.base import ComponentTestBaseWithClient, ComponentTestBaseWithoutClient
from tests.unit.mock_language_model import MockLanguageModel
# Load environment variables from .env file
class TestAgentComponent(ComponentTestBaseWithoutClient):
@pytest.fixture
@ -95,26 +102,60 @@ class TestAgentComponent(ComponentTestBaseWithoutClient):
assert "model_name" not in updated_config
class TestAgentComponentWithClient(ComponentTestBaseWithClient):
    """Live-API integration tests for AgentComponent (requires OPENAI_API_KEY)."""

    @pytest.fixture
    def component_class(self):
        return AgentComponent

    @pytest.fixture
    def file_names_mapping(self):
        # No serialized flow versions to validate for this component.
        return []

    @pytest.mark.api_key_required
    async def test_agent_component_with_calculator(self):
        # Load environment variables from the .env file.
        load_dotenv()
        api_key = os.getenv("OPENAI_API_KEY")
        # Fail fast with a clear message instead of a confusing auth error.
        assert api_key, "OPENAI_API_KEY must be set for this test"

        tools = [CalculatorToolComponent().build_tool()]  # Calculator component as a tool
        input_value = "What is 2 + 2?"
        temperature = 0.1

        # Initialize the AgentComponent with mocked inputs.
        agent = AgentComponent(
            tools=tools,
            input_value=input_value,
            api_key=api_key,
            model_name="gpt-4o",
            llm_type="OpenAI",
            temperature=temperature,
            _session_id=str(uuid4()),
        )

        response = await agent.message_response()
        assert "4" in response.data.get("text", "")

    @pytest.mark.api_key_required
    async def test_agent_component_with_all_openai_models(self):
        load_dotenv()
        api_key = os.getenv("OPENAI_API_KEY")
        assert api_key, "OPENAI_API_KEY must be set for this test"

        tools = [CalculatorToolComponent().build_tool()]  # Calculator component as a tool
        input_value = "What is 2 + 2?"

        # Exercise every chat + reasoning model; collect failures so one bad
        # model does not mask the results for the rest.
        failures = []
        for model_name in OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES:
            agent = AgentComponent(
                tools=tools,
                input_value=input_value,
                api_key=api_key,
                model_name=model_name,
                llm_type="OpenAI",
                _session_id=str(uuid4()),
            )
            response = await agent.message_response()
            # Default to "" so a missing "text" key fails the check instead
            # of raising TypeError from `"4" in None`.
            if "4" not in response.data.get("text", ""):
                failures.append(model_name)

        assert not failures, f"Failed for models: {failures}"