diff --git a/docs/docs/Components/components-models.md b/docs/docs/Components/components-models.md
index 24893a46a..6492ba602 100644
--- a/docs/docs/Components/components-models.md
+++ b/docs/docs/Components/components-models.md
@@ -372,3 +372,22 @@ For more information, see [Google Vertex AI documentation](https://cloud.google.
|--------|---------------|-----------------------------------------------------|
| model | LanguageModel | An instance of ChatVertexAI configured with the specified parameters. |
+## Novita AI
+
+This component generates text using Novita AI's language models.
+
+For more information, see [Novita AI documentation](https://novita.ai/docs/model-api/reference/llm/llm.html?utm_source=github_langflow&utm_medium=github_readme&utm_campaign=link).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|---------------------|---------------|------------------------------------------------------------------|
+| api_key | SecretString | Your Novita AI API Key. |
+| model               | String        | The ID of the Novita AI model to use.                            |
+| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. |
+| temperature         | Float         | Controls randomness in the output. Range: [0.0, 1.0]. Default: 0.1. |
+| top_p | Float | Controls the nucleus sampling. Range: [0.0, 1.0]. Default: 1.0. |
+| frequency_penalty | Float | Controls the frequency penalty. Range: [0.0, 2.0]. Default: 0.0. |
+| presence_penalty | Float | Controls the presence penalty. Range: [0.0, 2.0]. Default: 0.0. |
diff --git a/src/backend/base/langflow/base/models/novita_constants.py b/src/backend/base/langflow/base/models/novita_constants.py
new file mode 100644
index 000000000..7e5062a3c
--- /dev/null
+++ b/src/backend/base/langflow/base/models/novita_constants.py
@@ -0,0 +1,35 @@
+NOVITA_MODELS = [
+ "meta-llama/llama-3.3-70b-instruct",
+ "meta-llama/llama-3.1-8b-instruct",
+ "meta-llama/llama-3.1-8b-instruct-max",
+ "meta-llama/llama-3.1-70b-instruct",
+ "meta-llama/llama-3.1-405b-instruct",
+ "meta-llama/llama-3-8b-instruct",
+ "meta-llama/llama-3-70b-instruct",
+ "gryphe/mythomax-l2-13b",
+ "google/gemma-2-9b-it",
+ "mistralai/mistral-nemo",
+ "microsoft/wizardlm-2-8x22b",
+ "mistralai/mistral-7b-instruct",
+ "openchat/openchat-7b",
+ "nousresearch/hermes-2-pro-llama-3-8b",
+ "sao10k/l3-70b-euryale-v2.1",
+ "cognitivecomputations/dolphin-mixtral-8x22b",
+ "jondurbin/airoboros-l2-70b",
+ "lzlv_70b",
+ "nousresearch/nous-hermes-llama2-13b",
+ "teknium/openhermes-2.5-mistral-7b",
+ "sophosympatheia/midnight-rose-70b",
+ "Sao10K/L3-8B-Stheno-v3.2",
+ "sao10k/l3-8b-lunaris",
+ "qwen/qwen-2-vl-72b-instruct",
+ "meta-llama/llama-3.2-1b-instruct",
+ "meta-llama/llama-3.2-11b-vision-instruct",
+ "meta-llama/llama-3.2-3b-instruct",
+ "meta-llama/llama-3.1-8b-instruct-bf16",
+ "qwen/qwen-2.5-72b-instruct",
+ "sao10k/l31-70b-euryale-v2.2",
+ "qwen/qwen-2-7b-instruct",
+ "qwen/qwen-2-72b-instruct",
+]
+MODEL_NAMES = NOVITA_MODELS  # backward compatibility
diff --git a/src/backend/base/langflow/components/models/__init__.py b/src/backend/base/langflow/components/models/__init__.py
index ecd9da217..1d214e939 100644
--- a/src/backend/base/langflow/components/models/__init__.py
+++ b/src/backend/base/langflow/components/models/__init__.py
@@ -11,6 +11,7 @@ from .huggingface import HuggingFaceEndpointsComponent
from .lmstudiomodel import LMStudioModelComponent
from .maritalk import MaritalkModelComponent
from .mistral import MistralAIModelComponent
+from .novita import NovitaModelComponent
from .nvidia import NVIDIAModelComponent
from .ollama import ChatOllamaComponent
from .openai import OpenAIModelComponent
@@ -35,6 +36,7 @@ __all__ = [
"MaritalkModelComponent",
"MistralAIModelComponent",
"NVIDIAModelComponent",
+ "NovitaModelComponent",
"OpenAIModelComponent",
"OpenRouterComponent",
"PerplexityComponent",
diff --git a/src/backend/base/langflow/components/models/novita.py b/src/backend/base/langflow/components/models/novita.py
new file mode 100644
index 000000000..4dcd527d3
--- /dev/null
+++ b/src/backend/base/langflow/components/models/novita.py
@@ -0,0 +1,130 @@
+import requests
+from langchain_openai import ChatOpenAI
+from pydantic.v1 import SecretStr
+from typing_extensions import override
+
+from langflow.base.models.model import LCModelComponent
+from langflow.base.models.novita_constants import MODEL_NAMES
+from langflow.field_typing import LanguageModel
+from langflow.field_typing.range_spec import RangeSpec
+from langflow.inputs import (
+ BoolInput,
+ DictInput,
+ DropdownInput,
+ IntInput,
+ SecretStrInput,
+ SliderInput,
+)
+from langflow.inputs.inputs import HandleInput
+
+
+class NovitaModelComponent(LCModelComponent):
+ display_name = "Novita AI"
+ description = "Generates text using Novita AI LLMs (OpenAI compatible)."
+ icon = "Novita"
+ name = "NovitaModel"
+
+ inputs = [
+ *LCModelComponent._base_inputs,
+ IntInput(
+ name="max_tokens",
+ display_name="Max Tokens",
+ advanced=True,
+ info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
+ range_spec=RangeSpec(min=0, max=128000),
+ ),
+ DictInput(
+ name="model_kwargs",
+ display_name="Model Kwargs",
+ advanced=True,
+ info="Additional keyword arguments to pass to the model.",
+ ),
+ BoolInput(
+ name="json_mode",
+ display_name="JSON Mode",
+ advanced=True,
+ info="If True, it will output JSON regardless of passing a schema.",
+ ),
+ DropdownInput(
+ name="model_name",
+ display_name="Model Name",
+ advanced=False,
+ options=MODEL_NAMES,
+ value=MODEL_NAMES[0],
+ refresh_button=True,
+ ),
+ SecretStrInput(
+ name="api_key",
+ display_name="Novita API Key",
+ info="The Novita API Key to use for Novita AI models.",
+ advanced=False,
+ value="NOVITA_API_KEY",
+ real_time_refresh=True,
+ ),
+ SliderInput(name="temperature", display_name="Temperature", value=0.1, range_spec=RangeSpec(min=0, max=1)),
+ IntInput(
+ name="seed",
+ display_name="Seed",
+ info="The seed controls the reproducibility of the job.",
+ advanced=True,
+ value=1,
+ ),
+ HandleInput(
+ name="output_parser",
+ display_name="Output Parser",
+ info="The parser to use to parse the output of the model",
+ advanced=True,
+ input_types=["OutputParser"],
+ ),
+ ]
+
+ def get_models(self) -> list[str]:
+ base_url = "https://api.novita.ai/v3/openai"
+ url = f"{base_url}/models"
+
+ headers = {"Content-Type": "application/json"}
+
+ try:
+ response = requests.get(url, headers=headers, timeout=10)
+ response.raise_for_status()
+ model_list = response.json()
+ return [model["id"] for model in model_list.get("data", [])]
+ except requests.RequestException as e:
+ self.status = f"Error fetching models: {e}"
+ return MODEL_NAMES
+
+ @override
+ def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):
+ if field_name in {"api_key", "model_name"}:
+ models = self.get_models()
+ build_config["model_name"]["options"] = models
+ return build_config
+
+ def build_model(self) -> LanguageModel: # type: ignore[type-var]
+ api_key = self.api_key
+ temperature = self.temperature
+ model_name: str = self.model_name
+ max_tokens = self.max_tokens
+ model_kwargs = self.model_kwargs or {}
+ json_mode = self.json_mode
+ seed = self.seed
+
+ try:
+ output = ChatOpenAI(
+ model=model_name,
+ api_key=(SecretStr(api_key).get_secret_value() if api_key else None),
+ max_tokens=max_tokens or None,
+ temperature=temperature,
+ model_kwargs=model_kwargs,
+ streaming=self.stream,
+ seed=seed,
+ base_url="https://api.novita.ai/v3/openai",
+ )
+ except Exception as e:
+ msg = "Could not connect to Novita API."
+ raise ValueError(msg) from e
+
+ if json_mode:
+ output = output.bind(response_format={"type": "json_object"})
+
+ return output
diff --git a/src/backend/base/langflow/services/settings/constants.py b/src/backend/base/langflow/services/settings/constants.py
index b2f3df12e..122d926e7 100644
--- a/src/backend/base/langflow/services/settings/constants.py
+++ b/src/backend/base/langflow/services/settings/constants.py
@@ -25,4 +25,5 @@ VARIABLES_TO_GET_FROM_ENVIRONMENT = [
"VECTARA_API_KEY",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
+ "NOVITA_API_KEY",
]
diff --git a/src/frontend/src/icons/Novita/index.tsx b/src/frontend/src/icons/Novita/index.tsx
new file mode 100644
index 000000000..1842b689e
--- /dev/null
+++ b/src/frontend/src/icons/Novita/index.tsx
@@ -0,0 +1,9 @@
+import React, { forwardRef } from "react";
+import SvgNovita from "./novita";
+
+export const NovitaIcon = forwardRef<
+ SVGSVGElement,
+ React.PropsWithChildren<{}>
+>((props, ref) => {
+  return <SvgNovita ref={ref} {...props} />;
+});
diff --git a/src/frontend/src/icons/Novita/novita.jsx b/src/frontend/src/icons/Novita/novita.jsx
new file mode 100644
index 000000000..27491e8f0
--- /dev/null
+++ b/src/frontend/src/icons/Novita/novita.jsx
@@ -0,0 +1,36 @@
+const SvgNovita = (props) => (
+
+);
+export default SvgNovita;
diff --git a/src/frontend/src/icons/Novita/novita.svg b/src/frontend/src/icons/Novita/novita.svg
new file mode 100644
index 000000000..ef89d31eb
--- /dev/null
+++ b/src/frontend/src/icons/Novita/novita.svg
@@ -0,0 +1,10 @@
+
diff --git a/src/frontend/src/utils/styleUtils.ts b/src/frontend/src/utils/styleUtils.ts
index 7203dd0b6..b8e0ddaf6 100644
--- a/src/frontend/src/utils/styleUtils.ts
+++ b/src/frontend/src/utils/styleUtils.ts
@@ -281,6 +281,7 @@ import { MongoDBIcon } from "../icons/MongoDB";
import { NeedleIcon } from "../icons/Needle";
import { NotDiamondIcon } from "../icons/NotDiamond";
import { NotionIcon } from "../icons/Notion";
+import { NovitaIcon } from "../icons/Novita";
import { NvidiaIcon } from "../icons/Nvidia";
import { OllamaIcon } from "../icons/Ollama";
import { OpenAiIcon } from "../icons/OpenAi";
@@ -666,6 +667,8 @@ export const nodeIconsLucide: iconsType = {
notion: NotionIcon,
Notion: NotionIcon,
NotionDirectoryLoader: NotionIcon,
+ novita: NovitaIcon,
+ Novita: NovitaIcon,
Needle: NeedleIcon,
NVIDIA: NvidiaIcon,
ChatOpenAI: OpenAiIcon,