feat: add new model provider Novita AI (#5380)

* feat: add new model provider Novita AI

* [autofix.ci] apply automated fixes

* fix: code format fix

* fix: code format fix

* fix: fix default Novita AI models

* [autofix.ci] apply automated fixes

* feat: Add real-time refresh for Novita API key and include it in VARIABLES_TO_GET_FROM_ENVIRONMENT

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Gabriel Luiz Freitas Almeida <gabriel@langflow.org>
This commit is contained in:
Jason 2025-01-20 20:53:28 +08:00 committed by GitHub
commit 84c6ed2217
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 245 additions and 0 deletions

View file

@ -372,3 +372,22 @@ For more information, see [Google Vertex AI documentation](https://cloud.google.
|--------|---------------|-----------------------------------------------------|
| model | LanguageModel | An instance of ChatVertexAI configured with the specified parameters. |
## Novita AI
This component generates text using Novita AI's language models.
For more information, see [Novita AI documentation](https://novita.ai/docs/model-api/reference/llm/llm.html?utm_source=github_langflow&utm_medium=github_readme&utm_campaign=link).
### Parameters
#### Inputs
| Name | Type | Description |
|---------------------|---------------|------------------------------------------------------------------|
| api_key | SecretString | Your Novita AI API Key. |
| model | String | The id of the Novita AI model to use. |
| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. |
| temperature | Float | Controls randomness in the output. Range: [0.0, 1.0]. Default: 0.7. |
| top_p | Float | Controls the nucleus sampling. Range: [0.0, 1.0]. Default: 1.0. |
| frequency_penalty | Float | Controls the frequency penalty. Range: [0.0, 2.0]. Default: 0.0. |
| presence_penalty | Float | Controls the presence penalty. Range: [0.0, 2.0]. Default: 0.0. |

View file

@ -0,0 +1,35 @@
NOVITA_MODELS = [
"meta-llama/llama-3.3-70b-instruct",
"meta-llama/llama-3.1-8b-instruct",
"meta-llama/llama-3.1-8b-instruct-max",
"meta-llama/llama-3.1-70b-instruct",
"meta-llama/llama-3.1-405b-instruct",
"meta-llama/llama-3-8b-instruct",
"meta-llama/llama-3-70b-instruct",
"gryphe/mythomax-l2-13b",
"google/gemma-2-9b-it",
"mistralai/mistral-nemo",
"microsoft/wizardlm-2-8x22b",
"mistralai/mistral-7b-instruct",
"openchat/openchat-7b",
"nousresearch/hermes-2-pro-llama-3-8b",
"sao10k/l3-70b-euryale-v2.1",
"cognitivecomputations/dolphin-mixtral-8x22b",
"jondurbin/airoboros-l2-70b",
"lzlv_70b",
"nousresearch/nous-hermes-llama2-13b",
"teknium/openhermes-2.5-mistral-7b",
"sophosympatheia/midnight-rose-70b",
"Sao10K/L3-8B-Stheno-v3.2",
"sao10k/l3-8b-lunaris",
"qwen/qwen-2-vl-72b-instruct",
"meta-llama/llama-3.2-1b-instruct",
"meta-llama/llama-3.2-11b-vision-instruct",
"meta-llama/llama-3.2-3b-instruct",
"meta-llama/llama-3.1-8b-instruct-bf16",
"qwen/qwen-2.5-72b-instruct",
"sao10k/l31-70b-euryale-v2.2",
"qwen/qwen-2-7b-instruct",
"qwen/qwen-2-72b-instruct",
]
MODEL_NAMES = NOVITA_MODELS # reverse compatibility

View file

@ -11,6 +11,7 @@ from .huggingface import HuggingFaceEndpointsComponent
from .lmstudiomodel import LMStudioModelComponent
from .maritalk import MaritalkModelComponent
from .mistral import MistralAIModelComponent
from .novita import NovitaModelComponent
from .nvidia import NVIDIAModelComponent
from .ollama import ChatOllamaComponent
from .openai import OpenAIModelComponent
@ -35,6 +36,7 @@ __all__ = [
"MaritalkModelComponent",
"MistralAIModelComponent",
"NVIDIAModelComponent",
"NovitaModelComponent",
"OpenAIModelComponent",
"OpenRouterComponent",
"PerplexityComponent",

View file

@ -0,0 +1,130 @@
import requests
from langchain_openai import ChatOpenAI
from pydantic.v1 import SecretStr
from typing_extensions import override
from langflow.base.models.model import LCModelComponent
from langflow.base.models.novita_constants import MODEL_NAMES
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
from langflow.inputs import (
BoolInput,
DictInput,
DropdownInput,
IntInput,
SecretStrInput,
SliderInput,
)
from langflow.inputs.inputs import HandleInput
class NovitaModelComponent(LCModelComponent):
display_name = "Novita AI"
description = "Generates text using Novita AI LLMs (OpenAI compatible)."
icon = "Novita"
name = "NovitaModel"
inputs = [
*LCModelComponent._base_inputs,
IntInput(
name="max_tokens",
display_name="Max Tokens",
advanced=True,
info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
range_spec=RangeSpec(min=0, max=128000),
),
DictInput(
name="model_kwargs",
display_name="Model Kwargs",
advanced=True,
info="Additional keyword arguments to pass to the model.",
),
BoolInput(
name="json_mode",
display_name="JSON Mode",
advanced=True,
info="If True, it will output JSON regardless of passing a schema.",
),
DropdownInput(
name="model_name",
display_name="Model Name",
advanced=False,
options=MODEL_NAMES,
value=MODEL_NAMES[0],
refresh_button=True,
),
SecretStrInput(
name="api_key",
display_name="Novita API Key",
info="The Novita API Key to use for Novita AI models.",
advanced=False,
value="NOVITA_API_KEY",
real_time_refresh=True,
),
SliderInput(name="temperature", display_name="Temperature", value=0.1, range_spec=RangeSpec(min=0, max=1)),
IntInput(
name="seed",
display_name="Seed",
info="The seed controls the reproducibility of the job.",
advanced=True,
value=1,
),
HandleInput(
name="output_parser",
display_name="Output Parser",
info="The parser to use to parse the output of the model",
advanced=True,
input_types=["OutputParser"],
),
]
def get_models(self) -> list[str]:
base_url = "https://api.novita.ai/v3/openai"
url = f"{base_url}/models"
headers = {"Content-Type": "application/json"}
try:
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
model_list = response.json()
return [model["id"] for model in model_list.get("data", [])]
except requests.RequestException as e:
self.status = f"Error fetching models: {e}"
return MODEL_NAMES
@override
def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):
if field_name in {"api_key", "model_name"}:
models = self.get_models()
build_config["model_name"]["options"] = models
return build_config
def build_model(self) -> LanguageModel: # type: ignore[type-var]
api_key = self.api_key
temperature = self.temperature
model_name: str = self.model_name
max_tokens = self.max_tokens
model_kwargs = self.model_kwargs or {}
json_mode = self.json_mode
seed = self.seed
try:
output = ChatOpenAI(
model=model_name,
api_key=(SecretStr(api_key).get_secret_value() if api_key else None),
max_tokens=max_tokens or None,
temperature=temperature,
model_kwargs=model_kwargs,
streaming=self.stream,
seed=seed,
base_url="https://api.novita.ai/v3/openai",
)
except Exception as e:
msg = "Could not connect to Novita API."
raise ValueError(msg) from e
if json_mode:
output = output.bind(response_format={"type": "json_object"})
return output

View file

@ -25,4 +25,5 @@ VARIABLES_TO_GET_FROM_ENVIRONMENT = [
"VECTARA_API_KEY",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"NOVITA_API_KEY",
]

View file

@ -0,0 +1,9 @@
import React, { forwardRef } from "react";
import SvgNovita from "./novita";
export const NovitaIcon = forwardRef<
SVGSVGElement,
React.PropsWithChildren<{}>
>((props, ref) => {
return <SvgNovita ref={ref} {...props} />;
});

View file

@ -0,0 +1,36 @@
const SvgNovita = (props) => (
<svg
width="22"
height="25"
viewBox="0 0 22 25"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
fill-rule="evenodd"
clip-rule="evenodd"
d="M2 0.66449C0.895431 0.66449 0 1.55992 0 2.66449V20.0552C0 21.1598 0.895432 22.0552 2 22.0552H3.78015L10.5909 24.6045C11.2445 24.8492 11.9414 24.366 11.9414 23.668V8.8959C11.9414 8.06203 11.424 7.31567 10.6432 7.02307L3.78015 4.45137H17.5438V22.0552H19.3239C20.4285 22.0552 21.3239 21.1598 21.3239 20.0552V2.66449C21.3239 1.55992 20.4285 0.66449 19.324 0.66449H2ZM10.0128 16.943C10.3239 17.0945 10.5764 16.906 10.5764 16.5201C10.5764 16.1341 10.3239 15.6984 10.0128 15.5469C9.70171 15.3946 9.44922 15.5846 9.44922 15.9698C9.44922 16.355 9.70171 16.7907 10.0128 16.943Z"
fill="white"
/>
<path
fill-rule="evenodd"
clip-rule="evenodd"
d="M2 0.66449C0.895431 0.66449 0 1.55992 0 2.66449V20.0552C0 21.1598 0.895432 22.0552 2 22.0552H3.78015L10.5909 24.6045C11.2445 24.8492 11.9414 24.366 11.9414 23.668V8.8959C11.9414 8.06203 11.424 7.31567 10.6432 7.02307L3.78015 4.45137H17.5438V22.0552H19.3239C20.4285 22.0552 21.3239 21.1598 21.3239 20.0552V2.66449C21.3239 1.55992 20.4285 0.66449 19.324 0.66449H2ZM10.0128 16.943C10.3239 17.0945 10.5764 16.906 10.5764 16.5201C10.5764 16.1341 10.3239 15.6984 10.0128 15.5469C9.70171 15.3946 9.44922 15.5846 9.44922 15.9698C9.44922 16.355 9.70171 16.7907 10.0128 16.943Z"
fill="url(#paint0_linear_83_9227)"
/>
<defs>
<linearGradient
id="paint0_linear_83_9227"
x1="21.1236"
y1="-0.699318"
x2="0.585556"
y2="10.4891"
gradientUnits="userSpaceOnUse"
>
<stop stop-color="#2622FF" />
<stop offset="1" stop-color="#A717FF" />
</linearGradient>
</defs>
</svg>
);
export default SvgNovita;

View file

@ -0,0 +1,10 @@
<svg width="22" height="25" viewBox="0 0 22 25" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M2 0.66449C0.895431 0.66449 0 1.55992 0 2.66449V20.0552C0 21.1598 0.895432 22.0552 2 22.0552H3.78015L10.5909 24.6045C11.2445 24.8492 11.9414 24.366 11.9414 23.668V8.8959C11.9414 8.06203 11.424 7.31567 10.6432 7.02307L3.78015 4.45137H17.5438V22.0552H19.3239C20.4285 22.0552 21.3239 21.1598 21.3239 20.0552V2.66449C21.3239 1.55992 20.4285 0.66449 19.324 0.66449H2ZM10.0128 16.943C10.3239 17.0945 10.5764 16.906 10.5764 16.5201C10.5764 16.1341 10.3239 15.6984 10.0128 15.5469C9.70171 15.3946 9.44922 15.5846 9.44922 15.9698C9.44922 16.355 9.70171 16.7907 10.0128 16.943Z" fill="white"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M2 0.66449C0.895431 0.66449 0 1.55992 0 2.66449V20.0552C0 21.1598 0.895432 22.0552 2 22.0552H3.78015L10.5909 24.6045C11.2445 24.8492 11.9414 24.366 11.9414 23.668V8.8959C11.9414 8.06203 11.424 7.31567 10.6432 7.02307L3.78015 4.45137H17.5438V22.0552H19.3239C20.4285 22.0552 21.3239 21.1598 21.3239 20.0552V2.66449C21.3239 1.55992 20.4285 0.66449 19.324 0.66449H2ZM10.0128 16.943C10.3239 17.0945 10.5764 16.906 10.5764 16.5201C10.5764 16.1341 10.3239 15.6984 10.0128 15.5469C9.70171 15.3946 9.44922 15.5846 9.44922 15.9698C9.44922 16.355 9.70171 16.7907 10.0128 16.943Z" fill="url(#paint0_linear_83_9227)"/>
<defs>
<linearGradient id="paint0_linear_83_9227" x1="21.1236" y1="-0.699318" x2="0.585556" y2="10.4891" gradientUnits="userSpaceOnUse">
<stop stop-color="#2622FF"/>
<stop offset="1" stop-color="#A717FF"/>
</linearGradient>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 1.6 KiB

View file

@ -281,6 +281,7 @@ import { MongoDBIcon } from "../icons/MongoDB";
import { NeedleIcon } from "../icons/Needle";
import { NotDiamondIcon } from "../icons/NotDiamond";
import { NotionIcon } from "../icons/Notion";
import { NovitaIcon } from "../icons/Novita";
import { NvidiaIcon } from "../icons/Nvidia";
import { OllamaIcon } from "../icons/Ollama";
import { OpenAiIcon } from "../icons/OpenAi";
@ -666,6 +667,8 @@ export const nodeIconsLucide: iconsType = {
notion: NotionIcon,
Notion: NotionIcon,
NotionDirectoryLoader: NotionIcon,
novita: NovitaIcon,
Novita: NovitaIcon,
Needle: NeedleIcon,
NVIDIA: NvidiaIcon,
ChatOpenAI: OpenAiIcon,