diff --git a/docs/docs/Components/components-models.md b/docs/docs/Components/components-models.md
index bcc85cb83..4ae3997e0 100644
--- a/docs/docs/Components/components-models.md
+++ b/docs/docs/Components/components-models.md
@@ -333,6 +333,29 @@ For more information, see [Perplexity documentation](https://perplexity.ai/).
|--------|---------------|-----------------------------------------------------|
| model | LanguageModel | An instance of ChatPerplexity configured with the specified parameters. |
+## SambaNova
+
+This component generates text using SambaNova LLMs.
+
+For more information, see [SambaNova Cloud documentation](https://cloud.sambanova.ai/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|---------------------|---------------|------------------------------------------------------------------|
+| sambanova_url | String | Base URL path for API requests. Default: "https://api.sambanova.ai/v1/chat/completions". |
+| sambanova_api_key | SecretString | Your SambaNova API Key. |
+| model_name          | String        | The name of the SambaNova model to use. Options include various Llama models. |
+| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. |
+| temperature | Float | Controls randomness in the output. Range: [0.0, 1.0]. Default: 0.07. |
+#### Outputs
+
+| Name | Type | Description |
+|-------|---------------|------------------------------------------------------------------|
+| model | LanguageModel | An instance of a SambaNova model configured with the specified parameters. |
+
## VertexAI
This component generates text using Vertex AI LLMs.
diff --git a/src/backend/base/langflow/base/models/sambanova_constants.py b/src/backend/base/langflow/base/models/sambanova_constants.py
new file mode 100644
index 000000000..88f2b7973
--- /dev/null
+++ b/src/backend/base/langflow/base/models/sambanova_constants.py
@@ -0,0 +1,11 @@
+SAMBANOVA_MODEL_NAMES = [
+ "Meta-Llama-3.1-8B-Instruct",
+ "Meta-Llama-3.1-70B-Instruct",
+ "Meta-Llama-3.1-405B-Instruct",
+ "Meta-Llama-3.2-1B-Instruct",
+ "Meta-Llama-3.2-3B-Instruct",
+ "Llama-3.2-11B-Vision-Instruct",
+ "Llama-3.2-90B-Vision-Instruct",
+]
+
+MODEL_NAMES = SAMBANOVA_MODEL_NAMES
diff --git a/src/backend/base/langflow/components/models/__init__.py b/src/backend/base/langflow/components/models/__init__.py
index 974f32a9c..0c040e107 100644
--- a/src/backend/base/langflow/components/models/__init__.py
+++ b/src/backend/base/langflow/components/models/__init__.py
@@ -14,6 +14,7 @@ from .nvidia import NVIDIAModelComponent
from .ollama import ChatOllamaComponent
from .openai import OpenAIModelComponent
from .perplexity import PerplexityComponent
+from .sambanova import SambaNovaComponent
from .vertexai import ChatVertexAIComponent
__all__ = [
@@ -33,5 +34,6 @@ __all__ = [
"NVIDIAModelComponent",
"OpenAIModelComponent",
"PerplexityComponent",
+ "SambaNovaComponent",
"QianfanChatEndpointComponent",
]
diff --git a/src/backend/base/langflow/components/models/sambanova.py b/src/backend/base/langflow/components/models/sambanova.py
new file mode 100644
index 000000000..33a662804
--- /dev/null
+++ b/src/backend/base/langflow/components/models/sambanova.py
@@ -0,0 +1,74 @@
+from langchain_community.chat_models.sambanova import ChatSambaNovaCloud
+from pydantic.v1 import SecretStr
+
+from langflow.base.models.model import LCModelComponent
+from langflow.base.models.sambanova_constants import SAMBANOVA_MODEL_NAMES
+from langflow.field_typing import LanguageModel
+from langflow.inputs.inputs import HandleInput
+from langflow.io import DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput
+
+
+class SambaNovaComponent(LCModelComponent):
+ display_name = "SambaNova"
+ description = "Generate text using Sambanova LLMs."
+ documentation = "https://cloud.sambanova.ai/"
+ icon = "SambaNova"
+ name = "SambaNovaModel"
+
+ inputs = [
+ *LCModelComponent._base_inputs,
+ StrInput(
+ name="sambanova_url",
+ display_name="SambaNova Cloud Base Url",
+ advanced=True,
+ info="The base URL of the Sambanova Cloud API. "
+ "Defaults to https://api.sambanova.ai/v1/chat/completions. "
+ "You can change this to use other urls like Sambastudio",
+ ),
+ DropdownInput(
+ name="model_name",
+ display_name="Model Name",
+ advanced=False,
+ options=SAMBANOVA_MODEL_NAMES,
+ value=SAMBANOVA_MODEL_NAMES[0],
+ ),
+ SecretStrInput(
+ name="sambanova_api_key",
+ display_name="Sambanova API Key",
+ info="The Sambanova API Key to use for the Sambanova model.",
+ advanced=False,
+ value="SAMBANOVA_API_KEY",
+ ),
+ IntInput(
+ name="max_tokens",
+ display_name="Max Tokens",
+ advanced=True,
+ value=4096,
+ info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
+ ),
+ FloatInput(name="temperature", display_name="Temperature", value=0.07),
+ HandleInput(
+ name="output_parser",
+ display_name="Output Parser",
+ info="The parser to use to parse the output of the model",
+ advanced=True,
+ input_types=["OutputParser"],
+ ),
+ ]
+
+ def build_model(self) -> LanguageModel: # type: ignore[type-var]
+ sambanova_url = self.sambanova_url
+ sambanova_api_key = self.sambanova_api_key
+ model_name = self.model_name
+ max_tokens = self.max_tokens
+ temperature = self.temperature
+
+ api_key = SecretStr(sambanova_api_key).get_secret_value() if sambanova_api_key else None
+
+ return ChatSambaNovaCloud(
+ model=model_name,
+ max_tokens=max_tokens or 1024,
+ temperature=temperature or 0.07,
+ sambanova_url=sambanova_url,
+ sambanova_api_key=api_key,
+ )
diff --git a/src/backend/base/langflow/services/settings/constants.py b/src/backend/base/langflow/services/settings/constants.py
index dd24f90a3..b2f3df12e 100644
--- a/src/backend/base/langflow/services/settings/constants.py
+++ b/src/backend/base/langflow/services/settings/constants.py
@@ -15,6 +15,7 @@ VARIABLES_TO_GET_FROM_ENVIRONMENT = [
"GROQ_API_KEY",
"HUGGINGFACEHUB_API_TOKEN",
"PINECONE_API_KEY",
+ "SAMBANOVA_API_KEY",
"SEARCHAPI_API_KEY",
"SERPAPI_API_KEY",
"UPSTASH_VECTOR_REST_URL",
diff --git a/src/frontend/src/icons/SambaNova/SambaNovaLogo.jsx b/src/frontend/src/icons/SambaNova/SambaNovaLogo.jsx
new file mode 100644
index 000000000..6a448c09f
--- /dev/null
+++ b/src/frontend/src/icons/SambaNova/SambaNovaLogo.jsx
@@ -0,0 +1,27 @@
+const SvgSambaNovaLogo = ({ ...props }) => (
+
+);
+export default SvgSambaNovaLogo;
diff --git a/src/frontend/src/icons/SambaNova/SambaNovaLogo.svg b/src/frontend/src/icons/SambaNova/SambaNovaLogo.svg
new file mode 100644
index 000000000..9b5d0248f
--- /dev/null
+++ b/src/frontend/src/icons/SambaNova/SambaNovaLogo.svg
@@ -0,0 +1,7 @@
+
diff --git a/src/frontend/src/icons/SambaNova/index.tsx b/src/frontend/src/icons/SambaNova/index.tsx
new file mode 100644
index 000000000..59fe8cde3
--- /dev/null
+++ b/src/frontend/src/icons/SambaNova/index.tsx
@@ -0,0 +1,9 @@
+import React, { forwardRef } from "react";
+import SvgSambaNovaLogo from "./SambaNovaLogo";
+
+export const SambaNovaIcon = forwardRef<
+ SVGSVGElement,
+ React.PropsWithChildren<{}>
+>((props, ref) => {
+ return ;
+});
diff --git a/src/frontend/src/utils/styleUtils.ts b/src/frontend/src/utils/styleUtils.ts
index 5abb70c43..f6e4eb9d7 100644
--- a/src/frontend/src/utils/styleUtils.ts
+++ b/src/frontend/src/utils/styleUtils.ts
@@ -284,6 +284,7 @@ import { PythonIcon } from "../icons/Python";
import { QDrantIcon } from "../icons/QDrant";
import { QianFanChatIcon } from "../icons/QianFanChat";
import { RedisIcon } from "../icons/Redis";
+import { SambaNovaIcon } from "../icons/SambaNova";
import { SearxIcon } from "../icons/Searx";
import { ShareIcon } from "../icons/Share";
import { Share2Icon } from "../icons/Share2";
@@ -659,6 +660,7 @@ export const nodeIconsLucide: iconsType = {
Qdrant: QDrantIcon,
ElasticsearchStore: ElasticsearchIcon,
Weaviate: WeaviateIcon,
+ SambaNova: SambaNovaIcon,
Searx: SearxIcon,
SlackDirectoryLoader: SvgSlackIcon,
SpiderTool: SpiderIcon,
diff --git a/src/frontend/tests/extended/features/filterEdge-shard-1.spec.ts b/src/frontend/tests/extended/features/filterEdge-shard-1.spec.ts
index 095a6d0a9..55eb86748 100644
--- a/src/frontend/tests/extended/features/filterEdge-shard-1.spec.ts
+++ b/src/frontend/tests/extended/features/filterEdge-shard-1.spec.ts
@@ -140,6 +140,7 @@ test(
"modelsOpenAI",
"modelsPerplexity",
"modelsQianfan",
+ "modelsSambaNova",
"modelsVertex AI",
];