diff --git a/src/backend/base/langflow/components/models/PerplexityModel.py b/src/backend/base/langflow/components/models/PerplexityModel.py
new file mode 100644
index 000000000..7265db9b4
--- /dev/null
+++ b/src/backend/base/langflow/components/models/PerplexityModel.py
@@ -0,0 +1,86 @@
+from langchain_community.chat_models import ChatPerplexity
+from pydantic.v1 import SecretStr
+
+from langflow.base.models.model import LCModelComponent
+from langflow.field_typing import LanguageModel
+from langflow.io import DropdownInput, FloatInput, IntInput, SecretStrInput
+
+
+class PerplexityComponent(LCModelComponent):
+    """Langflow model component backed by Perplexity's chat-completion API."""
+
+    display_name = "Perplexity"
+    description = "Generate text using Perplexity LLMs."
+    documentation = "https://python.langchain.com/v0.2/docs/integrations/chat/perplexity/"
+    icon = "Perplexity"
+    name = "PerplexityModel"
+
+    inputs = LCModelComponent._base_inputs + [
+        DropdownInput(
+            name="model_name",
+            display_name="Model Name",
+            advanced=False,
+            options=[
+                "llama-3.1-sonar-small-128k-online",
+                "llama-3.1-sonar-large-128k-online",
+                "llama-3.1-sonar-huge-128k-online",
+                "llama-3.1-sonar-small-128k-chat",
+                "llama-3.1-sonar-large-128k-chat",
+                "llama-3.1-8b-instruct",
+                "llama-3.1-70b-instruct",
+            ],
+            value="llama-3.1-sonar-small-128k-online",
+        ),
+        IntInput(
+            name="max_output_tokens",
+            display_name="Max Output Tokens",
+            info="The maximum number of tokens to generate.",
+        ),
+        SecretStrInput(
+            name="api_key",
+            display_name="Perplexity API Key",
+            info="The Perplexity API Key to use for the Perplexity model.",
+            advanced=False,
+        ),
+        FloatInput(name="temperature", display_name="Temperature", value=0.75),
+        FloatInput(
+            name="top_p",
+            display_name="Top P",
+            info="The maximum cumulative probability of tokens to consider when sampling.",
+            advanced=True,
+        ),
+        IntInput(
+            name="n",
+            display_name="N",
+            info="Number of chat completions to generate for each prompt. "
+            "Note that the API may not return the full n completions if duplicates are generated.",
+            advanced=True,
+        ),
+        IntInput(
+            name="top_k",
+            display_name="Top K",
+            info="Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.",
+            advanced=True,
+        ),
+    ]
+
+    def build_model(self) -> LanguageModel:  # type: ignore[type-var]
+        """Build and return a configured ``ChatPerplexity`` instance.
+
+        Falsy numeric inputs (unset / 0) fall back to the provider
+        defaults instead of being sent literally.
+        """
+        api_key = SecretStr(self.api_key).get_secret_value() if self.api_key else None
+
+        # ChatPerplexity's token-limit field is ``max_tokens``; the previous
+        # ``max_output_tokens`` keyword is unknown to the model, so it was
+        # shunted into ``model_kwargs`` and never limited the output.
+        return ChatPerplexity(
+            model=self.model_name,
+            temperature=self.temperature or 0.75,
+            pplx_api_key=api_key,
+            top_k=self.top_k or None,
+            top_p=self.top_p or None,
+            n=self.n or 1,
+            max_tokens=self.max_output_tokens or None,
+        )
diff --git a/src/backend/base/langflow/components/models/__init__.py b/src/backend/base/langflow/components/models/__init__.py
index 3e708d5a8..08385063c 100644
--- a/src/backend/base/langflow/components/models/__init__.py
+++ b/src/backend/base/langflow/components/models/__init__.py
@@ -9,6 +9,7 @@ from .HuggingFaceModel import HuggingFaceEndpointsComponent
from .OllamaModel import ChatOllamaComponent
from .OpenAIModel import OpenAIModelComponent
from .VertexAiModel import ChatVertexAIComponent
+from .PerplexityModel import PerplexityComponent
__all__ = [
"AIMLModelComponent",
@@ -22,5 +23,6 @@ __all__ = [
"ChatOllamaComponent",
"OpenAIModelComponent",
"ChatVertexAIComponent",
+ "PerplexityComponent",
"base",
]
diff --git a/src/frontend/package-lock.json b/src/frontend/package-lock.json
index db0e15228..f150e6b42 100644
--- a/src/frontend/package-lock.json
+++ b/src/frontend/package-lock.json
@@ -1079,6 +1079,7 @@
},
"node_modules/@clack/prompts/node_modules/is-unicode-supported": {
"version": "1.3.0",
+ "extraneous": true,
"inBundle": true,
"license": "MIT",
"engines": {
diff --git a/src/frontend/src/icons/Perplexity/Perplexity.jsx b/src/frontend/src/icons/Perplexity/Perplexity.jsx
new file mode 100644
index 000000000..5258f03d7
--- /dev/null
+++ b/src/frontend/src/icons/Perplexity/Perplexity.jsx
@@ -0,0 +1,21 @@
+const SvgPerplexity = (props) => (
+
+);
+
+export default SvgPerplexity;
diff --git a/src/frontend/src/icons/Perplexity/index.tsx b/src/frontend/src/icons/Perplexity/index.tsx
new file mode 100644
index 000000000..d6103437c
--- /dev/null
+++ b/src/frontend/src/icons/Perplexity/index.tsx
@@ -0,0 +1,9 @@
+import React, { forwardRef } from "react";
+// Import the JSX icon component (Perplexity.jsx), not the raw .svg asset:
+// the svg has no default React export and "./perplexity" fails on
+// case-sensitive module resolution.
+export const PerplexityIcon = forwardRef<
+  SVGSVGElement,
+  React.PropsWithChildren<{}>
+>((props, ref) => {
+  // Render the wrapped SVG, forwarding the ref and all props (original
+  // `return ;` rendered nothing).
+  return <PerplexitySVG ref={ref} {...props} />;
+});
diff --git a/src/frontend/src/icons/Perplexity/perplexity.svg b/src/frontend/src/icons/Perplexity/perplexity.svg
new file mode 100644
index 000000000..307f257c0
Binary files /dev/null and b/src/frontend/src/icons/Perplexity/perplexity.svg differ
diff --git a/src/frontend/src/utils/styleUtils.ts b/src/frontend/src/utils/styleUtils.ts
index 0cc99b012..f1607acb5 100644
--- a/src/frontend/src/utils/styleUtils.ts
+++ b/src/frontend/src/utils/styleUtils.ts
@@ -1,4 +1,5 @@
import { AIMLIcon } from "@/icons/AIML";
+import { PerplexityIcon } from "@/icons/Perplexity";
import { AthenaIcon } from "@/icons/athena/index";
import { freezeAllIcon } from "@/icons/freezeAll";
import {
@@ -599,4 +600,5 @@ export const nodeIconsLucide: iconsType = {
  athenaIcon: AthenaIcon,
  OptionIcon: OptionIcon,
  Option: OptionIcon,
+  Perplexity: PerplexityIcon,
};