diff --git a/src/backend/base/langflow/base/models/aiml_constants.py b/src/backend/base/langflow/base/models/aiml_constants.py
new file mode 100644
index 000000000..41955a949
--- /dev/null
+++ b/src/backend/base/langflow/base/models/aiml_constants.py
@@ -0,0 +1,77 @@
+CHAT_MODELS = [  # Model ids offered in the AI/ML chat-completion dropdown (consumed by AIMLModel.py).
+ "zero-one-ai/Yi-34B-Chat",
+ "allenai/OLMo-7B-Instruct",
+ "allenai/OLMo-7B-Twin-2T",
+ "allenai/OLMo-7B",
+ "Austism/chronos-hermes-13b",
+ "cognitivecomputations/dolphin-2.5-mixtral-8x7b",
+ "deepseek-ai/deepseek-coder-33b-instruct",
+ "deepseek-ai/deepseek-llm-67b-chat",
+ "garage-bAInd/Platypus2-70B-instruct",
+ "google/gemma-2b-it",
+ "google/gemma-7b-it",
+ "Gryphe/MythoMax-L2-13b",
+ "lmsys/vicuna-13b-v1.5",
+ "lmsys/vicuna-7b-v1.5",
+ "codellama/CodeLlama-13b-Instruct-hf",
+ "codellama/CodeLlama-34b-Instruct-hf",
+ "codellama/CodeLlama-70b-Instruct-hf",
+ "codellama/CodeLlama-7b-Instruct-hf",
+ "meta-llama/Llama-2-70b-chat-hf",
+ "meta-llama/Llama-2-13b-chat-hf",
+ "meta-llama/Llama-2-7b-chat-hf",
+ "mistralai/Mistral-7B-Instruct-v0.1",
+ "mistralai/Mistral-7B-Instruct-v0.2",
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ "NousResearch/Nous-Capybara-7B-V1p9",
+ "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+ "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT",
+ "NousResearch/Nous-Hermes-llama-2-7b",
+ "NousResearch/Nous-Hermes-Llama2-13b",
+ "NousResearch/Nous-Hermes-2-Yi-34B",
+ "openchat/openchat-3.5-1210",
+ "Open-Orca/Mistral-7B-OpenOrca",
+ "togethercomputer/Qwen-7B-Chat",
+ "Qwen/Qwen1.5-0.5B-Chat",
+ "Qwen/Qwen1.5-1.8B-Chat",
+ "Qwen/Qwen1.5-4B-Chat",
+ "Qwen/Qwen1.5-7B-Chat",
+ "Qwen/Qwen1.5-14B-Chat",
+ "Qwen/Qwen1.5-72B-Chat",
+ "snorkelai/Snorkel-Mistral-PairRM-DPO",
+ "togethercomputer/alpaca-7b",
+ "teknium/OpenHermes-2-Mistral-7B",
+ "teknium/OpenHermes-2p5-Mistral-7B",
+ "togethercomputer/falcon-40b-instruct",
+ "togethercomputer/falcon-7b-instruct",
+ "togethercomputer/Llama-2-7B-32K-Instruct",
+ "togethercomputer/RedPajama-INCITE-Chat-3B-v1",
+ "togethercomputer/RedPajama-INCITE-7B-Chat",
+ "togethercomputer/StripedHyena-Nous-7B",
+ "Undi95/ReMM-SLERP-L2-13B",
+ "Undi95/Toppy-M-7B",
+ "WizardLM/WizardLM-13B-V1.2",
+ "upstage/SOLAR-10.7B-Instruct-v1.0",
+ "gpt-4",  # ids below are OpenAI/Anthropic model names exposed through the same AI/ML endpoint
+ "gpt-4-turbo",
+ "gpt-4-0613",
+ "gpt-4-32k",
+ "gpt-4-32k-0613",
+ "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-instruct",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-16k-0613",
+ "gpt-4o",
+ "claude-3-opus-20240229",  # Anthropic model ids
+ "claude-3-sonnet-20240229",
+ "claude-3-haiku-20240307",
+]
+
+EMBEDDING_MODELS = [  # Model ids offered in the AI/ML embeddings dropdown (consumed by AIMLEmbeddings.py).
+ "text-embedding-3-small",
+ "text-embedding-3-large",
+ "text-embedding-ada-002",
+]
diff --git a/src/backend/base/langflow/components/embeddings/AIMLEmbeddings.py b/src/backend/base/langflow/components/embeddings/AIMLEmbeddings.py
new file mode 100644
index 000000000..3bd1d3cf9
--- /dev/null
+++ b/src/backend/base/langflow/components/embeddings/AIMLEmbeddings.py
@@ -0,0 +1,34 @@
+from langflow.base.embeddings.model import LCEmbeddingsModel
+from langflow.base.models.aiml_constants import EMBEDDING_MODELS
+from langflow.components.embeddings.util.AIMLEmbeddingsImpl import AIMLEmbeddingsImpl
+from langflow.field_typing import Embeddings
+from langflow.inputs.inputs import DropdownInput
+from langflow.io import SecretStrInput
+
+
+class AIMLEmbeddingsComponent(LCEmbeddingsModel):  # Langflow embeddings component backed by the AI/ML REST API.
+ display_name = "AI/ML Embeddings"
+ description = "Generate embeddings using the AI/ML API."
+ icon = "AI/ML"
+ name = "AIMLEmbeddings"
+
+ inputs = [
+ DropdownInput(
+ name="model_name",
+ display_name="Model Name",
+ options=EMBEDDING_MODELS,  # ids declared in langflow.base.models.aiml_constants
+ required=True,
+ ),
+ SecretStrInput(
+ name="aiml_api_key",
+ display_name="AI/ML API Key",
+ value="AIML_API_KEY",
+ required=True,
+ ),
+ ]
+
+ def build_embeddings(self) -> Embeddings:  # wires the selected model and key into the raw HTTP client
+ return AIMLEmbeddingsImpl(
+ api_key=self.aiml_api_key,
+ model=self.model_name,
+ )
diff --git a/src/backend/base/langflow/components/embeddings/__init__.py b/src/backend/base/langflow/components/embeddings/__init__.py
index 79f8173d1..520e3e13f 100644
--- a/src/backend/base/langflow/components/embeddings/__init__.py
+++ b/src/backend/base/langflow/components/embeddings/__init__.py
@@ -1,3 +1,4 @@
+from .AIMLEmbeddings import AIMLEmbeddingsComponent
from .AmazonBedrockEmbeddings import AmazonBedrockEmbeddingsComponent
from .AstraVectorize import AstraVectorizeComponent
from .AzureOpenAIEmbeddings import AzureOpenAIEmbeddingsComponent
@@ -9,6 +10,7 @@ from .OpenAIEmbeddings import OpenAIEmbeddingsComponent
from .VertexAIEmbeddings import VertexAIEmbeddingsComponent
__all__ = [
+ "AIMLEmbeddingsComponent",
"AmazonBedrockEmbeddingsComponent",
"AstraVectorizeComponent",
"AzureOpenAIEmbeddingsComponent",
diff --git a/src/backend/base/langflow/components/embeddings/util/AIMLEmbeddingsImpl.py b/src/backend/base/langflow/components/embeddings/util/AIMLEmbeddingsImpl.py
new file mode 100644
index 000000000..74bf2110c
--- /dev/null
+++ b/src/backend/base/langflow/components/embeddings/util/AIMLEmbeddingsImpl.py
@@ -0,0 +1,70 @@
+import json
+from typing import List
+
+import httpx
+from langflow.field_typing import Embeddings
+from langchain_core.runnables.config import run_in_executor
+from langchain_core.pydantic_v1 import BaseModel, SecretStr
+from loguru import logger
+
+
+class AIMLEmbeddingsImpl(BaseModel, Embeddings):  # Direct HTTP implementation of the Embeddings interface (no provider SDK).
+ embeddings_completion_url: str = "https://api.aimlapi.com/v1/embeddings"
+
+ api_key: SecretStr
+ model: str
+
+ def embed_documents(self, texts: List[str]) -> List[List[float]]:  # one HTTP request per text; no batching
+ result_vectors = []
+ for text in texts:
+ vector = self.embed_query(text)
+ result_vectors.append(vector)
+
+ return result_vectors
+
+ def embed_query(self, text: str) -> List[float]:  # POSTs a single text and returns its embedding vector
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {self.api_key.get_secret_value()}",
+ }
+
+ payload = {
+ "model": self.model,
+ "input": text,
+ }
+ vector = []  # returned as-is (empty) if the response body cannot be decoded as JSON
+ try:
+ response = httpx.post(
+ self.embeddings_completion_url,
+ headers=headers,
+ json=payload,
+ )
+ try:
+ response.raise_for_status()
+ result_data = response.json()
+ vector = result_data["data"][0]["embedding"]  # expects {"data": [{"embedding": [...]}]}
+ except httpx.HTTPStatusError as http_err:
+ logger.error(f"HTTP error occurred: {http_err}")
+ raise http_err
+ except httpx.RequestError as req_err:
+ logger.error(f"Request error occurred: {req_err}")
+ raise req_err
+ except json.JSONDecodeError:  # deliberately non-fatal: log and fall through to the empty vector
+ logger.warning(f"Failed to decode JSON, response text: {response.text}")
+ except KeyError as key_err:  # result_data is always bound here (assigned before the subscript)
+ logger.warning(f"Key error: {key_err}, response content: {result_data}")
+ raise key_err
+ except httpx.TimeoutException:
+ logger.error("Request timed out.")
+ raise
+ except Exception as exc:
+ logger.error(f"Error: {exc}")
+ raise
+
+ return vector
+
+ async def aembed_documents(self, texts: List[str]) -> List[List[float]]:  # sync call offloaded to the default executor
+ return await run_in_executor(None, self.embed_documents, texts)
+
+ async def aembed_query(self, text: str) -> List[float]:  # sync call offloaded to the default executor
+ return await run_in_executor(None, self.embed_query, text)
diff --git a/src/backend/base/langflow/components/embeddings/util/__init__.py b/src/backend/base/langflow/components/embeddings/util/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/backend/base/langflow/components/models/AIMLModel.py b/src/backend/base/langflow/components/models/AIMLModel.py
new file mode 100644
index 000000000..37233860f
--- /dev/null
+++ b/src/backend/base/langflow/components/models/AIMLModel.py
@@ -0,0 +1,148 @@
+import json
+import httpx
+from langflow.base.models.aiml_constants import CHAT_MODELS
+from langflow.custom.custom_component.component import Component
+
+from langflow.inputs.inputs import FloatInput, IntInput, MessageInput, SecretStrInput
+from langflow.schema.message import Message
+from langflow.template.field.base import Output
+from loguru import logger
+from pydantic.v1 import SecretStr
+
+from langflow.inputs import (
+ DropdownInput,
+ StrInput,
+)
+
+
+class AIMLModelComponent(Component):  # Chat-completion component calling the AI/ML REST API directly (no langchain model).
+ display_name = "AI/ML API"
+ description = "Generates text using the AI/ML API"
+ icon = "AI/ML"
+ chat_completion_url = "https://api.aimlapi.com/v1/chat/completions"
+
+ outputs = [
+ Output(display_name="Text", name="text_output", method="make_request"),
+ ]
+
+ inputs = [
+ DropdownInput(
+ name="model_name",
+ display_name="Model Name",
+ options=CHAT_MODELS,  # ids declared in langflow.base.models.aiml_constants
+ required=True,
+ ),
+ SecretStrInput(
+ name="aiml_api_key",
+ display_name="AI/ML API Key",
+ value="AIML_API_KEY",
+ ),
+ MessageInput(name="input_value", display_name="Input", required=True),
+ IntInput(
+ name="max_tokens",
+ display_name="Max Tokens",
+ advanced=True,
+ info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
+ ),
+ StrInput(
+ name="stop_tokens",
+ display_name="Stop Tokens",
+ info="Comma-separated list of tokens to signal the model to stop generating text.",
+ advanced=True,
+ ),
+ IntInput(
+ name="top_k",
+ display_name="Top K",
+ info="Limits token selection to top K. (Default: 40)",
+ advanced=True,
+ ),
+ FloatInput(
+ name="top_p",
+ display_name="Top P",
+ info="Works together with top-k. (Default: 0.9)",
+ advanced=True,
+ ),
+ FloatInput(
+ name="repeat_penalty",
+ display_name="Repeat Penalty",
+ info="Penalty for repetitions in generated text. (Default: 1.1)",
+ advanced=True,
+ ),
+ FloatInput(
+ name="temperature",
+ display_name="Temperature",
+ value=0.2,
+ info="Controls the creativity of model responses.",
+ ),
+ StrInput(
+ name="system_message",
+ display_name="System Message",
+ info="System message to pass to the model.",
+ advanced=True,
+ ),
+ ]
+
+ def make_request(self) -> Message:  # builds the chat payload, POSTs it, and returns the first choice as a Message
+ api_key = SecretStr(self.aiml_api_key) if self.aiml_api_key else None
+
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {api_key.get_secret_value()}" if api_key else "",
+ }
+
+ messages = []
+ if self.system_message:
+ messages.append({"role": "system", "content": self.system_message})
+
+ if self.input_value:
+ if isinstance(self.input_value, Message):
+ # Though we aren't using langchain here, the helper method is useful
+ message = self.input_value.to_lc_message()
+ if message.type == "human":
+ messages.append({"role": "user", "content": message.content})
+ else:
+ raise ValueError(f"Expected user message, saw: {message.type}")
+ else:
+ raise TypeError(f"Expected Message type, saw: {type(self.input_value)}")
+ else:
+ raise ValueError("Please provide an input value")
+
+ payload = {
+ "model": self.model_name,
+ "messages": messages,
+ "max_tokens": self.max_tokens or None,  # 0/unset maps to None, i.e. "unlimited" per the input's info text
+ "temperature": self.temperature or 0.2,
+ "top_k": self.top_k or 40,
+ "top_p": self.top_p or 0.9,
+ "repeat_penalty": self.repeat_penalty or 1.1,
+ "stop_tokens": self.stop_tokens or None,  # NOTE(review): OpenAI-compatible APIs usually name this "stop" — confirm the AI/ML API accepts "stop_tokens"
+ }
+
+ try:
+ response = httpx.post(self.chat_completion_url, headers=headers, json=payload)  # httpx default timeout applies; TimeoutException handled below
+ try:
+ response.raise_for_status()
+ result_data = response.json()
+ choice = result_data["choices"][0]
+ result = choice["message"]["content"]
+ except httpx.HTTPStatusError as http_err:
+ logger.error(f"HTTP error occurred: {http_err}")
+ raise  # bare raise preserves the original exception and traceback
+ except httpx.RequestError as req_err:
+ logger.error(f"Request error occurred: {req_err}")
+ raise
+ except json.JSONDecodeError:
+ logger.warning(f"Failed to decode JSON, response text: {response.text}")  # BUG FIX: missing f-prefix logged the literal "{response.text}"
+ result = response.text
+ except KeyError as key_err:
+ logger.warning(f"Key error: {key_err}, response content: {result_data}")
+ raise
+
+ self.status = result
+ except httpx.TimeoutException:
+ return Message(text="Request timed out.")
+ except Exception as exc:
+ logger.error(f"Error: {exc}")
+ raise
+
+ return Message(text=result)
diff --git a/src/backend/base/langflow/components/models/MistralModel.py b/src/backend/base/langflow/components/models/MistralModel.py
index 3aa9fcb3a..37339a851 100644
--- a/src/backend/base/langflow/components/models/MistralModel.py
+++ b/src/backend/base/langflow/components/models/MistralModel.py
@@ -45,7 +45,7 @@ class MistralAIModelComponent(LCModelComponent):
),
),
SecretStrInput(
- name="mistral_api_key",
+ name="api_key",
display_name="Mistral API Key",
info="The Mistral API Key to use for the Mistral model.",
advanced=False,
@@ -67,7 +67,7 @@ class MistralAIModelComponent(LCModelComponent):
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
- mistral_api_key = self.mistral_api_key
+ mistral_api_key = self.api_key
temperature = self.temperature
model_name = self.model_name
max_tokens = self.max_tokens
diff --git a/src/backend/base/langflow/components/models/OllamaModel.py b/src/backend/base/langflow/components/models/OllamaModel.py
index 3419aace2..ec8c01ff8 100644
--- a/src/backend/base/langflow/components/models/OllamaModel.py
+++ b/src/backend/base/langflow/components/models/OllamaModel.py
@@ -76,7 +76,7 @@ class ChatOllamaComponent(LCModelComponent):
value="http://localhost:11434",
),
DropdownInput(
- name="model",
+ name="model_name",
display_name="Model Name",
value="llama2",
info="Refer to https://ollama.ai/library for more models.",
@@ -107,6 +107,7 @@ class ChatOllamaComponent(LCModelComponent):
info="Enable/disable Mirostat sampling for controlling perplexity.",
value="Disabled",
advanced=True,
+ real_time_refresh=True,
),
FloatInput(
name="mirostat_eta",
@@ -238,7 +239,7 @@ class ChatOllamaComponent(LCModelComponent):
# Mapping system settings to their corresponding values
llm_params = {
"base_url": self.base_url,
- "model": self.model,
+ "model": self.model_name,
"mirostat": mirostat_value,
"format": self.format,
"metadata": self.metadata,
diff --git a/src/backend/base/langflow/components/models/OpenAIModel.py b/src/backend/base/langflow/components/models/OpenAIModel.py
index ef80ba817..d6051dba6 100644
--- a/src/backend/base/langflow/components/models/OpenAIModel.py
+++ b/src/backend/base/langflow/components/models/OpenAIModel.py
@@ -58,7 +58,7 @@ class OpenAIModelComponent(LCModelComponent):
info="The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
),
SecretStrInput(
- name="openai_api_key",
+ name="api_key",
display_name="OpenAI API Key",
info="The OpenAI API Key to use for the OpenAI model.",
advanced=False,
@@ -82,10 +82,10 @@ class OpenAIModelComponent(LCModelComponent):
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
- # self.output_schea is a list of dictionarie s
+ # self.output_schema is a list of dictionaries
# let's convert it to a dictionary
output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})
- openai_api_key = self.openai_api_key
+ openai_api_key = self.api_key
temperature = self.temperature
model_name: str = self.model_name
max_tokens = self.max_tokens
diff --git a/src/backend/base/langflow/components/models/__init__.py b/src/backend/base/langflow/components/models/__init__.py
index 70e1ab10c..3e708d5a8 100644
--- a/src/backend/base/langflow/components/models/__init__.py
+++ b/src/backend/base/langflow/components/models/__init__.py
@@ -1,3 +1,4 @@
+from .AIMLModel import AIMLModelComponent
from .AmazonBedrockModel import AmazonBedrockComponent
from .AnthropicModel import AnthropicModelComponent
from .AzureOpenAIModel import AzureChatOpenAIComponent
@@ -10,6 +11,7 @@ from .OpenAIModel import OpenAIModelComponent
from .VertexAiModel import ChatVertexAIComponent
__all__ = [
+ "AIMLModelComponent",
"AmazonBedrockComponent",
"AnthropicModelComponent",
"AzureChatOpenAIComponent",
diff --git a/src/frontend/src/icons/AIML/AI-ML API logo square.svg b/src/frontend/src/icons/AIML/AI-ML API logo square.svg
new file mode 100644
index 000000000..82918ac5c
--- /dev/null
+++ b/src/frontend/src/icons/AIML/AI-ML API logo square.svg
@@ -0,0 +1,97 @@
+
diff --git a/src/frontend/src/icons/AIML/AI-ML.jsx b/src/frontend/src/icons/AIML/AI-ML.jsx
new file mode 100644
index 000000000..3de222647
--- /dev/null
+++ b/src/frontend/src/icons/AIML/AI-ML.jsx
@@ -0,0 +1,332 @@
+import { cn } from "@/utils/utils";
+
+export const AIMLComponent = ({ className, ...props }) => (
+
+);
diff --git a/src/frontend/src/icons/AIML/index.tsx b/src/frontend/src/icons/AIML/index.tsx
new file mode 100644
index 000000000..59d66bd99
--- /dev/null
+++ b/src/frontend/src/icons/AIML/index.tsx
@@ -0,0 +1,8 @@
+import React, { forwardRef } from "react";
+import { AIMLComponent } from "./AI-ML";
+
+export const AIMLIcon = forwardRef>(
+ (props, ref) => {
+ return ;
+ },
+);
diff --git a/src/frontend/src/utils/styleUtils.ts b/src/frontend/src/utils/styleUtils.ts
index 6ad2d94f0..f6c8bfd42 100644
--- a/src/frontend/src/utils/styleUtils.ts
+++ b/src/frontend/src/utils/styleUtils.ts
@@ -1,3 +1,4 @@
+import { AIMLIcon } from "@/icons/AIML";
import { freezeAllIcon } from "@/icons/freezeAll";
import {
AlertCircle,
@@ -581,4 +582,6 @@ export const nodeIconsLucide: iconsType = {
PGVector: CpuIcon,
Confluence: ConfluenceIcon,
FreezeAll: freezeAllIcon,
+ AIML: AIMLIcon,
+ "AI/ML": AIMLIcon,
};