refactor: Update VertexAIEmbeddingsComponent to use Embeddings field type

ogabrielluiz 2024-06-11 22:57:06 -03:00
commit 5cee0660e8
7 changed files with 351 additions and 220 deletions

View file

@ -379,6 +379,11 @@ class Vertex:
params[field_name] = [unescape_string(v) for v in val]
elif isinstance(val, str):
params[field_name] = unescape_string(val)
elif field.get("type") == "bool" and val is not None:
if isinstance(val, bool):
params[field_name] = val
elif isinstance(val, str):
params[field_name] = val != ""
elif val is not None and val != "":
params[field_name] = val
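
The new branch handles boolean fields whose values may arrive either as real booleans or as strings from the UI: a bool passes through unchanged, while a string is treated as True exactly when it is non-empty. A minimal standalone sketch of that coercion, lifted out of the surrounding Vertex class with invented sample values:

def coerce_bool(val):
    # Mirrors the added branch: real bools pass through, strings are
    # truthy only when non-empty, anything else is returned unchanged.
    if isinstance(val, bool):
        return val
    if isinstance(val, str):
        return val != ""
    return val

assert coerce_bool(True) is True
assert coerce_bool("") is False
assert coerce_bool("false") is True  # any non-empty string counts as True

(As the hunk reads, a string value would already be caught by the earlier isinstance(val, str) branch, so the string case inside the new branch only matters if that ordering changes.)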

View file

@ -7,7 +7,7 @@
"sourceHandle": {
"dataType": "OpenAIModel",
"id": "OpenAIModel-k39HS",
"name": "text",
"name": "text_output",
"output_types": [
"Text"
]
@ -23,7 +23,7 @@
},
"id": "reactflow__edge-OpenAIModel-k39HS{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-k39HSœ}-ChatOutput-njtka{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-njtkaœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"source": "OpenAIModel-k39HS",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-k39HSœ, œoutput_typesœ: [œTextœ], œnameœ: œtextœ}",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-k39HSœ, œoutput_typesœ: [œTextœ], œnameœ: œtext_outputœ}",
"style": {
"stroke": "#555"
},
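
Renaming the model output from text to text_output touches two places per edge: the plain sourceHandle object and the escaped sourceHandle string used as the React Flow handle id, where œ appears to stand in for a double quote so JSON can live inside the id. A small sketch of reading such a handle back under that assumption (the replacement character is inferred from the diff, not documented in it):

import json

def decode_handle(handle: str) -> dict:
    # Assumption: œ is a stand-in for '"', so replacing it yields plain JSON.
    return json.loads(handle.replace("œ", '"'))

handle = "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-k39HSœ, œoutput_typesœ: [œTextœ], œnameœ: œtext_outputœ}"
print(decode_handle(handle)["name"])  # -> text_output
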
@ -168,7 +168,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": Input(display_name=\"Template\"),\n }\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
},
"template": {
"advanced": false,
@ -277,24 +277,33 @@
],
"frozen": false,
"icon": "OpenAI",
"output_types": [
"Text"
],
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Text",
"method": null,
"name": "text",
"method": "text_response",
"name": "text_output",
"selected": "Text",
"types": [
"Text"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Language Model",
"method": "model_response",
"name": "model_output",
"selected": "BaseLanguageModel",
"types": [
"BaseLanguageModel"
],
"value": "__UNDEFINED__"
}
],
"template": {
"_type": "CustomComponent",
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
@ -311,7 +320,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-3.5-turbo\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n"
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel, Text\nfrom langflow.template import Input, Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n Input(name=\"input_value\", type=str, display_name=\"Input\", input_types=[\"Text\", \"Record\", \"Prompt\"]),\n Input(\n name=\"max_tokens\",\n type=Optional[int],\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n Input(name=\"model_kwargs\", type=dict, display_name=\"Model Kwargs\", advanced=True),\n Input(name=\"model_name\", type=str, display_name=\"Model Name\", advanced=False, options=MODEL_NAMES),\n Input(\n name=\"openai_api_base\",\n type=Optional[str],\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n Input(\n name=\"openai_api_key\",\n type=str,\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n password=True,\n ),\n Input(name=\"temperature\", type=float, display_name=\"Temperature\", advanced=False, default=0.1),\n Input(name=\"stream\", type=bool, display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n Input(\n name=\"system_message\",\n type=Optional[str],\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"model_response\"),\n ]\n\n def text_response(self) -> Text:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.model_response()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def model_response(self) -> BaseLanguageModel:\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n return output\n"
},
"input_value": {
"advanced": false,
@ -331,10 +340,11 @@
"name": "input_value",
"password": false,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"max_tokens": {
"advanced": true,
@ -343,6 +353,9 @@
"fileTypes": [],
"file_path": "",
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -352,8 +365,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "int",
"value": 256
"type": "str",
"value": ""
},
"model_kwargs": {
"advanced": true,
@ -362,6 +375,9 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -371,8 +387,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "NestedDict",
"value": {}
"type": "str",
"value": ""
},
"model_name": {
"advanced": false,
@ -401,7 +417,7 @@
"show": true,
"title_case": false,
"type": "str",
"value": "gpt-3.5-turbo"
"value": ""
},
"openai_api_base": {
"advanced": true,
@ -422,7 +438,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"openai_api_key": {
"advanced": false,
@ -435,16 +452,16 @@
"Text"
],
"list": false,
"load_from_db": true,
"load_from_db": false,
"multiline": false,
"name": "openai_api_key",
"password": true,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
"value": ""
},
"stream": {
"advanced": true,
@ -453,6 +470,9 @@
"fileTypes": [],
"file_path": "",
"info": "Stream the response from the model. Streaming works only in Chat.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -462,8 +482,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "bool",
"value": true
"type": "str",
"value": ""
},
"system_message": {
"advanced": true,
@ -484,7 +504,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"temperature": {
"advanced": false,
@ -493,23 +514,20 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
"name": "temperature",
"password": false,
"placeholder": "",
"rangeSpec": {
"max": 1,
"min": -1,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"type": "float",
"value": 0.1
"type": "str",
"value": ""
}
}
},
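
Beyond the edge renames, each OpenAI model node's JSON mirrors the component refactor embedded in its code field: _type changes from CustomComponent to Component, build_config and field_order are gone, inputs become typed Input declarations, and the node exposes two outputs, text_output (text_response) and model_output (model_response), instead of a single implicit Text output. A minimal sketch of that declarative pattern, using only names that already appear in the diff (langflow.custom.Component, langflow.template.Input/Output) and an invented echo component for illustration:

from langflow.custom import Component
from langflow.template import Input, Output


class EchoComponent(Component):
    # Invented example, not part of this commit: it only illustrates the
    # declarative inputs/outputs pattern used by the refactored components.
    display_name = "Echo"
    description = "Returns its input unchanged."

    inputs = [
        Input(name="input_value", type=str, display_name="Input"),
    ]
    outputs = [
        Output(display_name="Text", name="text_output", method="text_response"),
    ]

    def text_response(self) -> str:
        # Declared inputs are exposed as attributes; setting self.status
        # surfaces the result in the UI, as the refactored components do.
        result = self.input_value
        self.status = result
        return result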

View file

@ -40,7 +40,7 @@
"sourceHandle": {
"dataType": "OpenAIModel",
"id": "OpenAIModel-gi29P",
"name": "text",
"name": "text_output",
"output_types": [
"Text"
]
@ -56,7 +56,7 @@
},
"id": "reactflow__edge-OpenAIModel-gi29P{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-gi29Pœ}-ChatOutput-JPlxl{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-JPlxlœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"source": "OpenAIModel-gi29P",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-gi29Pœ, œoutput_typesœ: [œTextœ], œnameœ: œtextœ}",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-gi29Pœ, œoutput_typesœ: [œTextœ], œnameœ: œtext_outputœ}",
"style": {
"stroke": "#555"
},
@ -236,7 +236,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": Input(display_name=\"Template\"),\n }\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
},
"instructions": {
"advanced": false,
@ -691,24 +691,33 @@
],
"frozen": false,
"icon": "OpenAI",
"output_types": [
"Text"
],
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Text",
"method": null,
"name": "text",
"method": "text_response",
"name": "text_output",
"selected": "Text",
"types": [
"Text"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Language Model",
"method": "model_response",
"name": "model_output",
"selected": "BaseLanguageModel",
"types": [
"BaseLanguageModel"
],
"value": "__UNDEFINED__"
}
],
"template": {
"_type": "CustomComponent",
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
@ -725,7 +734,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-3.5-turbo\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n"
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel, Text\nfrom langflow.template import Input, Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n Input(name=\"input_value\", type=str, display_name=\"Input\", input_types=[\"Text\", \"Record\", \"Prompt\"]),\n Input(\n name=\"max_tokens\",\n type=Optional[int],\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n Input(name=\"model_kwargs\", type=dict, display_name=\"Model Kwargs\", advanced=True),\n Input(name=\"model_name\", type=str, display_name=\"Model Name\", advanced=False, options=MODEL_NAMES),\n Input(\n name=\"openai_api_base\",\n type=Optional[str],\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n Input(\n name=\"openai_api_key\",\n type=str,\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n password=True,\n ),\n Input(name=\"temperature\", type=float, display_name=\"Temperature\", advanced=False, default=0.1),\n Input(name=\"stream\", type=bool, display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n Input(\n name=\"system_message\",\n type=Optional[str],\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"model_response\"),\n ]\n\n def text_response(self) -> Text:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.model_response()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def model_response(self) -> BaseLanguageModel:\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n return output\n"
},
"input_value": {
"advanced": false,
@ -745,10 +754,11 @@
"name": "input_value",
"password": false,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"max_tokens": {
"advanced": true,
@ -757,6 +767,9 @@
"fileTypes": [],
"file_path": "",
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -766,8 +779,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "int",
"value": "1024"
"type": "str",
"value": ""
},
"model_kwargs": {
"advanced": true,
@ -776,6 +789,9 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -785,8 +801,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "NestedDict",
"value": {}
"type": "str",
"value": ""
},
"model_name": {
"advanced": false,
@ -815,7 +831,7 @@
"show": true,
"title_case": false,
"type": "str",
"value": "gpt-3.5-turbo-0125"
"value": ""
},
"openai_api_base": {
"advanced": true,
@ -836,7 +852,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"openai_api_key": {
"advanced": false,
@ -854,11 +871,11 @@
"name": "openai_api_key",
"password": true,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
"value": ""
},
"stream": {
"advanced": true,
@ -867,6 +884,9 @@
"fileTypes": [],
"file_path": "",
"info": "Stream the response from the model. Streaming works only in Chat.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -876,8 +896,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "bool",
"value": true
"type": "str",
"value": ""
},
"system_message": {
"advanced": true,
@ -898,7 +918,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"temperature": {
"advanced": false,
@ -907,23 +928,20 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
"name": "temperature",
"password": false,
"placeholder": "",
"rangeSpec": {
"max": 1,
"min": -1,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"type": "float",
"value": "0.1"
"type": "str",
"value": ""
}
}
},

View file

@ -102,7 +102,7 @@
"sourceHandle": {
"dataType": "OpenAIModel",
"id": "OpenAIModel-Bt067",
"name": "text",
"name": "text_output",
"output_types": [
"Text"
]
@ -118,7 +118,7 @@
},
"id": "reactflow__edge-OpenAIModel-Bt067{œbaseClassesœ:[œobjectœ,œstrœ,œTextœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-Bt067œ}-ChatOutput-F5Awj{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-F5Awjœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"source": "OpenAIModel-Bt067",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-Bt067œ, œoutput_typesœ: [œTextœ], œnameœ: œtextœ}",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-Bt067œ, œoutput_typesœ: [œTextœ], œnameœ: œtext_outputœ}",
"style": {
"stroke": "#555"
},
@ -253,7 +253,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": Input(display_name=\"Template\"),\n }\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
},
"template": {
"advanced": false,
@ -835,24 +835,33 @@
],
"frozen": false,
"icon": "OpenAI",
"output_types": [
"Text"
],
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Text",
"method": null,
"name": "text",
"method": "text_response",
"name": "text_output",
"selected": "Text",
"types": [
"Text"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Language Model",
"method": "model_response",
"name": "model_output",
"selected": "BaseLanguageModel",
"types": [
"BaseLanguageModel"
],
"value": "__UNDEFINED__"
}
],
"template": {
"_type": "CustomComponent",
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
@ -869,7 +878,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-3.5-turbo\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n"
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel, Text\nfrom langflow.template import Input, Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n Input(name=\"input_value\", type=str, display_name=\"Input\", input_types=[\"Text\", \"Record\", \"Prompt\"]),\n Input(\n name=\"max_tokens\",\n type=Optional[int],\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n Input(name=\"model_kwargs\", type=dict, display_name=\"Model Kwargs\", advanced=True),\n Input(name=\"model_name\", type=str, display_name=\"Model Name\", advanced=False, options=MODEL_NAMES),\n Input(\n name=\"openai_api_base\",\n type=Optional[str],\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n Input(\n name=\"openai_api_key\",\n type=str,\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n password=True,\n ),\n Input(name=\"temperature\", type=float, display_name=\"Temperature\", advanced=False, default=0.1),\n Input(name=\"stream\", type=bool, display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n Input(\n name=\"system_message\",\n type=Optional[str],\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"model_response\"),\n ]\n\n def text_response(self) -> Text:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.model_response()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def model_response(self) -> BaseLanguageModel:\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n return output\n"
},
"input_value": {
"advanced": false,
@ -889,10 +898,11 @@
"name": "input_value",
"password": false,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"max_tokens": {
"advanced": true,
@ -901,6 +911,9 @@
"fileTypes": [],
"file_path": "",
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -910,8 +923,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "int",
"value": 256
"type": "str",
"value": ""
},
"model_kwargs": {
"advanced": true,
@ -920,6 +933,9 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -929,8 +945,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "NestedDict",
"value": {}
"type": "str",
"value": ""
},
"model_name": {
"advanced": false,
@ -959,7 +975,7 @@
"show": true,
"title_case": false,
"type": "str",
"value": "gpt-4-turbo-preview"
"value": ""
},
"openai_api_base": {
"advanced": true,
@ -980,7 +996,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"openai_api_key": {
"advanced": false,
@ -998,11 +1015,11 @@
"name": "openai_api_key",
"password": true,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
"value": ""
},
"stream": {
"advanced": true,
@ -1011,6 +1028,9 @@
"fileTypes": [],
"file_path": "",
"info": "Stream the response from the model. Streaming works only in Chat.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1020,8 +1040,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "bool",
"value": true
"type": "str",
"value": ""
},
"system_message": {
"advanced": true,
@ -1042,7 +1062,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"temperature": {
"advanced": false,
@ -1051,23 +1072,20 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
"name": "temperature",
"password": false,
"placeholder": "",
"rangeSpec": {
"max": 1,
"min": -1,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"type": "float",
"value": 0.1
"type": "str",
"value": ""
}
}
},

View file

@ -104,7 +104,7 @@
"sourceHandle": {
"dataType": "OpenAIModel",
"id": "OpenAIModel-9RykF",
"name": "text",
"name": "text_output",
"output_types": [
"Text"
]
@ -120,7 +120,7 @@
},
"id": "reactflow__edge-OpenAIModel-9RykF{œbaseClassesœ:[œstrœ,œobjectœ,œTextœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-9RykFœ}-ChatOutput-P1jEe{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-P1jEeœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"source": "OpenAIModel-9RykF",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-9RykFœ, œoutput_typesœ: [œTextœ], œnameœ: œtextœ}",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-9RykFœ, œoutput_typesœ: [œTextœ], œnameœ: œtext_outputœ}",
"style": {
"stroke": "#555"
},
@ -834,7 +834,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": Input(display_name=\"Template\"),\n }\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
},
"context": {
"advanced": false,
@ -967,24 +967,33 @@
],
"frozen": false,
"icon": "OpenAI",
"output_types": [
"Text"
],
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Text",
"method": null,
"name": "text",
"method": "text_response",
"name": "text_output",
"selected": "Text",
"types": [
"Text"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Language Model",
"method": "model_response",
"name": "model_output",
"selected": "BaseLanguageModel",
"types": [
"BaseLanguageModel"
],
"value": "__UNDEFINED__"
}
],
"template": {
"_type": "CustomComponent",
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
@ -1001,7 +1010,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-3.5-turbo\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n"
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel, Text\nfrom langflow.template import Input, Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n Input(name=\"input_value\", type=str, display_name=\"Input\", input_types=[\"Text\", \"Record\", \"Prompt\"]),\n Input(\n name=\"max_tokens\",\n type=Optional[int],\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n Input(name=\"model_kwargs\", type=dict, display_name=\"Model Kwargs\", advanced=True),\n Input(name=\"model_name\", type=str, display_name=\"Model Name\", advanced=False, options=MODEL_NAMES),\n Input(\n name=\"openai_api_base\",\n type=Optional[str],\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n Input(\n name=\"openai_api_key\",\n type=str,\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n password=True,\n ),\n Input(name=\"temperature\", type=float, display_name=\"Temperature\", advanced=False, default=0.1),\n Input(name=\"stream\", type=bool, display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n Input(\n name=\"system_message\",\n type=Optional[str],\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"model_response\"),\n ]\n\n def text_response(self) -> Text:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.model_response()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def model_response(self) -> BaseLanguageModel:\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n return output\n"
},
"input_value": {
"advanced": false,
@ -1021,10 +1030,11 @@
"name": "input_value",
"password": false,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"max_tokens": {
"advanced": true,
@ -1033,6 +1043,9 @@
"fileTypes": [],
"file_path": "",
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1042,8 +1055,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "int",
"value": 256
"type": "str",
"value": ""
},
"model_kwargs": {
"advanced": true,
@ -1052,6 +1065,9 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1061,8 +1077,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "NestedDict",
"value": {}
"type": "str",
"value": ""
},
"model_name": {
"advanced": false,
@ -1091,7 +1107,7 @@
"show": true,
"title_case": false,
"type": "str",
"value": "gpt-4-1106-preview"
"value": ""
},
"openai_api_base": {
"advanced": true,
@ -1112,7 +1128,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"openai_api_key": {
"advanced": false,
@ -1125,16 +1142,16 @@
"Text"
],
"list": false,
"load_from_db": true,
"load_from_db": false,
"multiline": false,
"name": "openai_api_key",
"password": true,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
"value": ""
},
"stream": {
"advanced": true,
@ -1143,6 +1160,9 @@
"fileTypes": [],
"file_path": "",
"info": "Stream the response from the model. Streaming works only in Chat.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1152,8 +1172,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "bool",
"value": false
"type": "str",
"value": ""
},
"system_message": {
"advanced": true,
@ -1174,7 +1194,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"temperature": {
"advanced": false,
@ -1183,23 +1204,20 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
"name": "temperature",
"password": false,
"placeholder": "",
"rangeSpec": {
"max": 1,
"min": -1,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"type": "float",
"value": "0.2"
"type": "str",
"value": ""
}
}
},

View file

@ -100,7 +100,7 @@
"sourceHandle": {
"dataType": "OpenAIModel",
"id": "OpenAIModel-uYXZJ",
"name": "text",
"name": "text_output",
"output_types": [
"Text"
]
@ -119,7 +119,7 @@
},
"id": "reactflow__edge-OpenAIModel-uYXZJ{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}-Prompt-gTNiz{œfieldNameœ:œsummaryœ,œidœ:œPrompt-gTNizœ,œinputTypesœ:[œDocumentœ,œBaseOutputParserœ,œRecordœ,œTextœ],œtypeœ:œstrœ}",
"source": "OpenAIModel-uYXZJ",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-uYXZJœ, œoutput_typesœ: [œTextœ], œnameœ: œtextœ}",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-uYXZJœ, œoutput_typesœ: [œTextœ], œnameœ: œtext_outputœ}",
"style": {
"stroke": "#555"
},
@ -132,7 +132,7 @@
"sourceHandle": {
"dataType": "OpenAIModel",
"id": "OpenAIModel-uYXZJ",
"name": "text",
"name": "text_output",
"output_types": [
"Text"
]
@ -148,7 +148,7 @@
},
"id": "reactflow__edge-OpenAIModel-uYXZJ{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-uYXZJœ}-ChatOutput-EJkG3{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-EJkG3œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"source": "OpenAIModel-uYXZJ",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-uYXZJœ, œoutput_typesœ: [œTextœ], œnameœ: œtextœ}",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-uYXZJœ, œoutput_typesœ: [œTextœ], œnameœ: œtext_outputœ}",
"style": {
"stroke": "#555"
},
@ -222,7 +222,7 @@
"sourceHandle": {
"dataType": "OpenAIModel",
"id": "OpenAIModel-XawYB",
"name": "text",
"name": "text_output",
"output_types": [
"Text"
]
@ -238,7 +238,7 @@
},
"id": "reactflow__edge-OpenAIModel-XawYB{œbaseClassesœ:[œstrœ,œTextœ,œobjectœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-XawYBœ}-ChatOutput-DNmvg{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-DNmvgœ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"source": "OpenAIModel-XawYB",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-XawYBœ, œoutput_typesœ: [œTextœ], œnameœ: œtextœ}",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-XawYBœ, œoutput_typesœ: [œTextœ], œnameœ: œtext_outputœ}",
"style": {
"stroke": "#555"
},
@ -320,7 +320,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": Input(display_name=\"Template\"),\n }\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
},
"document": {
"advanced": false,
@ -462,7 +462,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": Input(display_name=\"Template\"),\n }\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
},
"summary": {
"advanced": false,
@ -1191,24 +1191,33 @@
],
"frozen": false,
"icon": "OpenAI",
"output_types": [
"Text"
],
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Text",
"method": null,
"name": "text",
"method": "text_response",
"name": "text_output",
"selected": "Text",
"types": [
"Text"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Language Model",
"method": "model_response",
"name": "model_output",
"selected": "BaseLanguageModel",
"types": [
"BaseLanguageModel"
],
"value": "__UNDEFINED__"
}
],
"template": {
"_type": "CustomComponent",
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
@ -1225,7 +1234,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-3.5-turbo\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n"
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel, Text\nfrom langflow.template import Input, Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n Input(name=\"input_value\", type=str, display_name=\"Input\", input_types=[\"Text\", \"Record\", \"Prompt\"]),\n Input(\n name=\"max_tokens\",\n type=Optional[int],\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n Input(name=\"model_kwargs\", type=dict, display_name=\"Model Kwargs\", advanced=True),\n Input(name=\"model_name\", type=str, display_name=\"Model Name\", advanced=False, options=MODEL_NAMES),\n Input(\n name=\"openai_api_base\",\n type=Optional[str],\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n Input(\n name=\"openai_api_key\",\n type=str,\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n password=True,\n ),\n Input(name=\"temperature\", type=float, display_name=\"Temperature\", advanced=False, default=0.1),\n Input(name=\"stream\", type=bool, display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n Input(\n name=\"system_message\",\n type=Optional[str],\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"model_response\"),\n ]\n\n def text_response(self) -> Text:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.model_response()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def model_response(self) -> BaseLanguageModel:\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n return output\n"
},
"input_value": {
"advanced": false,
@ -1245,10 +1254,11 @@
"name": "input_value",
"password": false,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"max_tokens": {
"advanced": true,
@ -1257,6 +1267,9 @@
"fileTypes": [],
"file_path": "",
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1266,8 +1279,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "int",
"value": 256
"type": "str",
"value": ""
},
"model_kwargs": {
"advanced": true,
@ -1276,6 +1289,9 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1285,8 +1301,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "NestedDict",
"value": {}
"type": "str",
"value": ""
},
"model_name": {
"advanced": false,
@ -1315,7 +1331,7 @@
"show": true,
"title_case": false,
"type": "str",
"value": "gpt-4-turbo-preview"
"value": ""
},
"openai_api_base": {
"advanced": true,
@ -1336,7 +1352,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"openai_api_key": {
"advanced": false,
@ -1354,11 +1371,11 @@
"name": "openai_api_key",
"password": true,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
"value": ""
},
"stream": {
"advanced": true,
@ -1367,6 +1384,9 @@
"fileTypes": [],
"file_path": "",
"info": "Stream the response from the model. Streaming works only in Chat.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1376,8 +1396,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "bool",
"value": false
"type": "str",
"value": ""
},
"system_message": {
"advanced": true,
@ -1398,7 +1418,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"temperature": {
"advanced": false,
@ -1407,23 +1428,20 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
"name": "temperature",
"password": false,
"placeholder": "",
"rangeSpec": {
"max": 1,
"min": -1,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"type": "float",
"value": 0.1
"type": "str",
"value": ""
}
}
},
@ -1590,24 +1608,33 @@
],
"frozen": false,
"icon": "OpenAI",
"output_types": [
"Text"
],
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Text",
"method": null,
"name": "text",
"method": "text_response",
"name": "text_output",
"selected": "Text",
"types": [
"Text"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Language Model",
"method": "model_response",
"name": "model_output",
"selected": "BaseLanguageModel",
"types": [
"BaseLanguageModel"
],
"value": "__UNDEFINED__"
}
],
"template": {
"_type": "CustomComponent",
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
@ -1624,7 +1651,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-3.5-turbo\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n"
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel, Text\nfrom langflow.template import Input, Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n Input(name=\"input_value\", type=str, display_name=\"Input\", input_types=[\"Text\", \"Record\", \"Prompt\"]),\n Input(\n name=\"max_tokens\",\n type=Optional[int],\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n Input(name=\"model_kwargs\", type=dict, display_name=\"Model Kwargs\", advanced=True),\n Input(name=\"model_name\", type=str, display_name=\"Model Name\", advanced=False, options=MODEL_NAMES),\n Input(\n name=\"openai_api_base\",\n type=Optional[str],\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n Input(\n name=\"openai_api_key\",\n type=str,\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n password=True,\n ),\n Input(name=\"temperature\", type=float, display_name=\"Temperature\", advanced=False, default=0.1),\n Input(name=\"stream\", type=bool, display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n Input(\n name=\"system_message\",\n type=Optional[str],\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"model_response\"),\n ]\n\n def text_response(self) -> Text:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.model_response()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def model_response(self) -> BaseLanguageModel:\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n return output\n"
},
"input_value": {
"advanced": false,
@ -1644,10 +1671,11 @@
"name": "input_value",
"password": false,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"max_tokens": {
"advanced": true,
@ -1656,6 +1684,9 @@
"fileTypes": [],
"file_path": "",
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1665,8 +1696,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "int",
"value": 256
"type": "str",
"value": ""
},
"model_kwargs": {
"advanced": true,
@ -1675,6 +1706,9 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1684,8 +1718,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "NestedDict",
"value": {}
"type": "str",
"value": ""
},
"model_name": {
"advanced": false,
@ -1714,7 +1748,7 @@
"show": true,
"title_case": false,
"type": "str",
"value": "gpt-4-turbo-preview"
"value": ""
},
"openai_api_base": {
"advanced": true,
@ -1735,7 +1769,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"openai_api_key": {
"advanced": false,
@ -1753,7 +1788,7 @@
"name": "openai_api_key",
"password": true,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str",
@ -1766,6 +1801,9 @@
"fileTypes": [],
"file_path": "",
"info": "Stream the response from the model. Streaming works only in Chat.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1775,8 +1813,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "bool",
"value": false
"type": "str",
"value": ""
},
"system_message": {
"advanced": true,
@ -1797,7 +1835,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"temperature": {
"advanced": false,
@ -1806,23 +1845,20 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
"name": "temperature",
"password": false,
"placeholder": "",
"rangeSpec": {
"max": 1,
"min": -1,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"type": "float",
"value": 0.1
"type": "str",
"value": ""
}
}
},
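Note: the template changes above follow the same refactor applied to the Python components embedded in this diff — the old build_config()/build() pattern is replaced by declarative inputs/outputs lists, each Output names the method that produces it (text_response, model_response), and edges now reference the output name text_output instead of text. The following is a minimal sketch of that pattern, based only on the Component/Input/Output usage visible in the component code above; the class EchoComponent and its field names are hypothetical and not part of this commit.

from langflow.custom import Component
from langflow.field_typing import Text
from langflow.template import Input, Output


class EchoComponent(Component):
    # Hypothetical example; illustrates the declarative component style used in this commit.
    display_name = "Echo"
    description = "Returns its input unchanged."

    # Declarative field definitions replace the old build_config() dictionary.
    inputs = [
        Input(name="input_value", type=str, display_name="Input", input_types=["Text"]),
    ]

    # Each Output points at the method that computes it; the output's `name`
    # is what edge sourceHandles reference (e.g. "text_output" above).
    outputs = [
        Output(display_name="Text", name="text_output", method="text_response"),
    ]

    def text_response(self) -> Text:
        # Inputs are exposed as attributes instead of being passed to build().
        result = self.input_value
        self.status = result  # shown as the node's status in the UI
        return result

The frontend "template" blocks in these flow JSONs appear to be regenerated from such Input definitions, which would explain the typed fields (int, bool, float, NestedDict) now being serialized as plain str fields with empty values and a ["Text"] input_types list.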

View file

@ -105,7 +105,7 @@
"sourceHandle": {
"dataType": "OpenAIModel",
"id": "OpenAIModel-EjXlN",
"name": "text",
"name": "text_output",
"output_types": [
"Text"
]
@ -122,7 +122,7 @@
"id": "reactflow__edge-OpenAIModel-EjXlN{œbaseClassesœ:[œobjectœ,œTextœ,œstrœ],œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-EjXlNœ}-ChatOutput-Q39I8{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-Q39I8œ,œinputTypesœ:[œTextœ],œtypeœ:œstrœ}",
"selected": false,
"source": "OpenAIModel-EjXlN",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-EjXlNœ, œoutput_typesœ: [œTextœ], œnameœ: œtextœ}",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-EjXlNœ, œoutput_typesœ: [œTextœ], œnameœ: œtext_outputœ}",
"style": {
"stroke": "#555"
},
@ -1159,24 +1159,33 @@
],
"frozen": false,
"icon": "OpenAI",
"output_types": [
"Text"
],
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Text",
"method": null,
"name": "text",
"method": "text_response",
"name": "text_output",
"selected": "Text",
"types": [
"Text"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Language Model",
"method": "model_response",
"name": "model_output",
"selected": "BaseLanguageModel",
"types": [
"BaseLanguageModel"
],
"value": "__UNDEFINED__"
}
],
"template": {
"_type": "CustomComponent",
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
@ -1193,7 +1202,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import NestedDict, Text\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n field_order = [\n \"max_tokens\",\n \"model_kwargs\",\n \"model_name\",\n \"openai_api_base\",\n \"openai_api_key\",\n \"temperature\",\n \"input_value\",\n \"system_message\",\n \"stream\",\n ]\n\n def build_config(self):\n return {\n \"input_value\": {\"display_name\": \"Input\", \"input_types\": [\"Text\", \"Record\", \"Prompt\"]},\n \"max_tokens\": {\n \"display_name\": \"Max Tokens\",\n \"advanced\": True,\n \"info\": \"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n },\n \"model_kwargs\": {\n \"display_name\": \"Model Kwargs\",\n \"advanced\": True,\n },\n \"model_name\": {\n \"display_name\": \"Model Name\",\n \"advanced\": False,\n \"options\": MODEL_NAMES,\n },\n \"openai_api_base\": {\n \"display_name\": \"OpenAI API Base\",\n \"advanced\": True,\n \"info\": (\n \"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\n\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\"\n ),\n },\n \"openai_api_key\": {\n \"display_name\": \"OpenAI API Key\",\n \"info\": \"The OpenAI API Key to use for the OpenAI model.\",\n \"advanced\": False,\n \"password\": True,\n },\n \"temperature\": {\n \"display_name\": \"Temperature\",\n \"advanced\": False,\n \"value\": 0.1,\n },\n \"stream\": {\n \"display_name\": \"Stream\",\n \"info\": STREAM_INFO_TEXT,\n \"advanced\": True,\n },\n \"system_message\": {\n \"display_name\": \"System Message\",\n \"info\": \"System message to pass to the model.\",\n \"advanced\": True,\n },\n }\n\n def build(\n self,\n input_value: Text,\n openai_api_key: str,\n temperature: float = 0.1,\n model_name: str = \"gpt-3.5-turbo\",\n max_tokens: Optional[int] = 256,\n model_kwargs: NestedDict = {},\n openai_api_base: Optional[str] = None,\n stream: bool = False,\n system_message: Optional[str] = None,\n ) -> Text:\n if not openai_api_base:\n openai_api_base = \"https://api.openai.com/v1\"\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n\n return self.get_chat_result(output, stream, input_value, system_message)\n"
"value": "from typing import Optional\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel, Text\nfrom langflow.template import Input, Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n Input(name=\"input_value\", type=str, display_name=\"Input\", input_types=[\"Text\", \"Record\", \"Prompt\"]),\n Input(\n name=\"max_tokens\",\n type=Optional[int],\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n Input(name=\"model_kwargs\", type=dict, display_name=\"Model Kwargs\", advanced=True),\n Input(name=\"model_name\", type=str, display_name=\"Model Name\", advanced=False, options=MODEL_NAMES),\n Input(\n name=\"openai_api_base\",\n type=Optional[str],\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\\n\\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n Input(\n name=\"openai_api_key\",\n type=str,\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n password=True,\n ),\n Input(name=\"temperature\", type=float, display_name=\"Temperature\", advanced=False, default=0.1),\n Input(name=\"stream\", type=bool, display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n Input(\n name=\"system_message\",\n type=Optional[str],\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"model_response\"),\n ]\n\n def text_response(self) -> Text:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.model_response()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def model_response(self) -> BaseLanguageModel:\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature,\n )\n return output\n"
},
"input_value": {
"advanced": false,
@ -1213,10 +1222,11 @@
"name": "input_value",
"password": false,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"max_tokens": {
"advanced": true,
@ -1225,6 +1235,9 @@
"fileTypes": [],
"file_path": "",
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1234,8 +1247,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "int",
"value": 256
"type": "str",
"value": ""
},
"model_kwargs": {
"advanced": true,
@ -1244,6 +1257,9 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1253,8 +1269,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "NestedDict",
"value": {}
"type": "str",
"value": ""
},
"model_name": {
"advanced": false,
@ -1283,7 +1299,7 @@
"show": true,
"title_case": false,
"type": "str",
"value": "gpt-3.5-turbo"
"value": ""
},
"openai_api_base": {
"advanced": true,
@ -1304,7 +1320,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"openai_api_key": {
"advanced": false,
@ -1322,11 +1339,11 @@
"name": "openai_api_key",
"password": true,
"placeholder": "",
"required": true,
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
"value": ""
},
"stream": {
"advanced": true,
@ -1335,6 +1352,9 @@
"fileTypes": [],
"file_path": "",
"info": "Stream the response from the model. Streaming works only in Chat.",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
@ -1344,8 +1364,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "bool",
"value": false
"type": "str",
"value": ""
},
"system_message": {
"advanced": true,
@ -1366,7 +1386,8 @@
"required": false,
"show": true,
"title_case": false,
"type": "str"
"type": "str",
"value": ""
},
"temperature": {
"advanced": false,
@ -1375,23 +1396,20 @@
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Text"
],
"list": false,
"load_from_db": false,
"multiline": false,
"name": "temperature",
"password": false,
"placeholder": "",
"rangeSpec": {
"max": 1,
"min": -1,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"type": "float",
"value": 0.1
"type": "str",
"value": ""
}
}
},
@ -1486,7 +1504,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n def build_config(self):\n return {\n \"template\": Input(display_name=\"Template\"),\n }\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
"value": "from langflow.custom import Component\nfrom langflow.field_typing import Input, Output\nfrom langflow.field_typing.prompt import Prompt\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n inputs = [\n Input(name=\"template\", type=Prompt, display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n Output(display_name=\"Text\", name=\"text\", method=\"format_prompt\"),\n ]\n\n async def format_prompt(self) -> str:\n prompt = await self.build_prompt()\n formatted_text = prompt.format_text()\n self.status = formatted_text\n return formatted_text\n\n async def build_prompt(\n self,\n ) -> Prompt:\n kwargs = {k: v for k, v in self._arguments.items() if k != \"template\"}\n prompt = await Prompt.from_template_and_variables(self.template, kwargs)\n self.status = prompt.format_text()\n return prompt\n"
},
"context": {
"advanced": false,