From e7043443ce7dc46c68dd2dd747118a0767b1e42c Mon Sep 17 00:00:00 2001 From: Gabriel Luiz Freitas Almeida Date: Thu, 20 Jun 2024 11:10:52 -0300 Subject: [PATCH] refactor: Migrate from BaseLanguageModel to field_typing.LanguageModel --- .../langflow/components/agents/CSVAgent.py | 6 +- .../langflow/components/agents/JsonAgent.py | 4 +- .../langflow/components/agents/SQLAgent.py | 4 +- .../components/agents/ToolCallingAgent.py | 4 +- .../components/agents/VectorStoreAgent.py | 4 +- .../agents/VectorStoreRouterAgent.py | 4 +- .../langflow/components/agents/XMLAgent.py | 4 +- .../components/chains/ConversationChain.py | 4 +- .../langflow/components/chains/LLMChain.py | 4 +- .../components/chains/LLMCheckerChain.py | 4 +- .../components/chains/LLMMathChain.py | 4 +- .../langflow/components/chains/RetrievalQA.py | 4 +- .../chains/RetrievalQAWithSourcesChain.py | 4 +- .../components/chains/SQLGenerator.py | 4 +- .../components/experimental/AgentComponent.py | 4 +- .../components/helpers/ShouldRunNext.py | 4 +- .../model_specs/AmazonBedrockSpecs.py | 4 +- .../model_specs/AnthropicLLMSpecs.py | 4 +- .../model_specs/AzureChatOpenAISpecs.py | 4 +- .../BaiduQianfanChatEndpointsSpecs.py | 5 +- .../BaiduQianfanLLMEndpointsSpecs.py | 4 +- .../model_specs/ChatAnthropicSpecs.py | 6 +- .../model_specs/ChatLiteLLMSpecs.py | 4 +- .../model_specs/ChatMistralSpecs.py | 4 +- .../components/model_specs/ChatOpenAISpecs.py | 4 +- .../model_specs/ChatVertexAISpecs.py | 6 +- .../components/model_specs/CohereSpecs.py | 4 +- .../model_specs/GoogleGenerativeAISpecs.py | 4 +- .../components/model_specs/GroqModelSpecs.py | 4 +- .../model_specs/HuggingFaceEndpointsSpecs.py | 4 +- .../components/model_specs/OllamaLLMSpecs.py | 4 +- .../components/model_specs/VertexAISpecs.py | 6 +- .../components/models/AmazonBedrockModel.py | 4 +- .../components/models/AnthropicModel.py | 4 +- .../components/models/AzureOpenAIModel.py | 6 +- .../models/BaiduQianfanChatModel.py | 6 +- .../components/models/ChatLiteLLMModel.py | 5 +- .../langflow/components/models/CohereModel.py | 4 +- .../models/GoogleGenerativeAIModel.py | 4 +- .../langflow/components/models/GroqModel.py | 4 +- .../components/models/HuggingFaceModel.py | 4 +- .../components/models/MistralModel.py | 4 +- .../langflow/components/models/OllamaModel.py | 4 +- .../langflow/components/models/OpenAIModel.py | 4 +- .../components/models/VertexAiModel.py | 6 +- .../retrievers/MultiQueryRetriever.py | 4 +- .../retrievers/SelfQueryRetriever.py | 4 +- .../retrievers/VectaraSelfQueryRetriver.py | 4 +- .../components/toolkits/OpenAPIToolkit.py | 4 +- .../toolkits/VectorStoreRouterToolkit.py | 6 +- .../components/toolkits/VectorStoreToolkit.py | 4 +- .../base/langflow/field_typing/constants.py | 3 + .../Basic Prompting (Hello, world!).json | 6 +- .../Langflow Blog Writer.json | 2648 ++++++++--------- .../Langflow Document QA.json | 8 +- .../Langflow Memory Conversation.json | 6 +- .../Langflow Prompt Chaining.json | 12 +- .../VectorStore-RAG-Flows.json | 10 +- src/frontend/src/utils/styleUtils.ts | 1 + 59 files changed, 1461 insertions(+), 1455 deletions(-) diff --git a/src/backend/base/langflow/components/agents/CSVAgent.py b/src/backend/base/langflow/components/agents/CSVAgent.py index 57774568f..21b285de2 100644 --- a/src/backend/base/langflow/components/agents/CSVAgent.py +++ b/src/backend/base/langflow/components/agents/CSVAgent.py @@ -1,7 +1,7 @@ from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_agent from langflow.custom import CustomComponent -from 
langflow.field_typing import AgentExecutor, BaseLanguageModel +from langflow.field_typing import AgentExecutor, LanguageModel class CSVAgentComponent(CustomComponent): @@ -11,7 +11,7 @@ class CSVAgentComponent(CustomComponent): def build_config(self): return { - "llm": {"display_name": "LLM", "type": BaseLanguageModel}, + "llm": {"display_name": "LLM", "type": LanguageModel}, "path": {"display_name": "Path", "field_type": "file", "suffixes": [".csv"], "file_types": [".csv"]}, "handle_parsing_errors": {"display_name": "Handle Parse Errors", "advanced": True}, "agent_type": { @@ -22,7 +22,7 @@ class CSVAgentComponent(CustomComponent): } def build( - self, llm: BaseLanguageModel, path: str, handle_parsing_errors: bool = True, agent_type: str = "openai-tools" + self, llm: LanguageModel, path: str, handle_parsing_errors: bool = True, agent_type: str = "openai-tools" ) -> AgentExecutor: # Instantiate and return the CSV agent class with the provided llm and path return create_csv_agent( diff --git a/src/backend/base/langflow/components/agents/JsonAgent.py b/src/backend/base/langflow/components/agents/JsonAgent.py index 17826ef00..cadc684f8 100644 --- a/src/backend/base/langflow/components/agents/JsonAgent.py +++ b/src/backend/base/langflow/components/agents/JsonAgent.py @@ -3,7 +3,7 @@ from langchain_community.agent_toolkits import create_json_agent from langchain_community.agent_toolkits.json.toolkit import JsonToolkit from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class JsonAgentComponent(CustomComponent): @@ -18,7 +18,7 @@ class JsonAgentComponent(CustomComponent): def build( self, - llm: BaseLanguageModel, + llm: LanguageModel, toolkit: JsonToolkit, ) -> AgentExecutor: return create_json_agent(llm=llm, toolkit=toolkit) diff --git a/src/backend/base/langflow/components/agents/SQLAgent.py b/src/backend/base/langflow/components/agents/SQLAgent.py index cd6b03f94..4f2a8e89e 100644 --- a/src/backend/base/langflow/components/agents/SQLAgent.py +++ b/src/backend/base/langflow/components/agents/SQLAgent.py @@ -6,7 +6,7 @@ from langchain_community.agent_toolkits.sql.base import create_sql_agent from langchain_community.utilities import SQLDatabase from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class SQLAgentComponent(CustomComponent): @@ -22,7 +22,7 @@ class SQLAgentComponent(CustomComponent): def build( self, - llm: BaseLanguageModel, + llm: LanguageModel, database_uri: str, verbose: bool = False, ) -> Union[AgentExecutor, Callable]: diff --git a/src/backend/base/langflow/components/agents/ToolCallingAgent.py b/src/backend/base/langflow/components/agents/ToolCallingAgent.py index eda017cc8..3783607dc 100644 --- a/src/backend/base/langflow/components/agents/ToolCallingAgent.py +++ b/src/backend/base/langflow/components/agents/ToolCallingAgent.py @@ -4,7 +4,7 @@ from langchain.agents.tool_calling_agent.base import create_tool_calling_agent from langchain_core.prompts import ChatPromptTemplate from langflow.base.agents.agent import LCAgentComponent -from langflow.field_typing import BaseLanguageModel, Text, Tool +from langflow.field_typing import LanguageModel, Text, Tool from langflow.schema import Data @@ -39,7 +39,7 @@ class ToolCallingAgentComponent(LCAgentComponent): async def build( self, input_value: str, - llm: BaseLanguageModel, + llm: LanguageModel, tools: List[Tool], user_prompt: str = "{input}", 
message_history: Optional[List[Data]] = None, diff --git a/src/backend/base/langflow/components/agents/VectorStoreAgent.py b/src/backend/base/langflow/components/agents/VectorStoreAgent.py index 3cba51a09..2bb324e50 100644 --- a/src/backend/base/langflow/components/agents/VectorStoreAgent.py +++ b/src/backend/base/langflow/components/agents/VectorStoreAgent.py @@ -4,7 +4,7 @@ from langchain.agents import AgentExecutor, create_vectorstore_agent from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreToolkit from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class VectorStoreAgentComponent(CustomComponent): @@ -19,7 +19,7 @@ class VectorStoreAgentComponent(CustomComponent): def build( self, - llm: BaseLanguageModel, + llm: LanguageModel, vector_store_toolkit: VectorStoreToolkit, ) -> Union[AgentExecutor, Callable]: return create_vectorstore_agent(llm=llm, toolkit=vector_store_toolkit) diff --git a/src/backend/base/langflow/components/agents/VectorStoreRouterAgent.py b/src/backend/base/langflow/components/agents/VectorStoreRouterAgent.py index e483f0d2c..a696a19c2 100644 --- a/src/backend/base/langflow/components/agents/VectorStoreRouterAgent.py +++ b/src/backend/base/langflow/components/agents/VectorStoreRouterAgent.py @@ -2,7 +2,7 @@ from typing import Callable from langchain.agents import create_vectorstore_router_agent from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreRouterToolkit -from langchain_core.language_models.base import BaseLanguageModel +from langflow.field_typing import LanguageModel from langflow.custom import CustomComponent @@ -17,5 +17,5 @@ class VectorStoreRouterAgentComponent(CustomComponent): "vectorstoreroutertoolkit": {"display_name": "Vector Store Router Toolkit"}, } - def build(self, llm: BaseLanguageModel, vectorstoreroutertoolkit: VectorStoreRouterToolkit) -> Callable: + def build(self, llm: LanguageModel, vectorstoreroutertoolkit: VectorStoreRouterToolkit) -> Callable: return create_vectorstore_router_agent(llm=llm, toolkit=vectorstoreroutertoolkit) diff --git a/src/backend/base/langflow/components/agents/XMLAgent.py b/src/backend/base/langflow/components/agents/XMLAgent.py index 1b49520f1..b0a9b1873 100644 --- a/src/backend/base/langflow/components/agents/XMLAgent.py +++ b/src/backend/base/langflow/components/agents/XMLAgent.py @@ -4,7 +4,7 @@ from langchain.agents import create_xml_agent from langchain_core.prompts import ChatPromptTemplate from langflow.base.agents.agent import LCAgentComponent -from langflow.field_typing import BaseLanguageModel, Text, Tool +from langflow.field_typing import LanguageModel, Text, Tool from langflow.schema import Data @@ -72,7 +72,7 @@ class XMLAgentComponent(LCAgentComponent): async def build( self, input_value: str, - llm: BaseLanguageModel, + llm: LanguageModel, tools: List[Tool], user_prompt: str = "{input}", system_message: str = "You are a helpful assistant", diff --git a/src/backend/base/langflow/components/chains/ConversationChain.py b/src/backend/base/langflow/components/chains/ConversationChain.py index 0801f4623..a400d4993 100644 --- a/src/backend/base/langflow/components/chains/ConversationChain.py +++ b/src/backend/base/langflow/components/chains/ConversationChain.py @@ -3,7 +3,7 @@ from typing import Optional from langchain.chains import ConversationChain from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, BaseMemory, Text +from 
langflow.field_typing import BaseMemory, LanguageModel, Text class ConversationChainComponent(CustomComponent): @@ -27,7 +27,7 @@ class ConversationChainComponent(CustomComponent): def build( self, input_value: Text, - llm: BaseLanguageModel, + llm: LanguageModel, memory: Optional[BaseMemory] = None, ) -> Text: if memory is None: diff --git a/src/backend/base/langflow/components/chains/LLMChain.py b/src/backend/base/langflow/components/chains/LLMChain.py index 0387b50f3..b0f6913bf 100644 --- a/src/backend/base/langflow/components/chains/LLMChain.py +++ b/src/backend/base/langflow/components/chains/LLMChain.py @@ -4,7 +4,7 @@ from langchain.chains.llm import LLMChain from langchain_core.prompts import PromptTemplate from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, BaseMemory, Text +from langflow.field_typing import BaseMemory, LanguageModel, Text class LLMChainComponent(CustomComponent): @@ -21,7 +21,7 @@ class LLMChainComponent(CustomComponent): def build( self, template: Text, - llm: BaseLanguageModel, + llm: LanguageModel, memory: Optional[BaseMemory] = None, ) -> Text: prompt = PromptTemplate.from_template(template) diff --git a/src/backend/base/langflow/components/chains/LLMCheckerChain.py b/src/backend/base/langflow/components/chains/LLMCheckerChain.py index f413081b1..9c86c31dd 100644 --- a/src/backend/base/langflow/components/chains/LLMCheckerChain.py +++ b/src/backend/base/langflow/components/chains/LLMCheckerChain.py @@ -1,7 +1,7 @@ from langchain.chains import LLMCheckerChain from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text class LLMCheckerChainComponent(CustomComponent): @@ -21,7 +21,7 @@ class LLMCheckerChainComponent(CustomComponent): def build( self, input_value: Text, - llm: BaseLanguageModel, + llm: LanguageModel, ) -> Text: chain = LLMCheckerChain.from_llm(llm=llm) response = chain.invoke({chain.input_key: input_value}) diff --git a/src/backend/base/langflow/components/chains/LLMMathChain.py b/src/backend/base/langflow/components/chains/LLMMathChain.py index 2bb573ef5..9cb73ef71 100644 --- a/src/backend/base/langflow/components/chains/LLMMathChain.py +++ b/src/backend/base/langflow/components/chains/LLMMathChain.py @@ -3,7 +3,7 @@ from typing import Optional from langchain.chains import LLMChain, LLMMathChain from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, BaseMemory, Text +from langflow.field_typing import BaseMemory, LanguageModel, Text class LLMMathChainComponent(CustomComponent): @@ -27,7 +27,7 @@ class LLMMathChainComponent(CustomComponent): def build( self, input_value: Text, - llm: BaseLanguageModel, + llm: LanguageModel, llm_chain: LLMChain, input_key: str = "question", output_key: str = "answer", diff --git a/src/backend/base/langflow/components/chains/RetrievalQA.py b/src/backend/base/langflow/components/chains/RetrievalQA.py index 074800868..0d9efdc65 100644 --- a/src/backend/base/langflow/components/chains/RetrievalQA.py +++ b/src/backend/base/langflow/components/chains/RetrievalQA.py @@ -4,7 +4,7 @@ from langchain.chains.retrieval_qa.base import RetrievalQA from langchain_core.documents import Document from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, BaseMemory, BaseRetriever, Text +from langflow.field_typing import BaseMemory, BaseRetriever, LanguageModel, Text from langflow.schema import Data @@ -29,7 
+29,7 @@ class RetrievalQAComponent(CustomComponent): def build( self, - llm: BaseLanguageModel, + llm: LanguageModel, chain_type: str, retriever: BaseRetriever, input_value: str = "", diff --git a/src/backend/base/langflow/components/chains/RetrievalQAWithSourcesChain.py b/src/backend/base/langflow/components/chains/RetrievalQAWithSourcesChain.py index ea2d950a9..ae79b9e16 100644 --- a/src/backend/base/langflow/components/chains/RetrievalQAWithSourcesChain.py +++ b/src/backend/base/langflow/components/chains/RetrievalQAWithSourcesChain.py @@ -4,7 +4,7 @@ from langchain.chains import RetrievalQAWithSourcesChain from langchain_core.documents import Document from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, BaseMemory, BaseRetriever, Text +from langflow.field_typing import BaseMemory, BaseRetriever, LanguageModel, Text class RetrievalQAWithSourcesChainComponent(CustomComponent): @@ -32,7 +32,7 @@ class RetrievalQAWithSourcesChainComponent(CustomComponent): self, input_value: Text, retriever: BaseRetriever, - llm: BaseLanguageModel, + llm: LanguageModel, chain_type: str, memory: Optional[BaseMemory] = None, return_source_documents: Optional[bool] = True, diff --git a/src/backend/base/langflow/components/chains/SQLGenerator.py b/src/backend/base/langflow/components/chains/SQLGenerator.py index a6ff0ee2f..dc085c46e 100644 --- a/src/backend/base/langflow/components/chains/SQLGenerator.py +++ b/src/backend/base/langflow/components/chains/SQLGenerator.py @@ -6,7 +6,7 @@ from langchain_core.prompts import PromptTemplate from langchain_core.runnables import Runnable from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text class SQLGeneratorComponent(CustomComponent): @@ -35,7 +35,7 @@ class SQLGeneratorComponent(CustomComponent): self, input_value: Text, db: SQLDatabase, - llm: BaseLanguageModel, + llm: LanguageModel, top_k: int = 5, prompt: Optional[Text] = None, ) -> Text: diff --git a/src/backend/base/langflow/components/experimental/AgentComponent.py b/src/backend/base/langflow/components/experimental/AgentComponent.py index c40e416f1..657dadc0a 100644 --- a/src/backend/base/langflow/components/experimental/AgentComponent.py +++ b/src/backend/base/langflow/components/experimental/AgentComponent.py @@ -5,7 +5,7 @@ from langchain_core.prompts.chat import HumanMessagePromptTemplate, SystemMessag from langflow.base.agents.agent import LCAgentComponent from langflow.base.agents.utils import AGENTS, AgentSpec, get_agents_list -from langflow.field_typing import BaseLanguageModel, Text, Tool +from langflow.field_typing import LanguageModel, Text, Tool from langflow.schema import Data from langflow.schema.dotdict import dotdict @@ -145,7 +145,7 @@ class AgentComponent(LCAgentComponent): self, agent_name: str, input_value: str, - llm: BaseLanguageModel, + llm: LanguageModel, tools: List[Tool], system_message: str = "You are a helpful assistant. 
Help the user answer any questions.", user_prompt: str = "{input}", diff --git a/src/backend/base/langflow/components/helpers/ShouldRunNext.py b/src/backend/base/langflow/components/helpers/ShouldRunNext.py index 0d20706ea..7ca5651a7 100644 --- a/src/backend/base/langflow/components/helpers/ShouldRunNext.py +++ b/src/backend/base/langflow/components/helpers/ShouldRunNext.py @@ -2,14 +2,14 @@ from langchain_core.messages import BaseMessage from langchain_core.prompts import PromptTemplate from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text class ShouldRunNextComponent(CustomComponent): display_name = "Should Run Next" description = "Determines if a vertex is runnable." - def build(self, llm: BaseLanguageModel, question: str, context: str, retries: int = 3) -> Text: + def build(self, llm: LanguageModel, question: str, context: str, retries: int = 3) -> Text: template = "Given the following question and the context below, answer with a yes or no.\n\n{error_message}\n\nQuestion: {question}\n\nContext: {context}\n\nAnswer:" prompt = PromptTemplate.from_template(template) diff --git a/src/backend/base/langflow/components/model_specs/AmazonBedrockSpecs.py b/src/backend/base/langflow/components/model_specs/AmazonBedrockSpecs.py index 0e27e620f..6799e589d 100644 --- a/src/backend/base/langflow/components/model_specs/AmazonBedrockSpecs.py +++ b/src/backend/base/langflow/components/model_specs/AmazonBedrockSpecs.py @@ -3,7 +3,7 @@ from typing import Optional from langchain_community.llms.bedrock import Bedrock from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class AmazonBedrockComponent(CustomComponent): @@ -46,7 +46,7 @@ class AmazonBedrockComponent(CustomComponent): endpoint_url: Optional[str] = None, streaming: bool = False, cache: Optional[bool] = None, - ) -> BaseLanguageModel: + ) -> LanguageModel: try: output = Bedrock( credentials_profile_name=credentials_profile_name, diff --git a/src/backend/base/langflow/components/model_specs/AnthropicLLMSpecs.py b/src/backend/base/langflow/components/model_specs/AnthropicLLMSpecs.py index 1c97c1e28..eaabb6212 100644 --- a/src/backend/base/langflow/components/model_specs/AnthropicLLMSpecs.py +++ b/src/backend/base/langflow/components/model_specs/AnthropicLLMSpecs.py @@ -1,10 +1,10 @@ from typing import Optional from langchain_anthropic import ChatAnthropic -from langchain_core.language_models import BaseLanguageModel from pydantic.v1 import SecretStr from langflow.custom import CustomComponent +from langflow.field_typing import LanguageModel class ChatAntropicSpecsComponent(CustomComponent): @@ -57,7 +57,7 @@ class ChatAntropicSpecsComponent(CustomComponent): max_tokens: Optional[int] = 1000, temperature: Optional[float] = None, api_endpoint: Optional[str] = None, - ) -> BaseLanguageModel: + ) -> LanguageModel: # Set default API endpoint if not provided if not api_endpoint: api_endpoint = "https://api.anthropic.com" diff --git a/src/backend/base/langflow/components/model_specs/AzureChatOpenAISpecs.py b/src/backend/base/langflow/components/model_specs/AzureChatOpenAISpecs.py index 947a1e2a3..18cceed6f 100644 --- a/src/backend/base/langflow/components/model_specs/AzureChatOpenAISpecs.py +++ b/src/backend/base/langflow/components/model_specs/AzureChatOpenAISpecs.py @@ -1,10 +1,10 @@ from typing import Optional -from langchain_core.language_models import 
BaseLanguageModel from langchain_openai import AzureChatOpenAI from pydantic.v1 import SecretStr from langflow.custom import CustomComponent +from langflow.field_typing import LanguageModel class AzureChatOpenAISpecsComponent(CustomComponent): @@ -81,7 +81,7 @@ class AzureChatOpenAISpecsComponent(CustomComponent): api_version: str, temperature: float = 0.7, max_tokens: Optional[int] = 1000, - ) -> BaseLanguageModel: + ) -> LanguageModel: if api_key: azure_api_key = SecretStr(api_key) else: diff --git a/src/backend/base/langflow/components/model_specs/BaiduQianfanChatEndpointsSpecs.py b/src/backend/base/langflow/components/model_specs/BaiduQianfanChatEndpointsSpecs.py index a353410ad..3764203f9 100644 --- a/src/backend/base/langflow/components/model_specs/BaiduQianfanChatEndpointsSpecs.py +++ b/src/backend/base/langflow/components/model_specs/BaiduQianfanChatEndpointsSpecs.py @@ -1,11 +1,10 @@ from typing import Optional from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint - from pydantic.v1 import SecretStr from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class QianfanChatEndpointComponent(CustomComponent): @@ -80,7 +79,7 @@ class QianfanChatEndpointComponent(CustomComponent): temperature: Optional[float] = None, penalty_score: Optional[float] = None, endpoint: Optional[str] = None, - ) -> BaseLanguageModel: + ) -> LanguageModel: try: output = QianfanChatEndpoint( # type: ignore model=model, diff --git a/src/backend/base/langflow/components/model_specs/BaiduQianfanLLMEndpointsSpecs.py b/src/backend/base/langflow/components/model_specs/BaiduQianfanLLMEndpointsSpecs.py index 273bb5d98..e12ff565f 100644 --- a/src/backend/base/langflow/components/model_specs/BaiduQianfanLLMEndpointsSpecs.py +++ b/src/backend/base/langflow/components/model_specs/BaiduQianfanLLMEndpointsSpecs.py @@ -3,7 +3,7 @@ from typing import Optional from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class QianfanLLMEndpointComponent(CustomComponent): @@ -78,7 +78,7 @@ class QianfanLLMEndpointComponent(CustomComponent): temperature: Optional[float] = None, penalty_score: Optional[float] = None, endpoint: Optional[str] = None, - ) -> BaseLanguageModel: + ) -> LanguageModel: try: output = QianfanLLMEndpoint( # type: ignore model=model, diff --git a/src/backend/base/langflow/components/model_specs/ChatAnthropicSpecs.py b/src/backend/base/langflow/components/model_specs/ChatAnthropicSpecs.py index ce01320eb..6f8583a9d 100644 --- a/src/backend/base/langflow/components/model_specs/ChatAnthropicSpecs.py +++ b/src/backend/base/langflow/components/model_specs/ChatAnthropicSpecs.py @@ -3,9 +3,9 @@ from typing import Optional from langchain_anthropic import ChatAnthropic from pydantic.v1.types import SecretStr from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class AnthropicLLM(CustomComponent): @@ -69,7 +69,7 @@ class AnthropicLLM(CustomComponent): max_tokens: Optional[int] = 1000, temperature: Optional[float] = None, anthropic_api_url: Optional[str] = None, - ) -> BaseLanguageModel: + ) -> LanguageModel: # Set default API endpoint if not provided if not anthropic_api_url: anthropic_api_url = "https://api.anthropic.com" diff --git 
a/src/backend/base/langflow/components/model_specs/ChatLiteLLMSpecs.py b/src/backend/base/langflow/components/model_specs/ChatLiteLLMSpecs.py index b3bce849e..1e7c32242 100644 --- a/src/backend/base/langflow/components/model_specs/ChatLiteLLMSpecs.py +++ b/src/backend/base/langflow/components/model_specs/ChatLiteLLMSpecs.py @@ -3,7 +3,7 @@ from typing import Any, Dict, Optional from langchain_community.chat_models.litellm import ChatLiteLLM, ChatLiteLLMException from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class ChatLiteLLMComponent(CustomComponent): @@ -116,7 +116,7 @@ class ChatLiteLLMComponent(CustomComponent): max_tokens: int = 256, max_retries: int = 6, verbose: bool = False, - ) -> BaseLanguageModel: + ) -> LanguageModel: try: import litellm # type: ignore diff --git a/src/backend/base/langflow/components/model_specs/ChatMistralSpecs.py b/src/backend/base/langflow/components/model_specs/ChatMistralSpecs.py index 73bbc3220..e977e5713 100644 --- a/src/backend/base/langflow/components/model_specs/ChatMistralSpecs.py +++ b/src/backend/base/langflow/components/model_specs/ChatMistralSpecs.py @@ -4,7 +4,7 @@ from langchain_mistralai import ChatMistralAI from pydantic.v1 import SecretStr from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class MistralAIModelComponent(CustomComponent): @@ -68,7 +68,7 @@ class MistralAIModelComponent(CustomComponent): mistral_api_key: Optional[str] = None, max_tokens: Optional[int] = None, mistral_api_base: Optional[str] = None, - ) -> BaseLanguageModel: + ) -> LanguageModel: # Set default API endpoint if not provided if not mistral_api_base: mistral_api_base = "https://api.mistral.ai" diff --git a/src/backend/base/langflow/components/model_specs/ChatOpenAISpecs.py b/src/backend/base/langflow/components/model_specs/ChatOpenAISpecs.py index 76974a00f..f4fe8af96 100644 --- a/src/backend/base/langflow/components/model_specs/ChatOpenAISpecs.py +++ b/src/backend/base/langflow/components/model_specs/ChatOpenAISpecs.py @@ -5,7 +5,7 @@ from pydantic.v1 import SecretStr from langflow.base.models.openai_constants import MODEL_NAMES from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, NestedDict +from langflow.field_typing import LanguageModel, NestedDict class ChatOpenAIComponent(CustomComponent): @@ -57,7 +57,7 @@ class ChatOpenAIComponent(CustomComponent): openai_api_base: Optional[str] = None, openai_api_key: Optional[str] = None, temperature: float = 0.7, - ) -> BaseLanguageModel: + ) -> LanguageModel: if not openai_api_base: openai_api_base = "https://api.openai.com/v1" if openai_api_key: diff --git a/src/backend/base/langflow/components/model_specs/ChatVertexAISpecs.py b/src/backend/base/langflow/components/model_specs/ChatVertexAISpecs.py index c6dd0154a..02cc675e9 100644 --- a/src/backend/base/langflow/components/model_specs/ChatVertexAISpecs.py +++ b/src/backend/base/langflow/components/model_specs/ChatVertexAISpecs.py @@ -1,8 +1,10 @@ from typing import Optional from langchain_community.chat_models.vertexai import ChatVertexAI + + from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class ChatVertexAIComponent(CustomComponent): @@ -70,7 +72,7 @@ class ChatVertexAIComponent(CustomComponent): top_k: int = 40, top_p: float = 0.95, 
verbose: bool = False, - ) -> BaseLanguageModel: + ) -> LanguageModel: return ChatVertexAI( credentials=credentials, location=location, diff --git a/src/backend/base/langflow/components/model_specs/CohereSpecs.py b/src/backend/base/langflow/components/model_specs/CohereSpecs.py index 2e2a1fa7e..f65a441f1 100644 --- a/src/backend/base/langflow/components/model_specs/CohereSpecs.py +++ b/src/backend/base/langflow/components/model_specs/CohereSpecs.py @@ -1,7 +1,7 @@ from typing import Optional from langchain_cohere import ChatCohere -from langchain_core.language_models.base import BaseLanguageModel +from langflow.field_typing import LanguageModel from pydantic.v1 import SecretStr from langflow.custom import CustomComponent @@ -29,7 +29,7 @@ class CohereComponent(CustomComponent): cohere_api_key: str, max_tokens: Optional[int] = 256, temperature: float = 0.75, - ) -> BaseLanguageModel: + ) -> LanguageModel: if cohere_api_key: api_key = SecretStr(cohere_api_key) else: diff --git a/src/backend/base/langflow/components/model_specs/GoogleGenerativeAISpecs.py b/src/backend/base/langflow/components/model_specs/GoogleGenerativeAISpecs.py index 534085938..cf71ddd39 100644 --- a/src/backend/base/langflow/components/model_specs/GoogleGenerativeAISpecs.py +++ b/src/backend/base/langflow/components/model_specs/GoogleGenerativeAISpecs.py @@ -4,7 +4,7 @@ from langchain_google_genai import ChatGoogleGenerativeAI # type: ignore from pydantic.v1.types import SecretStr from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, RangeSpec +from langflow.field_typing import LanguageModel, RangeSpec class GoogleGenerativeAIComponent(CustomComponent): @@ -62,7 +62,7 @@ class GoogleGenerativeAIComponent(CustomComponent): top_k: Optional[int] = None, top_p: Optional[float] = None, n: Optional[int] = 1, - ) -> BaseLanguageModel: + ) -> LanguageModel: return ChatGoogleGenerativeAI( model=model, max_output_tokens=max_output_tokens or None, # type: ignore diff --git a/src/backend/base/langflow/components/model_specs/GroqModelSpecs.py b/src/backend/base/langflow/components/model_specs/GroqModelSpecs.py index ec203471c..4851e700b 100644 --- a/src/backend/base/langflow/components/model_specs/GroqModelSpecs.py +++ b/src/backend/base/langflow/components/model_specs/GroqModelSpecs.py @@ -6,7 +6,7 @@ from pydantic.v1 import SecretStr from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.groq_constants import MODEL_NAMES from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class GroqModelSpecs(LCModelComponent): @@ -74,7 +74,7 @@ class GroqModelSpecs(LCModelComponent): temperature: float = 0.1, n: Optional[int] = 1, stream: bool = False, - ) -> BaseLanguageModel: + ) -> LanguageModel: return ChatGroq( model_name=model_name, max_tokens=max_tokens or None, # type: ignore diff --git a/src/backend/base/langflow/components/model_specs/HuggingFaceEndpointsSpecs.py b/src/backend/base/langflow/components/model_specs/HuggingFaceEndpointsSpecs.py index 4de68365f..23a2e50c1 100644 --- a/src/backend/base/langflow/components/model_specs/HuggingFaceEndpointsSpecs.py +++ b/src/backend/base/langflow/components/model_specs/HuggingFaceEndpointsSpecs.py @@ -3,7 +3,7 @@ from typing import Optional from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from 
langflow.field_typing import LanguageModel class HuggingFaceEndpointsComponent(CustomComponent): @@ -32,7 +32,7 @@ class HuggingFaceEndpointsComponent(CustomComponent): task: str = "text2text-generation", huggingfacehub_api_token: Optional[str] = None, model_kwargs: Optional[dict] = None, - ) -> BaseLanguageModel: + ) -> LanguageModel: try: output = HuggingFaceEndpoint( # type: ignore endpoint_url=endpoint_url, diff --git a/src/backend/base/langflow/components/model_specs/OllamaLLMSpecs.py b/src/backend/base/langflow/components/model_specs/OllamaLLMSpecs.py index 1c416f1b1..76372f8cd 100644 --- a/src/backend/base/langflow/components/model_specs/OllamaLLMSpecs.py +++ b/src/backend/base/langflow/components/model_specs/OllamaLLMSpecs.py @@ -3,7 +3,7 @@ from typing import List, Optional from langchain_community.llms.ollama import Ollama from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class OllamaLLM(CustomComponent): @@ -118,7 +118,7 @@ class OllamaLLM(CustomComponent): tfs_z: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[int] = None, - ) -> BaseLanguageModel: + ) -> LanguageModel: if not base_url: base_url = "http://localhost:11434" diff --git a/src/backend/base/langflow/components/model_specs/VertexAISpecs.py b/src/backend/base/langflow/components/model_specs/VertexAISpecs.py index 8e7794bd4..d7e646c5f 100644 --- a/src/backend/base/langflow/components/model_specs/VertexAISpecs.py +++ b/src/backend/base/langflow/components/model_specs/VertexAISpecs.py @@ -1,8 +1,10 @@ from typing import Dict, Optional from langchain_community.llms.vertexai import VertexAI + + from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class VertexAIComponent(CustomComponent): @@ -128,7 +130,7 @@ class VertexAIComponent(CustomComponent): top_p: float = 0.95, tuned_model_name: Optional[str] = None, verbose: bool = False, - ) -> BaseLanguageModel: + ) -> LanguageModel: return VertexAI( credentials=credentials, location=location, diff --git a/src/backend/base/langflow/components/models/AmazonBedrockModel.py b/src/backend/base/langflow/components/models/AmazonBedrockModel.py index ecf7053dc..64f1881cb 100644 --- a/src/backend/base/langflow/components/models/AmazonBedrockModel.py +++ b/src/backend/base/langflow/components/models/AmazonBedrockModel.py @@ -2,7 +2,7 @@ from langchain_aws import ChatBedrock from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text from langflow.io import BoolInput, DictInput, DropdownInput, MessageInput, Output, StrInput @@ -78,7 +78,7 @@ class AmazonBedrockComponent(LCModelComponent): self.status = result return result - def build_model(self) -> BaseLanguageModel: + def build_model(self) -> LanguageModel: model_id = self.model_id credentials_profile_name = self.credentials_profile_name region_name = self.region_name diff --git a/src/backend/base/langflow/components/models/AnthropicModel.py b/src/backend/base/langflow/components/models/AnthropicModel.py index 9e65af05c..4562165a3 100644 --- a/src/backend/base/langflow/components/models/AnthropicModel.py +++ b/src/backend/base/langflow/components/models/AnthropicModel.py @@ -3,7 +3,7 @@ from pydantic.v1 import SecretStr from langflow.base.constants import STREAM_INFO_TEXT 
from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, Output, SecretStrInput, TextInput @@ -82,7 +82,7 @@ class AnthropicModelComponent(LCModelComponent): self.status = result.content return prefill + result.content - def build_model(self) -> BaseLanguageModel: + def build_model(self) -> LanguageModel: model = self.model anthropic_api_key = self.anthropic_api_key max_tokens = self.max_tokens diff --git a/src/backend/base/langflow/components/models/AzureOpenAIModel.py b/src/backend/base/langflow/components/models/AzureOpenAIModel.py index afbf1c2dc..2998e7935 100644 --- a/src/backend/base/langflow/components/models/AzureOpenAIModel.py +++ b/src/backend/base/langflow/components/models/AzureOpenAIModel.py @@ -3,8 +3,8 @@ from pydantic.v1 import SecretStr from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel, Text -from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, StrInput, SecretStrInput +from langflow.field_typing import LanguageModel, Text +from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, SecretStrInput, StrInput class AzureChatOpenAIComponent(LCModelComponent): @@ -87,7 +87,7 @@ class AzureChatOpenAIComponent(LCModelComponent): self.status = result return result - def model_response(self) -> BaseLanguageModel: + def model_response(self) -> LanguageModel: model = self.model azure_endpoint = self.azure_endpoint azure_deployment = self.azure_deployment diff --git a/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py b/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py index cf2c2d2ff..c19b36804 100644 --- a/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py +++ b/src/backend/base/langflow/components/models/BaiduQianfanChatModel.py @@ -3,8 +3,8 @@ from pydantic.v1 import SecretStr from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel, Text -from langflow.io import BoolInput, FloatInput, Output, SecretStrInput, TextInput, DropdownInput +from langflow.field_typing import LanguageModel, Text +from langflow.io import BoolInput, DropdownInput, FloatInput, Output, SecretStrInput, TextInput class QianfanChatEndpointComponent(LCModelComponent): @@ -98,7 +98,7 @@ class QianfanChatEndpointComponent(LCModelComponent): self.status = result return result - def build_model(self) -> BaseLanguageModel: + def build_model(self) -> LanguageModel: model = self.model qianfan_ak = self.qianfan_ak qianfan_sk = self.qianfan_sk diff --git a/src/backend/base/langflow/components/models/ChatLiteLLMModel.py b/src/backend/base/langflow/components/models/ChatLiteLLMModel.py index af39252e2..4bf99f39f 100644 --- a/src/backend/base/langflow/components/models/ChatLiteLLMModel.py +++ b/src/backend/base/langflow/components/models/ChatLiteLLMModel.py @@ -1,9 +1,10 @@ from typing import Optional from langchain_community.chat_models.litellm import ChatLiteLLM, ChatLiteLLMException + from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel +from 
langflow.field_typing import LanguageModel from langflow.io import ( BoolInput, DictInput, @@ -137,7 +138,7 @@ class ChatLiteLLMModelComponent(LCModelComponent): self.status = message return message - def build_model(self) -> BaseLanguageModel: + def build_model(self) -> LanguageModel: try: import litellm # type: ignore diff --git a/src/backend/base/langflow/components/models/CohereModel.py b/src/backend/base/langflow/components/models/CohereModel.py index 6f1baa424..d9c7f0f86 100644 --- a/src/backend/base/langflow/components/models/CohereModel.py +++ b/src/backend/base/langflow/components/models/CohereModel.py @@ -4,7 +4,7 @@ from pydantic.v1 import SecretStr from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text from langflow.io import BoolInput, FloatInput, MessageInput, Output, SecretStrInput, StrInput @@ -46,7 +46,7 @@ class CohereComponent(LCModelComponent): self.status = result return result - def build_model(self) -> BaseLanguageModel | BaseChatModel: + def build_model(self) -> LanguageModel | BaseChatModel: cohere_api_key = self.cohere_api_key temperature = self.temperature diff --git a/src/backend/base/langflow/components/models/GoogleGenerativeAIModel.py b/src/backend/base/langflow/components/models/GoogleGenerativeAIModel.py index 9e4f392c4..823583a2f 100644 --- a/src/backend/base/langflow/components/models/GoogleGenerativeAIModel.py +++ b/src/backend/base/langflow/components/models/GoogleGenerativeAIModel.py @@ -2,7 +2,7 @@ from pydantic.v1 import SecretStr from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, SecretStrInput, StrInput @@ -82,7 +82,7 @@ class GoogleGenerativeAIComponent(LCModelComponent): self.status = result return result - def build_model(self) -> BaseLanguageModel: + def build_model(self) -> LanguageModel: try: from langchain_google_genai import ChatGoogleGenerativeAI except ImportError: diff --git a/src/backend/base/langflow/components/models/GroqModel.py b/src/backend/base/langflow/components/models/GroqModel.py index a30dc21ad..02176ef09 100644 --- a/src/backend/base/langflow/components/models/GroqModel.py +++ b/src/backend/base/langflow/components/models/GroqModel.py @@ -4,7 +4,7 @@ from pydantic.v1 import SecretStr from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.groq_constants import MODEL_NAMES from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, Output, SecretStrInput, TextInput @@ -82,7 +82,7 @@ class GroqModel(LCModelComponent): self.status = result return result - def build_model(self) -> BaseLanguageModel: + def build_model(self) -> LanguageModel: groq_api_key = self.groq_api_key model_name = self.model_name max_tokens = self.max_tokens diff --git a/src/backend/base/langflow/components/models/HuggingFaceModel.py b/src/backend/base/langflow/components/models/HuggingFaceModel.py index d8f7a482d..5c2335cb4 100644 --- a/src/backend/base/langflow/components/models/HuggingFaceModel.py +++ 
b/src/backend/base/langflow/components/models/HuggingFaceModel.py @@ -3,7 +3,7 @@ from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text from langflow.io import BoolInput, DictInput, DropdownInput, MessageInput, Output, SecretStrInput, StrInput @@ -45,7 +45,7 @@ class HuggingFaceEndpointsComponent(LCModelComponent): self.status = result return result - def build_model(self) -> BaseLanguageModel: + def build_model(self) -> LanguageModel: endpoint_url = self.endpoint_url task = self.task huggingfacehub_api_token = self.huggingfacehub_api_token diff --git a/src/backend/base/langflow/components/models/MistralModel.py b/src/backend/base/langflow/components/models/MistralModel.py index c90f4c230..cc74e75fd 100644 --- a/src/backend/base/langflow/components/models/MistralModel.py +++ b/src/backend/base/langflow/components/models/MistralModel.py @@ -3,7 +3,7 @@ from pydantic.v1 import SecretStr from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, SecretStrInput, StrInput @@ -79,7 +79,7 @@ class MistralAIModelComponent(LCModelComponent): self.status = result return result - def build_model(self) -> BaseLanguageModel: + def build_model(self) -> LanguageModel: mistral_api_key = self.mistral_api_key temperature = self.temperature model_name = self.model_name diff --git a/src/backend/base/langflow/components/models/OllamaModel.py b/src/backend/base/langflow/components/models/OllamaModel.py index a09a30730..5ef47c8ef 100644 --- a/src/backend/base/langflow/components/models/OllamaModel.py +++ b/src/backend/base/langflow/components/models/OllamaModel.py @@ -2,7 +2,7 @@ from langchain_community.chat_models import ChatOllama from langchain_core.language_models.chat_models import BaseChatModel from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, Output, StrInput @@ -177,7 +177,7 @@ class ChatOllamaComponent(LCModelComponent): self.status = result return result - def build_model(self) -> BaseLanguageModel | BaseChatModel: + def build_model(self) -> LanguageModel | BaseChatModel: # Mapping mirostat settings to their corresponding values mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2} diff --git a/src/backend/base/langflow/components/models/OpenAIModel.py b/src/backend/base/langflow/components/models/OpenAIModel.py index 932655c16..6ad632fa3 100644 --- a/src/backend/base/langflow/components/models/OpenAIModel.py +++ b/src/backend/base/langflow/components/models/OpenAIModel.py @@ -7,7 +7,7 @@ from pydantic.v1 import SecretStr from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent from langflow.base.models.openai_constants import MODEL_NAMES -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel from langflow.inputs import ( BoolInput, DictInput, @@ -89,7 +89,7 @@ class OpenAIModelComponent(LCModelComponent): 
self.status = result return result - def build_model(self) -> BaseLanguageModel: + def build_model(self) -> LanguageModel: # self.output_schea is a list of dictionaries # let's convert it to a dictionary output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {}) diff --git a/src/backend/base/langflow/components/models/VertexAiModel.py b/src/backend/base/langflow/components/models/VertexAiModel.py index 28be22b8a..e70aa2d06 100644 --- a/src/backend/base/langflow/components/models/VertexAiModel.py +++ b/src/backend/base/langflow/components/models/VertexAiModel.py @@ -1,7 +1,9 @@ from langchain_google_vertexai import ChatVertexAI + + from langflow.base.constants import STREAM_INFO_TEXT from langflow.base.models.model import LCModelComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text from langflow.io import BoolInput, FileInput, FloatInput, IntInput, MessageInput, MultilineInput, Output, StrInput @@ -60,7 +62,7 @@ class ChatVertexAIComponent(LCModelComponent): self.status = result return result - def build_model(self) -> BaseLanguageModel: + def build_model(self) -> LanguageModel: credentials = self.credentials location = self.location max_output_tokens = self.max_output_tokens diff --git a/src/backend/base/langflow/components/retrievers/MultiQueryRetriever.py b/src/backend/base/langflow/components/retrievers/MultiQueryRetriever.py index d9197ece2..f7b2eda6b 100644 --- a/src/backend/base/langflow/components/retrievers/MultiQueryRetriever.py +++ b/src/backend/base/langflow/components/retrievers/MultiQueryRetriever.py @@ -3,7 +3,7 @@ from typing import Optional from langchain.retrievers import MultiQueryRetriever from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, BaseRetriever, PromptTemplate, Text +from langflow.field_typing import BaseRetriever, LanguageModel, PromptTemplate, Text class MultiQueryRetrieverComponent(CustomComponent): @@ -39,7 +39,7 @@ class MultiQueryRetrieverComponent(CustomComponent): def build( self, - llm: BaseLanguageModel, + llm: LanguageModel, retriever: BaseRetriever, prompt: Optional[Text] = None, parser_key: str = "lines", diff --git a/src/backend/base/langflow/components/retrievers/SelfQueryRetriever.py b/src/backend/base/langflow/components/retrievers/SelfQueryRetriever.py index cbe001de3..441e08415 100644 --- a/src/backend/base/langflow/components/retrievers/SelfQueryRetriever.py +++ b/src/backend/base/langflow/components/retrievers/SelfQueryRetriever.py @@ -4,7 +4,7 @@ from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain_core.vectorstores import VectorStore from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, Text +from langflow.field_typing import LanguageModel, Text from langflow.schema import Data from langflow.schema.message import Message @@ -45,7 +45,7 @@ class SelfQueryRetrieverComponent(CustomComponent): vectorstore: VectorStore, attribute_infos: list[Data], document_content_description: Text, - llm: BaseLanguageModel, + llm: LanguageModel, ) -> Data: metadata_field_infos = [AttributeInfo(**value.data) for value in attribute_infos] self_query_retriever = SelfQueryRetriever.from_llm( diff --git a/src/backend/base/langflow/components/retrievers/VectaraSelfQueryRetriver.py b/src/backend/base/langflow/components/retrievers/VectaraSelfQueryRetriver.py index 0c5c4fff5..093a5db4a 100644 --- 
a/src/backend/base/langflow/components/retrievers/VectaraSelfQueryRetriver.py +++ b/src/backend/base/langflow/components/retrievers/VectaraSelfQueryRetriver.py @@ -3,11 +3,11 @@ from typing import List from langchain.chains.query_constructor.base import AttributeInfo from langchain.retrievers.self_query.base import SelfQueryRetriever -from langchain_core.language_models import BaseLanguageModel from langchain_core.retrievers import BaseRetriever from langchain_core.vectorstores import VectorStore from langflow.custom import CustomComponent +from langflow.field_typing.constants import LanguageModel class VectaraSelfQueryRetriverComponent(CustomComponent): @@ -38,7 +38,7 @@ class VectaraSelfQueryRetriverComponent(CustomComponent): self, vectorstore: VectorStore, document_content_description: str, - llm: BaseLanguageModel, + llm: LanguageModel, metadata_field_info: List[str], ) -> BaseRetriever: metadata_field_obj = [] diff --git a/src/backend/base/langflow/components/toolkits/OpenAPIToolkit.py b/src/backend/base/langflow/components/toolkits/OpenAPIToolkit.py index a24798cef..0639dae0c 100644 --- a/src/backend/base/langflow/components/toolkits/OpenAPIToolkit.py +++ b/src/backend/base/langflow/components/toolkits/OpenAPIToolkit.py @@ -6,7 +6,7 @@ from langchain_community.tools.json.tool import JsonSpec from langchain_community.utilities.requests import TextRequestsWrapper from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel +from langflow.field_typing import LanguageModel class OpenAPIToolkitComponent(CustomComponent): @@ -19,7 +19,7 @@ class OpenAPIToolkitComponent(CustomComponent): "requests_wrapper": {"display_name": "Text Requests Wrapper"}, } - def build(self, llm: BaseLanguageModel, path: str, allow_dangerous_requests: bool = False) -> BaseToolkit: + def build(self, llm: LanguageModel, path: str, allow_dangerous_requests: bool = False) -> BaseToolkit: if path.endswith("yaml") or path.endswith("yml"): yaml_dict = yaml.load(open(path, "r"), Loader=yaml.FullLoader) spec = JsonSpec(dict_=yaml_dict) diff --git a/src/backend/base/langflow/components/toolkits/VectorStoreRouterToolkit.py b/src/backend/base/langflow/components/toolkits/VectorStoreRouterToolkit.py index 13fff14a2..6e5b5d613 100644 --- a/src/backend/base/langflow/components/toolkits/VectorStoreRouterToolkit.py +++ b/src/backend/base/langflow/components/toolkits/VectorStoreRouterToolkit.py @@ -3,7 +3,7 @@ from typing import List, Union from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo, VectorStoreRouterToolkit from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, Tool +from langflow.field_typing import LanguageModel, Tool class VectorStoreRouterToolkitComponent(CustomComponent): @@ -16,9 +16,7 @@ class VectorStoreRouterToolkitComponent(CustomComponent): "llm": {"display_name": "LLM"}, } - def build( - self, vectorstores: List[VectorStoreInfo], llm: BaseLanguageModel - ) -> Union[Tool, VectorStoreRouterToolkit]: + def build(self, vectorstores: List[VectorStoreInfo], llm: LanguageModel) -> Union[Tool, VectorStoreRouterToolkit]: print("vectorstores", vectorstores) print("llm", llm) return VectorStoreRouterToolkit(vectorstores=vectorstores, llm=llm) diff --git a/src/backend/base/langflow/components/toolkits/VectorStoreToolkit.py b/src/backend/base/langflow/components/toolkits/VectorStoreToolkit.py index 2f788fcb9..fc63bb66f 100644 --- a/src/backend/base/langflow/components/toolkits/VectorStoreToolkit.py +++ 
b/src/backend/base/langflow/components/toolkits/VectorStoreToolkit.py @@ -3,7 +3,7 @@ from typing import Union from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo, VectorStoreToolkit from langflow.custom import CustomComponent -from langflow.field_typing import BaseLanguageModel, Tool +from langflow.field_typing import LanguageModel, Tool class VectorStoreToolkitComponent(CustomComponent): @@ -19,6 +19,6 @@ class VectorStoreToolkitComponent(CustomComponent): def build( self, vectorstore_info: VectorStoreInfo, - llm: BaseLanguageModel, + llm: LanguageModel, ) -> Union[Tool, VectorStoreToolkit]: return VectorStoreToolkit(vectorstore_info=vectorstore_info, llm=llm) diff --git a/src/backend/base/langflow/field_typing/constants.py b/src/backend/base/langflow/field_typing/constants.py index 3e9ca973f..f5d643e74 100644 --- a/src/backend/base/langflow/field_typing/constants.py +++ b/src/backend/base/langflow/field_typing/constants.py @@ -15,7 +15,9 @@ from langchain_core.tools import Tool from langchain_core.vectorstores import VectorStore from langchain_text_splitters import TextSplitter + NestedDict = Dict[str, Union[str, Dict]] +type LanguageModel = Union[BaseLanguageModel, BaseLLM, BaseChatModel] class Object: @@ -58,4 +60,5 @@ CUSTOM_COMPONENT_SUPPORTED_TYPES = { "Text": Text, "Object": Object, "Callable": Callable, + "LanguageModel": LanguageModel, } diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, world!).json b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, world!).json index d95f8eea3..28a40397f 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, world!).json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, world!).json @@ -611,9 +611,9 @@ "display_name": "Language Model", "method": "build_model", "name": "model_output", - "selected": "BaseLanguageModel", + "selected": "LanguageModel", "types": [ - "BaseLanguageModel" + "LanguageModel" ], "value": "__UNDEFINED__" } @@ -637,7 +637,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. 
If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> BaseLanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> LanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" }, "input_value": { "advanced": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Blog Writer.json b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Blog Writer.json index 1067e735f..67f08628d 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Blog Writer.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Blog Writer.json @@ -1,1331 +1,1331 @@ { - "data": { - "edges": [ - { - "data": { - "sourceHandle": { - "dataType": "TextInput", - "id": "TextInput-xeTQg", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "instructions", - "id": "Prompt-GPCfZ", - "inputTypes": [ - 
"Message", - "Text" - ], - "type": "str" - } - }, - "id": "reactflow__edge-TextInput-xeTQg{œdataTypeœ:œTextInputœ,œidœ:œTextInput-xeTQgœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-GPCfZ{œfieldNameœ:œinstructionsœ,œidœ:œPrompt-GPCfZœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", - "source": "TextInput-xeTQg", - "sourceHandle": "{œdataTypeœ: œTextInputœ, œidœ: œTextInput-xeTQgœ, œnameœ: œtextœ, œoutput_typesœ: [œMessageœ]}", - "target": "Prompt-GPCfZ", - "targetHandle": "{œfieldNameœ: œinstructionsœ, œidœ: œPrompt-GPCfZœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" - }, - { - "data": { - "sourceHandle": { - "dataType": "URL", - "id": "URL-ry4Vn", - "name": "data", - "output_types": [ - "Data" - ] - }, - "targetHandle": { - "fieldName": "data", - "id": "ParseData-jFtFL", - "inputTypes": [ - "Data" - ], - "type": "other" - } - }, - "id": "reactflow__edge-URL-ry4Vn{œdataTypeœ:œURLœ,œidœ:œURL-ry4Vnœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-ParseData-jFtFL{œfieldNameœ:œdataœ,œidœ:œParseData-jFtFLœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", - "selected": false, - "source": "URL-ry4Vn", - "sourceHandle": "{œdataTypeœ: œURLœ, œidœ: œURL-ry4Vnœ, œnameœ: œdataœ, œoutput_typesœ: [œDataœ]}", - "target": "ParseData-jFtFL", - "targetHandle": "{œfieldNameœ: œdataœ, œidœ: œParseData-jFtFLœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" - }, - { - "data": { - "sourceHandle": { - "dataType": "URL", - "id": "URL-PAQgR", - "name": "data", - "output_types": [ - "Data" - ] - }, - "targetHandle": { - "fieldName": "data", - "id": "ParseData-vdPxZ", - "inputTypes": [ - "Data" - ], - "type": "other" - } - }, - "id": "reactflow__edge-URL-PAQgR{œdataTypeœ:œURLœ,œidœ:œURL-PAQgRœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-ParseData-vdPxZ{œfieldNameœ:œdataœ,œidœ:œParseData-vdPxZœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", - "source": "URL-PAQgR", - "sourceHandle": "{œdataTypeœ: œURLœ, œidœ: œURL-PAQgRœ, œnameœ: œdataœ, œoutput_typesœ: [œDataœ]}", - "target": "ParseData-vdPxZ", - "targetHandle": "{œfieldNameœ: œdataœ, œidœ: œParseData-vdPxZœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" - }, - { - "data": { - "sourceHandle": { - "dataType": "ParseData", - "id": "ParseData-jFtFL", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "reference_2", - "id": "Prompt-GPCfZ", - "inputTypes": [ - "Message", - "Text" - ], - "type": "str" - } - }, - "id": "reactflow__edge-ParseData-jFtFL{œdataTypeœ:œParseDataœ,œidœ:œParseData-jFtFLœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-GPCfZ{œfieldNameœ:œreference_2œ,œidœ:œPrompt-GPCfZœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", - "source": "ParseData-jFtFL", - "sourceHandle": "{œdataTypeœ: œParseDataœ, œidœ: œParseData-jFtFLœ, œnameœ: œtextœ, œoutput_typesœ: [œMessageœ]}", - "target": "Prompt-GPCfZ", - "targetHandle": "{œfieldNameœ: œreference_2œ, œidœ: œPrompt-GPCfZœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" - }, - { - "data": { - "sourceHandle": { - "dataType": "ParseData", - "id": "ParseData-vdPxZ", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "reference_1", - "id": "Prompt-GPCfZ", - "inputTypes": [ - "Message", - "Text" - ], - "type": "str" - } - }, - "id": "reactflow__edge-ParseData-vdPxZ{œdataTypeœ:œParseDataœ,œidœ:œParseData-vdPxZœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-GPCfZ{œfieldNameœ:œreference_1œ,œidœ:œPrompt-GPCfZœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", - "source": "ParseData-vdPxZ", - "sourceHandle": "{œdataTypeœ: œParseDataœ, œidœ: œParseData-vdPxZœ, 
œnameœ: œtextœ, œoutput_typesœ: [œMessageœ]}", - "target": "Prompt-GPCfZ", - "targetHandle": "{œfieldNameœ: œreference_1œ, œidœ: œPrompt-GPCfZœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" - }, - { - "data": { - "sourceHandle": { - "dataType": "OpenAIModel", - "id": "OpenAIModel-NXVBv", - "name": "text_output", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "input_value", - "id": "ChatOutput-n5T1m", - "inputTypes": [ - "Message", - "str" - ], - "type": "str" - } - }, - "id": "reactflow__edge-OpenAIModel-NXVBv{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-NXVBvœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-n5T1m{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-n5T1mœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "source": "OpenAIModel-NXVBv", - "sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-NXVBvœ, œnameœ: œtext_outputœ, œoutput_typesœ: [œMessageœ]}", - "target": "ChatOutput-n5T1m", - "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-n5T1mœ, œinputTypesœ: [œMessageœ, œstrœ], œtypeœ: œstrœ}" - }, - { - "data": { - "sourceHandle": { - "dataType": "Prompt", - "id": "Prompt-GPCfZ", - "name": "prompt", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "input_value", - "id": "OpenAIModel-NXVBv", - "inputTypes": [ - "Message" - ], - "type": "str" - } - }, - "id": "reactflow__edge-Prompt-GPCfZ{œdataTypeœ:œPromptœ,œidœ:œPrompt-GPCfZœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-NXVBv{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-NXVBvœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "source": "Prompt-GPCfZ", - "sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-GPCfZœ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}", - "target": "OpenAIModel-NXVBv", - "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-NXVBvœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" - } - ], - "nodes": [ - { - "data": { - "description": "Create a prompt template with dynamic variables.", - "display_name": "Prompt", - "id": "Prompt-GPCfZ", - "node": { - "base_classes": [ - "object", - "str", - "Text" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": { - "template": [ - "reference_1", - "reference_2", - "instructions" - ] + "data": { + "edges": [ + { + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-xeTQg", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "instructions", + "id": "Prompt-GPCfZ", + "inputTypes": [ + "Message", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-TextInput-xeTQg{œdataTypeœ:œTextInputœ,œidœ:œTextInput-xeTQgœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-GPCfZ{œfieldNameœ:œinstructionsœ,œidœ:œPrompt-GPCfZœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", + "source": "TextInput-xeTQg", + "sourceHandle": "{œdataTypeœ: œTextInputœ, œidœ: œTextInput-xeTQgœ, œnameœ: œtextœ, œoutput_typesœ: [œMessageœ]}", + "target": "Prompt-GPCfZ", + "targetHandle": "{œfieldNameœ: œinstructionsœ, œidœ: œPrompt-GPCfZœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" }, - "description": "Create a prompt template with dynamic variables.", - "display_name": "Prompt", - "documentation": "", - "error": null, - "field_order": [], - "frozen": false, - "full_path": null, - "icon": "prompts", - "is_composition": null, - "is_input": null, - "is_output": null, - "name": "", - "output_types": [], - "outputs": [ - { - "cache": true, - "display_name": "Prompt Message", - "method": "build_prompt", - "name": "prompt", - 
"selected": "Message", - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from langflow.custom import Component\nfrom langflow.io import Output, PromptInput\nfrom langflow.schema.message import Message\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(\n self,\n ) -> Message:\n prompt = await Message.from_template_and_variables(**self._attributes)\n self.status = prompt.text\n return prompt\n" - }, - "instructions": { - "advanced": false, - "display_name": "instructions", - "dynamic": false, - "field_type": "str", - "fileTypes": [], - "file_path": "", - "info": "", - "input_types": [ - "Message", - "Text" - ], - "list": false, - "load_from_db": false, - "multiline": true, - "name": "instructions", - "password": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "" - }, - "reference_1": { - "advanced": false, - "display_name": "reference_1", - "dynamic": false, - "field_type": "str", - "fileTypes": [], - "file_path": "", - "info": "", - "input_types": [ - "Message", - "Text" - ], - "list": false, - "load_from_db": false, - "multiline": true, - "name": "reference_1", - "password": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "" - }, - "reference_2": { - "advanced": false, - "display_name": "reference_2", - "dynamic": false, - "field_type": "str", - "fileTypes": [], - "file_path": "", - "info": "", - "input_types": [ - "Message", - "Text" - ], - "list": false, - "load_from_db": false, - "multiline": true, - "name": "reference_2", - "password": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "" - }, - "template": { - "advanced": false, - "display_name": "Template", - "dynamic": false, - "fileTypes": [], - "file_path": "", - "info": "", - "input_types": [ - "Text" - ], - "list": false, - "load_from_db": false, - "multiline": false, - "name": "template", - "password": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "prompt", - "value": "Reference 1:\n\n{reference_1}\n\n---\n\nReference 2:\n\n{reference_2}\n\n---\n\n{instructions}\n\nBlog: \n\n\n" - } - } - }, - "type": "Prompt" - }, - "dragging": false, - "height": 619, - "id": "Prompt-GPCfZ", - "position": { - "x": 1378.0386633467044, - "y": 547.0254869963999 - }, - "positionAbsolute": { - "x": 1378.0386633467044, - "y": 547.0254869963999 - }, - "selected": false, - "type": "genericNode", - "width": 384 - }, - { - "data": { - "id": "URL-ry4Vn", - "node": { - "base_classes": [ - "Record" - ], - "beta": false, - "custom_fields": { - "urls": null + { + "data": { + "sourceHandle": { + "dataType": "URL", + "id": "URL-ry4Vn", + "name": "data", + "output_types": [ + "Data" + ] + }, + 
"targetHandle": { + "fieldName": "data", + "id": "ParseData-jFtFL", + "inputTypes": [ + "Data" + ], + "type": "other" + } + }, + "id": "reactflow__edge-URL-ry4Vn{œdataTypeœ:œURLœ,œidœ:œURL-ry4Vnœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-ParseData-jFtFL{œfieldNameœ:œdataœ,œidœ:œParseData-jFtFLœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "selected": false, + "source": "URL-ry4Vn", + "sourceHandle": "{œdataTypeœ: œURLœ, œidœ: œURL-ry4Vnœ, œnameœ: œdataœ, œoutput_typesœ: [œDataœ]}", + "target": "ParseData-jFtFL", + "targetHandle": "{œfieldNameœ: œdataœ, œidœ: œParseData-jFtFLœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" }, - "description": "Fetch content from one or more URLs.", - "display_name": "URL", - "documentation": "", - "field_formatters": {}, - "field_order": [], - "frozen": false, - "icon": "layout-template", - "output_types": [], - "outputs": [ - { - "cache": true, - "display_name": "Data", - "method": "fetch_content", - "name": "data", - "selected": "Data", - "types": [ - "Data" - ], - "value": "__UNDEFINED__" - } - ], - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "import re\n\nfrom langchain_community.document_loaders.web_base import WebBaseLoader\n\nfrom langflow.custom import Component\nfrom langflow.io import Output, TextInput\nfrom langflow.schema import Data\n\n\nclass URLComponent(Component):\n display_name = \"URL\"\n description = \"Fetch content from one or more URLs.\"\n icon = \"layout-template\"\n\n inputs = [\n TextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs, separated by commas.\",\n is_list=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"fetch_content\"),\n ]\n\n def ensure_url(self, string: str) -> str:\n \"\"\"\n Ensures the given string is a URL by adding 'http://' if it doesn't start with 'http://' or 'https://'.\n Raises an error if the string is not a valid URL.\n\n Parameters:\n string (str): The string to be checked and possibly modified.\n\n Returns:\n str: The modified string that is ensured to be a URL.\n\n Raises:\n ValueError: If the string is not a valid URL.\n \"\"\"\n if not string.startswith((\"http://\", \"https://\")):\n string = \"http://\" + string\n\n # Basic URL validation regex\n url_regex = re.compile(\n r\"^(http://|https://)?\" # http:// or https://\n r\"(([a-zA-Z0-9\\.-]+)\" # domain\n r\"(\\.[a-zA-Z]{2,}))\" # top-level domain\n r\"(:[0-9]{1,5})?\" # optional port\n r\"(\\/.*)?$\" # optional path\n )\n\n if not re.match(url_regex, string):\n raise ValueError(f\"Invalid URL: {string}\")\n\n return string\n\n def fetch_content(self) -> list[Data]:\n urls = [self.ensure_url(url.strip()) for url in self.urls if url.strip()]\n loader = WebBaseLoader(web_paths=urls, encoding=\"utf-8\")\n docs = loader.load()\n data = [Data(text=doc.page_content, **doc.metadata) for doc in docs]\n self.status = data\n return data\n" - }, - "urls": { - "advanced": false, - "display_name": "URLs", - "dynamic": false, - "info": "Enter one or more URLs, separated by commas.", - "list": true, - "load_from_db": false, - "name": "urls", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": [ - 
"https://www.promptingguide.ai/introduction/basics" - ] - } - } - }, - "type": "URL" - }, - "dragging": false, - "height": 301, - "id": "URL-ry4Vn", - "position": { - "x": 129.9069887328102, - "y": 1026.1629590683015 - }, - "positionAbsolute": { - "x": 129.9069887328102, - "y": 1026.1629590683015 - }, - "selected": false, - "type": "genericNode", - "width": 384 - }, - { - "data": { - "id": "URL-PAQgR", - "node": { - "base_classes": [ - "Record" - ], - "beta": false, - "custom_fields": { - "urls": null + { + "data": { + "sourceHandle": { + "dataType": "URL", + "id": "URL-PAQgR", + "name": "data", + "output_types": [ + "Data" + ] + }, + "targetHandle": { + "fieldName": "data", + "id": "ParseData-vdPxZ", + "inputTypes": [ + "Data" + ], + "type": "other" + } + }, + "id": "reactflow__edge-URL-PAQgR{œdataTypeœ:œURLœ,œidœ:œURL-PAQgRœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-ParseData-vdPxZ{œfieldNameœ:œdataœ,œidœ:œParseData-vdPxZœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "source": "URL-PAQgR", + "sourceHandle": "{œdataTypeœ: œURLœ, œidœ: œURL-PAQgRœ, œnameœ: œdataœ, œoutput_typesœ: [œDataœ]}", + "target": "ParseData-vdPxZ", + "targetHandle": "{œfieldNameœ: œdataœ, œidœ: œParseData-vdPxZœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" }, - "description": "Fetch content from one or more URLs.", - "display_name": "URL", - "documentation": "", - "field_formatters": {}, - "field_order": [], - "frozen": false, - "icon": "layout-template", - "output_types": [], - "outputs": [ - { - "cache": true, - "display_name": "Data", - "method": "fetch_content", - "name": "data", - "selected": "Data", - "types": [ - "Data" - ], - "value": "__UNDEFINED__" - } - ], - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "import re\n\nfrom langchain_community.document_loaders.web_base import WebBaseLoader\n\nfrom langflow.custom import Component\nfrom langflow.io import Output, TextInput\nfrom langflow.schema import Data\n\n\nclass URLComponent(Component):\n display_name = \"URL\"\n description = \"Fetch content from one or more URLs.\"\n icon = \"layout-template\"\n\n inputs = [\n TextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs, separated by commas.\",\n is_list=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"fetch_content\"),\n ]\n\n def ensure_url(self, string: str) -> str:\n \"\"\"\n Ensures the given string is a URL by adding 'http://' if it doesn't start with 'http://' or 'https://'.\n Raises an error if the string is not a valid URL.\n\n Parameters:\n string (str): The string to be checked and possibly modified.\n\n Returns:\n str: The modified string that is ensured to be a URL.\n\n Raises:\n ValueError: If the string is not a valid URL.\n \"\"\"\n if not string.startswith((\"http://\", \"https://\")):\n string = \"http://\" + string\n\n # Basic URL validation regex\n url_regex = re.compile(\n r\"^(http://|https://)?\" # http:// or https://\n r\"(([a-zA-Z0-9\\.-]+)\" # domain\n r\"(\\.[a-zA-Z]{2,}))\" # top-level domain\n r\"(:[0-9]{1,5})?\" # optional port\n r\"(\\/.*)?$\" # optional path\n )\n\n if not re.match(url_regex, string):\n raise ValueError(f\"Invalid URL: {string}\")\n\n return string\n\n def fetch_content(self) -> list[Data]:\n urls = 
[self.ensure_url(url.strip()) for url in self.urls if url.strip()]\n loader = WebBaseLoader(web_paths=urls, encoding=\"utf-8\")\n docs = loader.load()\n data = [Data(text=doc.page_content, **doc.metadata) for doc in docs]\n self.status = data\n return data\n" - }, - "urls": { - "advanced": false, - "display_name": "URLs", - "dynamic": false, - "info": "Enter one or more URLs, separated by commas.", - "list": true, - "load_from_db": false, - "name": "urls", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": [ - "https://www.promptingguide.ai/techniques/prompt_chaining" - ] - } + { + "data": { + "sourceHandle": { + "dataType": "ParseData", + "id": "ParseData-jFtFL", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "reference_2", + "id": "Prompt-GPCfZ", + "inputTypes": [ + "Message", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-ParseData-jFtFL{œdataTypeœ:œParseDataœ,œidœ:œParseData-jFtFLœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-GPCfZ{œfieldNameœ:œreference_2œ,œidœ:œPrompt-GPCfZœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", + "source": "ParseData-jFtFL", + "sourceHandle": "{œdataTypeœ: œParseDataœ, œidœ: œParseData-jFtFLœ, œnameœ: œtextœ, œoutput_typesœ: [œMessageœ]}", + "target": "Prompt-GPCfZ", + "targetHandle": "{œfieldNameœ: œreference_2œ, œidœ: œPrompt-GPCfZœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "data": { + "sourceHandle": { + "dataType": "ParseData", + "id": "ParseData-vdPxZ", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "reference_1", + "id": "Prompt-GPCfZ", + "inputTypes": [ + "Message", + "Text" + ], + "type": "str" + } + }, + "id": "reactflow__edge-ParseData-vdPxZ{œdataTypeœ:œParseDataœ,œidœ:œParseData-vdPxZœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-GPCfZ{œfieldNameœ:œreference_1œ,œidœ:œPrompt-GPCfZœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", + "source": "ParseData-vdPxZ", + "sourceHandle": "{œdataTypeœ: œParseDataœ, œidœ: œParseData-vdPxZœ, œnameœ: œtextœ, œoutput_typesœ: [œMessageœ]}", + "target": "Prompt-GPCfZ", + "targetHandle": "{œfieldNameœ: œreference_1œ, œidœ: œPrompt-GPCfZœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" + }, + { + "data": { + "sourceHandle": { + "dataType": "OpenAIModel", + "id": "OpenAIModel-NXVBv", + "name": "text_output", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-n5T1m", + "inputTypes": [ + "Message", + "str" + ], + "type": "str" + } + }, + "id": "reactflow__edge-OpenAIModel-NXVBv{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-NXVBvœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-n5T1m{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-n5T1mœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "source": "OpenAIModel-NXVBv", + "sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-NXVBvœ, œnameœ: œtext_outputœ, œoutput_typesœ: [œMessageœ]}", + "target": "ChatOutput-n5T1m", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-n5T1mœ, œinputTypesœ: [œMessageœ, œstrœ], œtypeœ: œstrœ}" + }, + { + "data": { + "sourceHandle": { + "dataType": "Prompt", + "id": "Prompt-GPCfZ", + "name": "prompt", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "input_value", + "id": "OpenAIModel-NXVBv", + "inputTypes": [ + "Message" + ], + "type": "str" + } + }, + "id": 
"reactflow__edge-Prompt-GPCfZ{œdataTypeœ:œPromptœ,œidœ:œPrompt-GPCfZœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-NXVBv{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-NXVBvœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "source": "Prompt-GPCfZ", + "sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-GPCfZœ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}", + "target": "OpenAIModel-NXVBv", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-NXVBvœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" } - }, - "type": "URL" - }, - "dragging": false, - "height": 301, - "id": "URL-PAQgR", - "position": { - "x": 109.01828882212544, - "y": 635.7038211214808 - }, - "positionAbsolute": { - "x": 109.01828882212544, - "y": 635.7038211214808 - }, - "selected": false, - "type": "genericNode", - "width": 384 - }, - { - "data": { - "description": "Get text inputs from the Playground.", - "display_name": "Instructions", - "edited": false, - "id": "TextInput-xeTQg", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Get text inputs from the Playground.", - "display_name": "Instructions", - "documentation": "", - "edited": true, - "field_order": [ - "input_value" - ], - "frozen": false, - "icon": "type", - "output_types": [], - "outputs": [ - { - "cache": true, - "display_name": "Text", - "hidden": false, - "method": "text_response", - "name": "text", - "selected": "Message", - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from langflow.base.io.text import TextComponent\nfrom langflow.io import TextInput\nfrom langflow.io import Output\nfrom langflow.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get text inputs from the Playground.\"\n icon = \"type\"\n\n inputs = [\n TextInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def text_response(self) -> Message:\n message = Message(\n text=self.input_value,\n )\n return message\n" - }, - "input_value": { - "advanced": false, - "display_name": "Text", - "dynamic": false, - "info": "Text to be passed as input.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "multiline": true, - "name": "input_value", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "Use the references above for style to write a new blog/tutorial about prompt engineering techniques. Suggest non-covered topics." 
- } + ], + "nodes": [ + { + "data": { + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "id": "Prompt-GPCfZ", + "node": { + "base_classes": [ + "object", + "str", + "Text" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": { + "template": [ + "reference_1", + "reference_2", + "instructions" + ] + }, + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "documentation": "", + "error": null, + "field_order": [], + "frozen": false, + "full_path": null, + "icon": "prompts", + "is_composition": null, + "is_input": null, + "is_output": null, + "name": "", + "output_types": [], + "outputs": [ + { + "cache": true, + "display_name": "Prompt Message", + "method": "build_prompt", + "name": "prompt", + "selected": "Message", + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.custom import Component\nfrom langflow.io import Output, PromptInput\nfrom langflow.schema.message import Message\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(\n self,\n ) -> Message:\n prompt = await Message.from_template_and_variables(**self._attributes)\n self.status = prompt.text\n return prompt\n" + }, + "instructions": { + "advanced": false, + "display_name": "instructions", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Message", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "instructions", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "reference_1": { + "advanced": false, + "display_name": "reference_1", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Message", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "reference_1", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "reference_2": { + "advanced": false, + "display_name": "reference_2", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Message", + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "reference_2", + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "template": { + "advanced": false, + "display_name": "Template", + "dynamic": false, + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Text" + ], + "list": false, + "load_from_db": false, + "multiline": false, + "name": "template", + "password": false, + "placeholder": "", 
+ "required": false, + "show": true, + "title_case": false, + "type": "prompt", + "value": "Reference 1:\n\n{reference_1}\n\n---\n\nReference 2:\n\n{reference_2}\n\n---\n\n{instructions}\n\nBlog: \n\n\n" + } + } + }, + "type": "Prompt" + }, + "dragging": false, + "height": 619, + "id": "Prompt-GPCfZ", + "position": { + "x": 1378.0386633467044, + "y": 547.0254869963999 + }, + "positionAbsolute": { + "x": 1378.0386633467044, + "y": 547.0254869963999 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { + "data": { + "id": "URL-ry4Vn", + "node": { + "base_classes": [ + "Record" + ], + "beta": false, + "custom_fields": { + "urls": null + }, + "description": "Fetch content from one or more URLs.", + "display_name": "URL", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "icon": "layout-template", + "output_types": [], + "outputs": [ + { + "cache": true, + "display_name": "Data", + "method": "fetch_content", + "name": "data", + "selected": "Data", + "types": [ + "Data" + ], + "value": "__UNDEFINED__" + } + ], + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "import re\n\nfrom langchain_community.document_loaders.web_base import WebBaseLoader\n\nfrom langflow.custom import Component\nfrom langflow.io import Output, TextInput\nfrom langflow.schema import Data\n\n\nclass URLComponent(Component):\n display_name = \"URL\"\n description = \"Fetch content from one or more URLs.\"\n icon = \"layout-template\"\n\n inputs = [\n TextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs, separated by commas.\",\n is_list=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"fetch_content\"),\n ]\n\n def ensure_url(self, string: str) -> str:\n \"\"\"\n Ensures the given string is a URL by adding 'http://' if it doesn't start with 'http://' or 'https://'.\n Raises an error if the string is not a valid URL.\n\n Parameters:\n string (str): The string to be checked and possibly modified.\n\n Returns:\n str: The modified string that is ensured to be a URL.\n\n Raises:\n ValueError: If the string is not a valid URL.\n \"\"\"\n if not string.startswith((\"http://\", \"https://\")):\n string = \"http://\" + string\n\n # Basic URL validation regex\n url_regex = re.compile(\n r\"^(http://|https://)?\" # http:// or https://\n r\"(([a-zA-Z0-9\\.-]+)\" # domain\n r\"(\\.[a-zA-Z]{2,}))\" # top-level domain\n r\"(:[0-9]{1,5})?\" # optional port\n r\"(\\/.*)?$\" # optional path\n )\n\n if not re.match(url_regex, string):\n raise ValueError(f\"Invalid URL: {string}\")\n\n return string\n\n def fetch_content(self) -> list[Data]:\n urls = [self.ensure_url(url.strip()) for url in self.urls if url.strip()]\n loader = WebBaseLoader(web_paths=urls, encoding=\"utf-8\")\n docs = loader.load()\n data = [Data(text=doc.page_content, **doc.metadata) for doc in docs]\n self.status = data\n return data\n" + }, + "urls": { + "advanced": false, + "display_name": "URLs", + "dynamic": false, + "info": "Enter one or more URLs, separated by commas.", + "list": true, + "load_from_db": false, + "name": "urls", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": [ + 
"https://www.promptingguide.ai/introduction/basics" + ] + } + } + }, + "type": "URL" + }, + "dragging": false, + "height": 301, + "id": "URL-ry4Vn", + "position": { + "x": 129.9069887328102, + "y": 1026.1629590683015 + }, + "positionAbsolute": { + "x": 129.9069887328102, + "y": 1026.1629590683015 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { + "data": { + "id": "URL-PAQgR", + "node": { + "base_classes": [ + "Record" + ], + "beta": false, + "custom_fields": { + "urls": null + }, + "description": "Fetch content from one or more URLs.", + "display_name": "URL", + "documentation": "", + "field_formatters": {}, + "field_order": [], + "frozen": false, + "icon": "layout-template", + "output_types": [], + "outputs": [ + { + "cache": true, + "display_name": "Data", + "method": "fetch_content", + "name": "data", + "selected": "Data", + "types": [ + "Data" + ], + "value": "__UNDEFINED__" + } + ], + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "import re\n\nfrom langchain_community.document_loaders.web_base import WebBaseLoader\n\nfrom langflow.custom import Component\nfrom langflow.io import Output, TextInput\nfrom langflow.schema import Data\n\n\nclass URLComponent(Component):\n display_name = \"URL\"\n description = \"Fetch content from one or more URLs.\"\n icon = \"layout-template\"\n\n inputs = [\n TextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs, separated by commas.\",\n is_list=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"fetch_content\"),\n ]\n\n def ensure_url(self, string: str) -> str:\n \"\"\"\n Ensures the given string is a URL by adding 'http://' if it doesn't start with 'http://' or 'https://'.\n Raises an error if the string is not a valid URL.\n\n Parameters:\n string (str): The string to be checked and possibly modified.\n\n Returns:\n str: The modified string that is ensured to be a URL.\n\n Raises:\n ValueError: If the string is not a valid URL.\n \"\"\"\n if not string.startswith((\"http://\", \"https://\")):\n string = \"http://\" + string\n\n # Basic URL validation regex\n url_regex = re.compile(\n r\"^(http://|https://)?\" # http:// or https://\n r\"(([a-zA-Z0-9\\.-]+)\" # domain\n r\"(\\.[a-zA-Z]{2,}))\" # top-level domain\n r\"(:[0-9]{1,5})?\" # optional port\n r\"(\\/.*)?$\" # optional path\n )\n\n if not re.match(url_regex, string):\n raise ValueError(f\"Invalid URL: {string}\")\n\n return string\n\n def fetch_content(self) -> list[Data]:\n urls = [self.ensure_url(url.strip()) for url in self.urls if url.strip()]\n loader = WebBaseLoader(web_paths=urls, encoding=\"utf-8\")\n docs = loader.load()\n data = [Data(text=doc.page_content, **doc.metadata) for doc in docs]\n self.status = data\n return data\n" + }, + "urls": { + "advanced": false, + "display_name": "URLs", + "dynamic": false, + "info": "Enter one or more URLs, separated by commas.", + "list": true, + "load_from_db": false, + "name": "urls", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": [ + "https://www.promptingguide.ai/techniques/prompt_chaining" + ] + } + } + }, + "type": "URL" + }, + "dragging": false, + "height": 301, + "id": "URL-PAQgR", + 
"position": { + "x": 109.01828882212544, + "y": 635.7038211214808 + }, + "positionAbsolute": { + "x": 109.01828882212544, + "y": 635.7038211214808 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { + "data": { + "description": "Get text inputs from the Playground.", + "display_name": "Instructions", + "edited": false, + "id": "TextInput-xeTQg", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Get text inputs from the Playground.", + "display_name": "Instructions", + "documentation": "", + "edited": false, + "field_order": [ + "input_value" + ], + "frozen": false, + "icon": "type", + "output_types": [], + "outputs": [ + { + "cache": true, + "display_name": "Text", + "hidden": false, + "method": "text_response", + "name": "text", + "selected": "Message", + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.base.io.text import TextComponent\nfrom langflow.io import TextInput\nfrom langflow.io import Output\nfrom langflow.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get text inputs from the Playground.\"\n icon = \"type\"\n\n inputs = [\n TextInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def text_response(self) -> Message:\n message = Message(\n text=self.input_value,\n )\n return message\n" + }, + "input_value": { + "advanced": false, + "display_name": "Text", + "dynamic": false, + "info": "Text to be passed as input.", + "input_types": [ + "Message" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input_value", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Use the references above for style to write a new blog/tutorial about prompt engineering techniques. Suggest non-covered topics." 
+ } + } + }, + "type": "TextInput" + }, + "dragging": false, + "height": 309, + "id": "TextInput-xeTQg", + "position": { + "x": 668.3436449795839, + "y": 213.40493638517057 + }, + "positionAbsolute": { + "x": 668.3436449795839, + "y": 213.40493638517057 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { + "data": { + "id": "ParseData-jFtFL", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Convert Data into plain text following a specified template.", + "display_name": "Parse Data", + "documentation": "", + "field_order": [ + "data", + "template", + "sep" + ], + "frozen": false, + "icon": "braces", + "output_types": [], + "outputs": [ + { + "cache": true, + "display_name": "Text", + "method": "parse_data", + "name": "text", + "selected": "Message", + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\"),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"parse_data\"),\n ]\n\n def parse_data(self) -> Message:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n\n result_string = data_to_text(template, data, sep=self.sep)\n self.status = result_string\n return Message(text=result_string)\n" + }, + "data": { + "advanced": false, + "display_name": "Data", + "dynamic": false, + "info": "The data to convert to text.", + "input_types": [ + "Data" + ], + "list": false, + "name": "data", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "other", + "value": "" + }, + "sep": { + "advanced": true, + "display_name": "Separator", + "dynamic": false, + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "sep", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "---" + }, + "template": { + "advanced": false, + "display_name": "Template", + "dynamic": false, + "info": "The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.", + "multiline": true, + "name": "template", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "{text}" + } + } + }, + "type": "ParseData" + }, + "dragging": false, + "height": 377, + "id": "ParseData-jFtFL", + "position": { + "x": 697.109388389247, + "y": 993.1273555676513 + }, + "positionAbsolute": { + "x": 697.109388389247, + "y": 993.1273555676513 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { + "data": { + "id": "ParseData-vdPxZ", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Convert Data into plain text following a specified template.", + "display_name": "Parse Data", + "documentation": "", + "field_order": [ + "data", + "template", + "sep" + ], + "frozen": false, + "icon": "braces", + "output_types": [], + "outputs": [ + { + "cache": true, + "display_name": "Text", + "method": "parse_data", + "name": "text", + "selected": "Message", + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\"),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"parse_data\"),\n ]\n\n def parse_data(self) -> Message:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n\n result_string = data_to_text(template, data, sep=self.sep)\n self.status = result_string\n return Message(text=result_string)\n" + }, + "data": { + "advanced": false, + "display_name": "Data", + "dynamic": false, + "info": "The data to convert to text.", + "input_types": [ + "Data" + ], + "list": false, + "name": "data", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "other", + "value": "" + }, + "sep": { + "advanced": true, + "display_name": "Separator", + "dynamic": false, + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "sep", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "---" + }, + "template": { + "advanced": false, + "display_name": "Template", + "dynamic": false, + "info": "The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.", + "multiline": true, + "name": "template", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "{text}" + } + } + }, + "type": "ParseData" + }, + "dragging": false, + "height": 377, + "id": "ParseData-vdPxZ", + "position": { + "x": 674.3059180422167, + "y": 594.1081812719365 + }, + "positionAbsolute": { + "x": 674.3059180422167, + "y": 594.1081812719365 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { + "data": { + "id": "OpenAIModel-NXVBv", + "node": { + "base_classes": [ + "BaseLanguageModel", + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generates text using OpenAI LLMs.", + "display_name": "OpenAI", + "documentation": "", + "field_order": [ + "input_value", + "max_tokens", + "model_kwargs", + "output_schema", + "model_name", + "openai_api_base", + "openai_api_key", + "temperature", + "stream", + "system_message", + "seed" + ], + "frozen": false, + "icon": "OpenAI", + "output_types": [], + "outputs": [ + { + "cache": true, + "display_name": "Text", + "method": "text_response", + "name": "text_output", + "selected": "Message", + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + }, + { + "cache": true, + "display_name": "Language Model", + "method": "build_model", + "name": "model_output", + "selected": "LanguageModel", + "types": [ + "LanguageModel" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> LanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" + }, + "input_value": { + "advanced": false, + "display_name": "Input", + "dynamic": false, + "info": "", + "input_types": [ + "Message" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input_value", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "max_tokens": { + "advanced": true, + "display_name": "Max Tokens", + "dynamic": false, + "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", + "list": false, + "name": "max_tokens", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": "" + }, + "model_kwargs": { + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "info": "", + "list": false, + "name": "model_kwargs", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "dict", + "value": {} + }, + "model_name": { + "advanced": false, + "display_name": "Model Name", + "dynamic": false, + "info": "", + "name": "model_name", + "options": [ + "gpt-4o", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125" + ], + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "gpt-3.5-turbo" + }, + "openai_api_base": { + "advanced": true, + "display_name": "OpenAI API Base", + "dynamic": false, + "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "openai_api_base", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "openai_api_key": { + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, + "info": "The OpenAI API Key to use for the OpenAI model.", + "input_types": [], + "load_from_db": true, + "name": "openai_api_key", + "password": true, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "output_schema": { + "advanced": true, + "display_name": "Schema", + "dynamic": false, + "info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.", + "list": true, + "name": "output_schema", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "dict", + "value": {} + }, + "seed": { + "advanced": true, + "display_name": "Seed", + "dynamic": false, + "info": "The seed controls the reproducibility of the job.", + "list": false, + "name": "seed", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "int", + "value": 1 + }, + "stream": { + "advanced": true, + "display_name": "Stream", + "dynamic": false, + "info": "Stream the response from the model. 
Streaming works only in Chat.", + "list": false, + "name": "stream", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "bool", + "value": false + }, + "system_message": { + "advanced": true, + "display_name": "System Message", + "dynamic": false, + "info": "System message to pass to the model.", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "system_message", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "temperature": { + "advanced": false, + "display_name": "Temperature", + "dynamic": false, + "info": "", + "list": false, + "name": "temperature", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "float", + "value": 0.1 + } + } + }, + "type": "OpenAIModel" + }, + "dragging": false, + "height": 623, + "id": "OpenAIModel-NXVBv", + "position": { + "x": 1968.999112433115, + "y": 528.8142375467121 + }, + "positionAbsolute": { + "x": 1968.999112433115, + "y": 528.8142375467121 + }, + "selected": false, + "type": "genericNode", + "width": 384 + }, + { + "data": { + "id": "ChatOutput-n5T1m", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Display a chat message in the Playground.", + "display_name": "Chat Output", + "documentation": "", + "field_order": [ + "input_value", + "sender", + "sender_name", + "session_id", + "data_template" + ], + "frozen": false, + "icon": "ChatOutput", + "output_types": [], + "outputs": [ + { + "cache": true, + "display_name": "Message", + "method": "message_response", + "name": "message", + "selected": "Message", + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.base.io.chat import ChatComponent\nfrom langflow.io import DropdownInput, Output, TextInput\nfrom langflow.schema.message import Message\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n inputs = [\n TextInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[\"Machine\", \"User\"],\n value=\"Machine\",\n advanced=True,\n info=\"Type of sender.\",\n ),\n TextInput(\n name=\"sender_name\", display_name=\"Sender Name\", info=\"Name of the sender.\", value=\"AI\", advanced=True\n ),\n TextInput(name=\"session_id\", display_name=\"Session ID\", info=\"Session ID for the message.\", advanced=True),\n TextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n def message_response(self) -> Message:\n message = Message(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n )\n if self.session_id and isinstance(message, Message) and isinstance(message.text, str):\n self.store_message(message)\n self.message.value = message\n\n self.status = message\n return message\n" + }, + "data_template": { + "advanced": true, + "display_name": "Data Template", + "dynamic": false, + "info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.", + "input_types": [ + "Message", + "str" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "data_template", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "{text}" + }, + "input_value": { + "advanced": false, + "display_name": "Text", + "dynamic": false, + "info": "Message to be passed as output.", + "input_types": [ + "Message", + "str" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input_value", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "sender": { + "advanced": true, + "display_name": "Sender Type", + "dynamic": false, + "info": "Type of sender.", + "name": "sender", + "options": [ + "Machine", + "User" + ], + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "Machine" + }, + "sender_name": { + "advanced": true, + "display_name": "Sender Name", + "dynamic": false, + "info": "Name of the sender.", + "input_types": [ + "Message", + "str" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "sender_name", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "AI" + }, + "session_id": { + "advanced": true, + "display_name": "Session ID", + "dynamic": false, + "info": "Session ID for the message.", + "input_types": [ + "Message", + "str" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "session_id", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + } + } + }, + "type": "ChatOutput" + }, + "dragging": false, + "height": 309, + "id": "ChatOutput-n5T1m", + "position": { + "x": 2668.5087497211402, + "y": 859.3268817022193 + }, + "positionAbsolute": { + "x": 2668.5087497211402, + "y": 859.3268817022193 + }, + "selected": false, + "type": "genericNode", + "width": 384 } - }, - "type": "TextInput" - }, - "dragging": false, - "height": 309, - "id": "TextInput-xeTQg", - "position": { - "x": 668.3436449795839, - "y": 213.40493638517057 - }, - "positionAbsolute": { - "x": 668.3436449795839, - "y": 213.40493638517057 - }, - "selected": false, - "type": "genericNode", - "width": 384 - }, - { - "data": { - "id": "ParseData-jFtFL", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Convert Data into plain text following a specified template.", - "display_name": "Parse Data", - "documentation": "", - "field_order": [ - "data", - "template", - "sep" - ], - "frozen": false, - "icon": "braces", - "output_types": [], - "outputs": [ - { - "cache": true, - 
"display_name": "Text", - "method": "parse_data", - "name": "text", - "selected": "Message", - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\"),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"parse_data\"),\n ]\n\n def parse_data(self) -> Message:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n\n result_string = data_to_text(template, data, sep=self.sep)\n self.status = result_string\n return Message(text=result_string)\n" - }, - "data": { - "advanced": false, - "display_name": "Data", - "dynamic": false, - "info": "The data to convert to text.", - "input_types": [ - "Data" - ], - "list": false, - "name": "data", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "other", - "value": "" - }, - "sep": { - "advanced": true, - "display_name": "Separator", - "dynamic": false, - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "sep", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "---" - }, - "template": { - "advanced": false, - "display_name": "Template", - "dynamic": false, - "info": "The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.", - "multiline": true, - "name": "template", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "{text}" - } - } - }, - "type": "ParseData" - }, - "dragging": false, - "height": 377, - "id": "ParseData-jFtFL", - "position": { - "x": 697.109388389247, - "y": 993.1273555676513 - }, - "positionAbsolute": { - "x": 697.109388389247, - "y": 993.1273555676513 - }, - "selected": false, - "type": "genericNode", - "width": 384 - }, - { - "data": { - "id": "ParseData-vdPxZ", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Convert Data into plain text following a specified template.", - "display_name": "Parse Data", - "documentation": "", - "field_order": [ - "data", - "template", - "sep" - ], - "frozen": false, - "icon": "braces", - "output_types": [], - "outputs": [ - { - "cache": true, - "display_name": "Text", - "method": "parse_data", - "name": "text", - "selected": "Message", - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\"),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"parse_data\"),\n ]\n\n def parse_data(self) -> Message:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n\n result_string = data_to_text(template, data, sep=self.sep)\n self.status = result_string\n return Message(text=result_string)\n" - }, - "data": { - "advanced": false, - "display_name": "Data", - "dynamic": false, - "info": "The data to convert to text.", - "input_types": [ - "Data" - ], - "list": false, - "name": "data", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "other", - "value": "" - }, - "sep": { - "advanced": true, - "display_name": "Separator", - "dynamic": false, - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "sep", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "---" - }, - "template": { - "advanced": false, - "display_name": "Template", - "dynamic": false, - "info": "The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.", - "multiline": true, - "name": "template", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "{text}" - } - } - }, - "type": "ParseData" - }, - "dragging": false, - "height": 377, - "id": "ParseData-vdPxZ", - "position": { - "x": 674.3059180422167, - "y": 594.1081812719365 - }, - "positionAbsolute": { - "x": 674.3059180422167, - "y": 594.1081812719365 - }, - "selected": false, - "type": "genericNode", - "width": 384 - }, - { - "data": { - "id": "OpenAIModel-NXVBv", - "node": { - "base_classes": [ - "BaseLanguageModel", - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Generates text using OpenAI LLMs.", - "display_name": "OpenAI", - "documentation": "", - "field_order": [ - "input_value", - "max_tokens", - "model_kwargs", - "output_schema", - "model_name", - "openai_api_base", - "openai_api_key", - "temperature", - "stream", - "system_message", - "seed" - ], - "frozen": false, - "icon": "OpenAI", - "output_types": [], - "outputs": [ - { - "cache": true, - "display_name": "Text", - "method": "text_response", - "name": "text_output", - "selected": "Message", - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - }, - { - "cache": true, - "display_name": "Language Model", - "method": "build_model", - "name": "model_output", - "selected": "BaseLanguageModel", - "types": [ - "BaseLanguageModel" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> BaseLanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" - }, - "input_value": { - "advanced": false, - "display_name": "Input", - "dynamic": false, - "info": "", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "multiline": true, - "name": "input_value", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "" - }, - "max_tokens": { - "advanced": true, - "display_name": "Max Tokens", - "dynamic": false, - "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", - "list": false, - "name": "max_tokens", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "int", - "value": "" - }, - "model_kwargs": { - "advanced": true, - "display_name": "Model Kwargs", - "dynamic": false, - "info": "", - "list": false, - "name": "model_kwargs", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "dict", - "value": {} - }, - "model_name": { - "advanced": false, - "display_name": "Model Name", - "dynamic": false, - "info": "", - "name": "model_name", - "options": [ - "gpt-4o", - "gpt-4-turbo", - "gpt-4-turbo-preview", - "gpt-3.5-turbo", - "gpt-3.5-turbo-0125" - ], - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "gpt-3.5-turbo" - }, - "openai_api_base": { - "advanced": true, - "display_name": "OpenAI API Base", - "dynamic": false, - "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "openai_api_base", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "" - }, - "openai_api_key": { - "advanced": false, - "display_name": "OpenAI API Key", - "dynamic": false, - "info": "The OpenAI API Key to use for the OpenAI model.", - "input_types": [], - "load_from_db": true, - "name": "openai_api_key", - "password": true, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "" - }, - "output_schema": { - "advanced": true, - "display_name": "Schema", - "dynamic": false, - "info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.", - "list": true, - "name": "output_schema", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "dict", - "value": {} - }, - "seed": { - "advanced": true, - "display_name": "Seed", - "dynamic": false, - "info": "The seed controls the reproducibility of the job.", - "list": false, - "name": "seed", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "int", - "value": 1 - }, - "stream": { - "advanced": true, - "display_name": "Stream", - "dynamic": false, - "info": "Stream the response from the model. 
Streaming works only in Chat.", - "list": false, - "name": "stream", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "bool", - "value": false - }, - "system_message": { - "advanced": true, - "display_name": "System Message", - "dynamic": false, - "info": "System message to pass to the model.", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "system_message", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "" - }, - "temperature": { - "advanced": false, - "display_name": "Temperature", - "dynamic": false, - "info": "", - "list": false, - "name": "temperature", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "float", - "value": 0.1 - } - } - }, - "type": "OpenAIModel" - }, - "dragging": false, - "height": 623, - "id": "OpenAIModel-NXVBv", - "position": { - "x": 1968.999112433115, - "y": 528.8142375467121 - }, - "positionAbsolute": { - "x": 1968.999112433115, - "y": 528.8142375467121 - }, - "selected": false, - "type": "genericNode", - "width": 384 - }, - { - "data": { - "id": "ChatOutput-n5T1m", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Display a chat message in the Playground.", - "display_name": "Chat Output", - "documentation": "", - "field_order": [ - "input_value", - "sender", - "sender_name", - "session_id", - "data_template" - ], - "frozen": false, - "icon": "ChatOutput", - "output_types": [], - "outputs": [ - { - "cache": true, - "display_name": "Message", - "method": "message_response", - "name": "message", - "selected": "Message", - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from langflow.base.io.chat import ChatComponent\nfrom langflow.io import DropdownInput, Output, TextInput\nfrom langflow.schema.message import Message\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n\n inputs = [\n TextInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[\"Machine\", \"User\"],\n value=\"Machine\",\n advanced=True,\n info=\"Type of sender.\",\n ),\n TextInput(\n name=\"sender_name\", display_name=\"Sender Name\", info=\"Name of the sender.\", value=\"AI\", advanced=True\n ),\n TextInput(name=\"session_id\", display_name=\"Session ID\", info=\"Session ID for the message.\", advanced=True),\n TextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n def message_response(self) -> Message:\n message = Message(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n )\n if self.session_id and isinstance(message, Message) and isinstance(message.text, str):\n self.store_message(message)\n self.message.value = message\n\n self.status = message\n return message\n" - }, - "data_template": { - "advanced": true, - "display_name": "Data Template", - "dynamic": false, - "info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.", - "input_types": [ - "Message", - "str" - ], - "list": false, - "load_from_db": false, - "multiline": true, - "name": "data_template", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "{text}" - }, - "input_value": { - "advanced": false, - "display_name": "Text", - "dynamic": false, - "info": "Message to be passed as output.", - "input_types": [ - "Message", - "str" - ], - "list": false, - "load_from_db": false, - "multiline": true, - "name": "input_value", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "" - }, - "sender": { - "advanced": true, - "display_name": "Sender Type", - "dynamic": false, - "info": "Type of sender.", - "name": "sender", - "options": [ - "Machine", - "User" - ], - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "Machine" - }, - "sender_name": { - "advanced": true, - "display_name": "Sender Name", - "dynamic": false, - "info": "Name of the sender.", - "input_types": [ - "Message", - "str" - ], - "list": false, - "load_from_db": false, - "multiline": true, - "name": "sender_name", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "AI" - }, - "session_id": { - "advanced": true, - "display_name": "Session ID", - "dynamic": false, - "info": "Session ID for the message.", - "input_types": [ - "Message", - "str" - ], - "list": false, - "load_from_db": false, - "multiline": true, - "name": "session_id", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "" - } - } - }, - "type": "ChatOutput" - }, - "dragging": false, - "height": 309, - "id": "ChatOutput-n5T1m", - "position": { - "x": 2668.5087497211402, - "y": 859.3268817022193 - }, - "positionAbsolute": { - "x": 2668.5087497211402, - "y": 859.3268817022193 - }, - "selected": false, - "type": "genericNode", - "width": 384 - } - ], - "viewport": { - "x": 79.21823673415201, - "y": 55.69170988103667, - "zoom": 0.4417380169313782 - } - }, - "description": "This flow can be used to create a blog post following instructions from the user, using two other blogs as reference.", - "endpoint_name": "None-1", - "id": "4b20b4fa-5460-4d2a-af02-ff6b0637941e", - "is_component": false, - "last_tested_version": "1.0.0a59", - "name": "Blog Writer" + ], + "viewport": { + "x": 79.21823673415201, + "y": 55.69170988103667, + "zoom": 0.4417380169313782 + } + }, + "description": "This flow can be used to create a blog post following instructions from the user, using two other blogs as reference.", + "endpoint_name": "None-1", + "id": "4b20b4fa-5460-4d2a-af02-ff6b0637941e", + "is_component": false, + 
"last_tested_version": "1.0.0a59", + "name": "Blog Writer" } \ No newline at end of file diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Document QA.json b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Document QA.json index cfab7017f..156408217 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Document QA.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Document QA.json @@ -630,7 +630,7 @@ "description": "A generic file loader.", "display_name": "File", "documentation": "", - "edited": true, + "edited": false, "field_order": [ "path", "silent_errors" @@ -785,9 +785,9 @@ "display_name": "Language Model", "method": "build_model", "name": "model_output", - "selected": "BaseLanguageModel", + "selected": "LanguageModel", "types": [ - "BaseLanguageModel" + "LanguageModel" ], "value": "__UNDEFINED__" } @@ -811,7 +811,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> BaseLanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. 
If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> LanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" }, "input_value": { "advanced": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Memory Conversation.json b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Memory Conversation.json index 5e8c59de1..ed2419a68 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Memory Conversation.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Memory Conversation.json @@ -687,9 +687,9 @@ "display_name": "Language Model", "method": "build_model", "name": "model_output", - "selected": "BaseLanguageModel", + "selected": "LanguageModel", "types": [ - "BaseLanguageModel" + "LanguageModel" ], "value": "__UNDEFINED__" } @@ -713,7 +713,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom 
langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> BaseLanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" + "value": "import 
operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> LanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n 
base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" }, "input_value": { "advanced": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Prompt Chaining.json b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Prompt Chaining.json index b6b64802e..cbfbdb544 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Langflow Prompt Chaining.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Langflow Prompt Chaining.json @@ -1099,9 +1099,9 @@ "display_name": "Language Model", "method": "build_model", "name": "model_output", - "selected": "BaseLanguageModel", + "selected": "LanguageModel", "types": [ - "BaseLanguageModel" + "LanguageModel" ], "value": "__UNDEFINED__" } @@ -1124,7 +1124,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> BaseLanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. 
If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> LanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" }, "input_value": { "advanced": false, @@ -1512,9 +1512,9 @@ "display_name": "Language Model", "method": "build_model", "name": "model_output", - "selected": "BaseLanguageModel", + "selected": "LanguageModel", "types": [ - "BaseLanguageModel" + "LanguageModel" ], "value": "__UNDEFINED__" } @@ -1537,7 +1537,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n 
description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> BaseLanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n 
FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> LanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" }, "input_value": { "advanced": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json 
b/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json index e61c27829..d93bf9db9 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/VectorStore-RAG-Flows.json @@ -495,7 +495,7 @@ "description": "Display a text output in the Playground.", "display_name": "Extracted Chunks", "documentation": "", - "edited": true, + "edited": false, "field_order": [ "input_value" ], @@ -1042,9 +1042,9 @@ "display_name": "Language Model", "method": "build_model", "name": "model_output", - "selected": "BaseLanguageModel", + "selected": "LanguageModel", "types": [ - "BaseLanguageModel" + "LanguageModel" ], "value": "__UNDEFINED__" } @@ -1067,7 +1067,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> BaseLanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. 
If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> LanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n" }, "input_value": { "advanced": false, @@ -2168,7 +2168,7 @@ "description": "Split text into chunks of a specified length.", "display_name": "Recursive Character Text Splitter", "documentation": "https://docs.langflow.org/components/text-splitters#recursivecharactertextsplitter", - "edited": true, + "edited": false, "field_order": [ "chunk_size", "chunk_overlap", diff --git a/src/frontend/src/utils/styleUtils.ts b/src/frontend/src/utils/styleUtils.ts index 8d53ebb05..1bc4818b5 100644 --- a/src/frontend/src/utils/styleUtils.ts +++ b/src/frontend/src/utils/styleUtils.ts @@ -284,6 +284,7 @@ export const nodeColors: { [char: string]: string } = { Prompt: "#7c3aed", Embeddings: "#10b981", BaseLanguageModel: "#c026d3", + LanguageModel: "#c026d3", }; export const nodeNames: { [char: string]: string } = {
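The substitution is the same throughout the patch: wherever a component or a serialized starter-project node previously imported and annotated BaseLanguageModel from langflow.field_typing, it now uses LanguageModel, and the frontend nodeColors map gains a matching LanguageModel entry so the new output type keeps the color the old one had. A minimal sketch of the post-migration component pattern (a hypothetical component written only for illustration, not part of this patch):

    from langchain_openai import ChatOpenAI

    from langflow.base.models.model import LCModelComponent
    from langflow.field_typing import LanguageModel


    class MinimalModelComponent(LCModelComponent):
        """Hypothetical example of the annotation this migration standardizes on."""

        display_name = "Minimal Model"

        def build_model(self) -> LanguageModel:
            # LanguageModel (from langflow.field_typing) replaces the old
            # BaseLanguageModel annotation; the returned object is still an
            # ordinary LangChain chat model instance.
            return ChatOpenAI(temperature=0.1)

Separately, the embedded build_model in the starter-project code above folds the output_schema list of single-key dicts into one mapping with reduce(operator.ior, ...) and enables JSON mode only when the result is non-empty. A standalone illustration with hypothetical values:

    import operator
    from functools import reduce

    # The DictInput with is_list=True delivers a list of dicts such as:
    output_schema = [{"name": "str"}, {"age": "int"}]

    # operator.ior is the in-place dict merge operator (|=), so the reduce
    # folds the list into a single mapping; an empty list yields {} and the
    # component then leaves JSON mode disabled.
    output_schema_dict = reduce(operator.ior, output_schema or {}, {})
    json_mode = bool(output_schema_dict)
    print(output_schema_dict, json_mode)  # {'name': 'str', 'age': 'int'} True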