refactor: Migrate from BaseLanguageModel to field_typing.LanguageModel

commit e7043443ce (parent 0723ab0143)
59 changed files with 1461 additions and 1455 deletions
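The pattern applied across all 59 files: imports of and annotations against BaseLanguageModel are replaced with langflow's field_typing.LanguageModel alias, which also covers BaseLLM and BaseChatModel. A minimal sketch of the resulting shape (the component below is hypothetical, not a file from this commit; the alias definition is taken from the field_typing constants hunk near the end of this diff):

from typing import Union

from langchain_core.language_models import BaseChatModel, BaseLanguageModel, BaseLLM

# The alias this commit introduces in langflow.field_typing.constants:
LanguageModel = Union[BaseLanguageModel, BaseLLM, BaseChatModel]


class ExampleComponent:  # hypothetical stand-in for a CustomComponent subclass
    def build_config(self):
        # Field types previously advertised BaseLanguageModel; now the alias.
        return {"llm": {"display_name": "LLM", "type": LanguageModel}}

    def build(self, llm: LanguageModel, prompt: str) -> str:
        # Any LLM or chat model satisfies the annotation; .invoke() exists on
        # both (plain LLMs return str, chat models return a message object).
        result = llm.invoke(prompt)
        return result if isinstance(result, str) else str(result.content)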
@@ -1,7 +1,7 @@
 from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_agent

 from langflow.custom import CustomComponent
-from langflow.field_typing import AgentExecutor, BaseLanguageModel
+from langflow.field_typing import AgentExecutor, LanguageModel


 class CSVAgentComponent(CustomComponent):
@@ -11,7 +11,7 @@ class CSVAgentComponent(CustomComponent):
     def build_config(self):
         return {
-            "llm": {"display_name": "LLM", "type": BaseLanguageModel},
+            "llm": {"display_name": "LLM", "type": LanguageModel},
             "path": {"display_name": "Path", "field_type": "file", "suffixes": [".csv"], "file_types": [".csv"]},
             "handle_parsing_errors": {"display_name": "Handle Parse Errors", "advanced": True},
             "agent_type": {
@@ -22,7 +22,7 @@ class CSVAgentComponent(CustomComponent):
         }

     def build(
-        self, llm: BaseLanguageModel, path: str, handle_parsing_errors: bool = True, agent_type: str = "openai-tools"
+        self, llm: LanguageModel, path: str, handle_parsing_errors: bool = True, agent_type: str = "openai-tools"
     ) -> AgentExecutor:
         # Instantiate and return the CSV agent class with the provided llm and path
         return create_csv_agent(

@@ -3,7 +3,7 @@ from langchain_community.agent_toolkits import create_json_agent
 from langchain_community.agent_toolkits.json.toolkit import JsonToolkit

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class JsonAgentComponent(CustomComponent):
@@ -18,7 +18,7 @@ class JsonAgentComponent(CustomComponent):

     def build(
         self,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         toolkit: JsonToolkit,
     ) -> AgentExecutor:
         return create_json_agent(llm=llm, toolkit=toolkit)

@@ -6,7 +6,7 @@ from langchain_community.agent_toolkits.sql.base import create_sql_agent
 from langchain_community.utilities import SQLDatabase

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class SQLAgentComponent(CustomComponent):
@@ -22,7 +22,7 @@ class SQLAgentComponent(CustomComponent):

     def build(
         self,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         database_uri: str,
         verbose: bool = False,
     ) -> Union[AgentExecutor, Callable]:

@@ -4,7 +4,7 @@ from langchain.agents.tool_calling_agent.base import create_tool_calling_agent
 from langchain_core.prompts import ChatPromptTemplate

 from langflow.base.agents.agent import LCAgentComponent
-from langflow.field_typing import BaseLanguageModel, Text, Tool
+from langflow.field_typing import LanguageModel, Text, Tool
 from langflow.schema import Data


@@ -39,7 +39,7 @@ class ToolCallingAgentComponent(LCAgentComponent):
     async def build(
         self,
         input_value: str,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         tools: List[Tool],
         user_prompt: str = "{input}",
         message_history: Optional[List[Data]] = None,

@@ -4,7 +4,7 @@ from langchain.agents import AgentExecutor, create_vectorstore_agent
 from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreToolkit

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class VectorStoreAgentComponent(CustomComponent):
@@ -19,7 +19,7 @@ class VectorStoreAgentComponent(CustomComponent):

     def build(
         self,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         vector_store_toolkit: VectorStoreToolkit,
     ) -> Union[AgentExecutor, Callable]:
         return create_vectorstore_agent(llm=llm, toolkit=vector_store_toolkit)

@@ -2,7 +2,7 @@ from typing import Callable

 from langchain.agents import create_vectorstore_router_agent
 from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreRouterToolkit
-from langchain_core.language_models.base import BaseLanguageModel
+from langflow.field_typing import LanguageModel

 from langflow.custom import CustomComponent

@@ -17,5 +17,5 @@ class VectorStoreRouterAgentComponent(CustomComponent):
         "vectorstoreroutertoolkit": {"display_name": "Vector Store Router Toolkit"},
     }

-    def build(self, llm: BaseLanguageModel, vectorstoreroutertoolkit: VectorStoreRouterToolkit) -> Callable:
+    def build(self, llm: LanguageModel, vectorstoreroutertoolkit: VectorStoreRouterToolkit) -> Callable:
         return create_vectorstore_router_agent(llm=llm, toolkit=vectorstoreroutertoolkit)

@@ -4,7 +4,7 @@ from langchain.agents import create_xml_agent
 from langchain_core.prompts import ChatPromptTemplate

 from langflow.base.agents.agent import LCAgentComponent
-from langflow.field_typing import BaseLanguageModel, Text, Tool
+from langflow.field_typing import LanguageModel, Text, Tool
 from langflow.schema import Data


@@ -72,7 +72,7 @@ class XMLAgentComponent(LCAgentComponent):
     async def build(
         self,
         input_value: str,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         tools: List[Tool],
         user_prompt: str = "{input}",
         system_message: str = "You are a helpful assistant",

@@ -3,7 +3,7 @@ from typing import Optional
 from langchain.chains import ConversationChain

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, BaseMemory, Text
+from langflow.field_typing import BaseMemory, LanguageModel, Text


 class ConversationChainComponent(CustomComponent):
@@ -27,7 +27,7 @@ class ConversationChainComponent(CustomComponent):
     def build(
         self,
         input_value: Text,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         memory: Optional[BaseMemory] = None,
     ) -> Text:
         if memory is None:

@@ -4,7 +4,7 @@ from langchain.chains.llm import LLMChain
 from langchain_core.prompts import PromptTemplate

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, BaseMemory, Text
+from langflow.field_typing import BaseMemory, LanguageModel, Text


 class LLMChainComponent(CustomComponent):
@@ -21,7 +21,7 @@ class LLMChainComponent(CustomComponent):
     def build(
         self,
         template: Text,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         memory: Optional[BaseMemory] = None,
     ) -> Text:
         prompt = PromptTemplate.from_template(template)

@@ -1,7 +1,7 @@
 from langchain.chains import LLMCheckerChain

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text


 class LLMCheckerChainComponent(CustomComponent):
@@ -21,7 +21,7 @@ class LLMCheckerChainComponent(CustomComponent):
     def build(
         self,
         input_value: Text,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
     ) -> Text:
         chain = LLMCheckerChain.from_llm(llm=llm)
         response = chain.invoke({chain.input_key: input_value})

@@ -3,7 +3,7 @@ from typing import Optional
 from langchain.chains import LLMChain, LLMMathChain

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, BaseMemory, Text
+from langflow.field_typing import BaseMemory, LanguageModel, Text


 class LLMMathChainComponent(CustomComponent):
@@ -27,7 +27,7 @@ class LLMMathChainComponent(CustomComponent):
     def build(
         self,
         input_value: Text,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         llm_chain: LLMChain,
         input_key: str = "question",
         output_key: str = "answer",

@@ -4,7 +4,7 @@ from langchain.chains.retrieval_qa.base import RetrievalQA
 from langchain_core.documents import Document

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, BaseMemory, BaseRetriever, Text
+from langflow.field_typing import BaseMemory, BaseRetriever, LanguageModel, Text
 from langflow.schema import Data


@@ -29,7 +29,7 @@ class RetrievalQAComponent(CustomComponent):

     def build(
         self,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         chain_type: str,
         retriever: BaseRetriever,
         input_value: str = "",

@@ -4,7 +4,7 @@ from langchain.chains import RetrievalQAWithSourcesChain
 from langchain_core.documents import Document

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, BaseMemory, BaseRetriever, Text
+from langflow.field_typing import BaseMemory, BaseRetriever, LanguageModel, Text


 class RetrievalQAWithSourcesChainComponent(CustomComponent):
@@ -32,7 +32,7 @@ class RetrievalQAWithSourcesChainComponent(CustomComponent):
         self,
         input_value: Text,
         retriever: BaseRetriever,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         chain_type: str,
         memory: Optional[BaseMemory] = None,
         return_source_documents: Optional[bool] = True,

@@ -6,7 +6,7 @@ from langchain_core.prompts import PromptTemplate
 from langchain_core.runnables import Runnable

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text


 class SQLGeneratorComponent(CustomComponent):
@@ -35,7 +35,7 @@ class SQLGeneratorComponent(CustomComponent):
         self,
         input_value: Text,
         db: SQLDatabase,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         top_k: int = 5,
         prompt: Optional[Text] = None,
     ) -> Text:

@@ -5,7 +5,7 @@ from langchain_core.prompts.chat import HumanMessagePromptTemplate, SystemMessag

 from langflow.base.agents.agent import LCAgentComponent
 from langflow.base.agents.utils import AGENTS, AgentSpec, get_agents_list
-from langflow.field_typing import BaseLanguageModel, Text, Tool
+from langflow.field_typing import LanguageModel, Text, Tool
 from langflow.schema import Data
 from langflow.schema.dotdict import dotdict

@@ -145,7 +145,7 @@ class AgentComponent(LCAgentComponent):
         self,
         agent_name: str,
         input_value: str,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         tools: List[Tool],
         system_message: str = "You are a helpful assistant. Help the user answer any questions.",
         user_prompt: str = "{input}",

@@ -2,14 +2,14 @@ from langchain_core.messages import BaseMessage
 from langchain_core.prompts import PromptTemplate

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text


 class ShouldRunNextComponent(CustomComponent):
     display_name = "Should Run Next"
     description = "Determines if a vertex is runnable."

-    def build(self, llm: BaseLanguageModel, question: str, context: str, retries: int = 3) -> Text:
+    def build(self, llm: LanguageModel, question: str, context: str, retries: int = 3) -> Text:
         template = "Given the following question and the context below, answer with a yes or no.\n\n{error_message}\n\nQuestion: {question}\n\nContext: {context}\n\nAnswer:"

         prompt = PromptTemplate.from_template(template)

@@ -3,7 +3,7 @@ from typing import Optional
 from langchain_community.llms.bedrock import Bedrock

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class AmazonBedrockComponent(CustomComponent):
@@ -46,7 +46,7 @@ class AmazonBedrockComponent(CustomComponent):
         endpoint_url: Optional[str] = None,
         streaming: bool = False,
         cache: Optional[bool] = None,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         try:
             output = Bedrock(
                 credentials_profile_name=credentials_profile_name,

@@ -1,10 +1,10 @@
 from typing import Optional

 from langchain_anthropic import ChatAnthropic
-from langchain_core.language_models import BaseLanguageModel
 from pydantic.v1 import SecretStr

 from langflow.custom import CustomComponent
+from langflow.field_typing import LanguageModel


 class ChatAntropicSpecsComponent(CustomComponent):
@@ -57,7 +57,7 @@ class ChatAntropicSpecsComponent(CustomComponent):
         max_tokens: Optional[int] = 1000,
         temperature: Optional[float] = None,
         api_endpoint: Optional[str] = None,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         # Set default API endpoint if not provided
         if not api_endpoint:
             api_endpoint = "https://api.anthropic.com"

@@ -1,10 +1,10 @@
 from typing import Optional

-from langchain_core.language_models import BaseLanguageModel
 from langchain_openai import AzureChatOpenAI
 from pydantic.v1 import SecretStr

 from langflow.custom import CustomComponent
+from langflow.field_typing import LanguageModel


 class AzureChatOpenAISpecsComponent(CustomComponent):
@@ -81,7 +81,7 @@ class AzureChatOpenAISpecsComponent(CustomComponent):
         api_version: str,
         temperature: float = 0.7,
         max_tokens: Optional[int] = 1000,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         if api_key:
             azure_api_key = SecretStr(api_key)
         else:

@@ -1,11 +1,10 @@
 from typing import Optional

 from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
-
 from pydantic.v1 import SecretStr

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class QianfanChatEndpointComponent(CustomComponent):
@@ -80,7 +79,7 @@ class QianfanChatEndpointComponent(CustomComponent):
         temperature: Optional[float] = None,
         penalty_score: Optional[float] = None,
         endpoint: Optional[str] = None,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         try:
             output = QianfanChatEndpoint(  # type: ignore
                 model=model,

@@ -3,7 +3,7 @@ from typing import Optional
 from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class QianfanLLMEndpointComponent(CustomComponent):
@@ -78,7 +78,7 @@ class QianfanLLMEndpointComponent(CustomComponent):
         temperature: Optional[float] = None,
         penalty_score: Optional[float] = None,
         endpoint: Optional[str] = None,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         try:
             output = QianfanLLMEndpoint(  # type: ignore
                 model=model,

@@ -3,9 +3,7 @@ from typing import Optional
 from langchain_anthropic import ChatAnthropic
 from pydantic.v1.types import SecretStr
-
-
 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class AnthropicLLM(CustomComponent):
@@ -69,7 +67,7 @@ class AnthropicLLM(CustomComponent):
         max_tokens: Optional[int] = 1000,
         temperature: Optional[float] = None,
         anthropic_api_url: Optional[str] = None,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         # Set default API endpoint if not provided
         if not anthropic_api_url:
             anthropic_api_url = "https://api.anthropic.com"

@@ -3,7 +3,7 @@ from typing import Any, Dict, Optional
 from langchain_community.chat_models.litellm import ChatLiteLLM, ChatLiteLLMException

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class ChatLiteLLMComponent(CustomComponent):
@@ -116,7 +116,7 @@ class ChatLiteLLMComponent(CustomComponent):
         max_tokens: int = 256,
         max_retries: int = 6,
         verbose: bool = False,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         try:
             import litellm  # type: ignore


@@ -4,7 +4,7 @@ from langchain_mistralai import ChatMistralAI
 from pydantic.v1 import SecretStr

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class MistralAIModelComponent(CustomComponent):
@@ -68,7 +68,7 @@ class MistralAIModelComponent(CustomComponent):
         mistral_api_key: Optional[str] = None,
         max_tokens: Optional[int] = None,
         mistral_api_base: Optional[str] = None,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         # Set default API endpoint if not provided
         if not mistral_api_base:
             mistral_api_base = "https://api.mistral.ai"

@@ -5,7 +5,7 @@ from pydantic.v1 import SecretStr

 from langflow.base.models.openai_constants import MODEL_NAMES
 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, NestedDict
+from langflow.field_typing import LanguageModel, NestedDict


 class ChatOpenAIComponent(CustomComponent):
@@ -57,7 +57,7 @@ class ChatOpenAIComponent(CustomComponent):
         openai_api_base: Optional[str] = None,
         openai_api_key: Optional[str] = None,
         temperature: float = 0.7,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         if not openai_api_base:
             openai_api_base = "https://api.openai.com/v1"
         if openai_api_key:

@@ -1,8 +1,10 @@
 from typing import Optional

 from langchain_community.chat_models.vertexai import ChatVertexAI
+
+
 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class ChatVertexAIComponent(CustomComponent):
@@ -70,7 +72,7 @@ class ChatVertexAIComponent(CustomComponent):
         top_k: int = 40,
         top_p: float = 0.95,
         verbose: bool = False,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         return ChatVertexAI(
             credentials=credentials,
             location=location,

@@ -1,7 +1,7 @@
 from typing import Optional

 from langchain_cohere import ChatCohere
-from langchain_core.language_models.base import BaseLanguageModel
+from langflow.field_typing import LanguageModel
 from pydantic.v1 import SecretStr

 from langflow.custom import CustomComponent
@@ -29,7 +29,7 @@ class CohereComponent(CustomComponent):
         cohere_api_key: str,
         max_tokens: Optional[int] = 256,
         temperature: float = 0.75,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         if cohere_api_key:
             api_key = SecretStr(cohere_api_key)
         else:

@@ -4,7 +4,7 @@ from langchain_google_genai import ChatGoogleGenerativeAI  # type: ignore
 from pydantic.v1.types import SecretStr

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, RangeSpec
+from langflow.field_typing import LanguageModel, RangeSpec


 class GoogleGenerativeAIComponent(CustomComponent):
@@ -62,7 +62,7 @@ class GoogleGenerativeAIComponent(CustomComponent):
         top_k: Optional[int] = None,
         top_p: Optional[float] = None,
         n: Optional[int] = 1,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         return ChatGoogleGenerativeAI(
             model=model,
             max_output_tokens=max_output_tokens or None,  # type: ignore

@@ -6,7 +6,7 @@ from pydantic.v1 import SecretStr
 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.groq_constants import MODEL_NAMES
 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class GroqModelSpecs(LCModelComponent):
@@ -74,7 +74,7 @@ class GroqModelSpecs(LCModelComponent):
         temperature: float = 0.1,
         n: Optional[int] = 1,
         stream: bool = False,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         return ChatGroq(
             model_name=model_name,
             max_tokens=max_tokens or None,  # type: ignore

@@ -3,7 +3,7 @@ from typing import Optional
 from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class HuggingFaceEndpointsComponent(CustomComponent):
@@ -32,7 +32,7 @@ class HuggingFaceEndpointsComponent(CustomComponent):
         task: str = "text2text-generation",
         huggingfacehub_api_token: Optional[str] = None,
         model_kwargs: Optional[dict] = None,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         try:
             output = HuggingFaceEndpoint(  # type: ignore
                 endpoint_url=endpoint_url,

@@ -3,7 +3,7 @@ from typing import List, Optional
 from langchain_community.llms.ollama import Ollama

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class OllamaLLM(CustomComponent):
@@ -118,7 +118,7 @@ class OllamaLLM(CustomComponent):
         tfs_z: Optional[float] = None,
         top_k: Optional[int] = None,
         top_p: Optional[int] = None,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         if not base_url:
             base_url = "http://localhost:11434"


@@ -1,8 +1,10 @@
 from typing import Dict, Optional

 from langchain_community.llms.vertexai import VertexAI
+
+
 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class VertexAIComponent(CustomComponent):
@@ -128,7 +130,7 @@ class VertexAIComponent(CustomComponent):
         top_p: float = 0.95,
         tuned_model_name: Optional[str] = None,
         verbose: bool = False,
-    ) -> BaseLanguageModel:
+    ) -> LanguageModel:
         return VertexAI(
             credentials=credentials,
             location=location,

@@ -2,7 +2,7 @@ from langchain_aws import ChatBedrock

 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text
 from langflow.io import BoolInput, DictInput, DropdownInput, MessageInput, Output, StrInput


@@ -78,7 +78,7 @@ class AmazonBedrockComponent(LCModelComponent):
         self.status = result
         return result

-    def build_model(self) -> BaseLanguageModel:
+    def build_model(self) -> LanguageModel:
         model_id = self.model_id
         credentials_profile_name = self.credentials_profile_name
         region_name = self.region_name

@@ -3,7 +3,7 @@ from pydantic.v1 import SecretStr

 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text
 from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, Output, SecretStrInput, TextInput


@@ -82,7 +82,7 @@ class AnthropicModelComponent(LCModelComponent):
         self.status = result.content
         return prefill + result.content

-    def build_model(self) -> BaseLanguageModel:
+    def build_model(self) -> LanguageModel:
         model = self.model
         anthropic_api_key = self.anthropic_api_key
         max_tokens = self.max_tokens

@@ -3,8 +3,8 @@ from pydantic.v1 import SecretStr

 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel, Text
-from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, StrInput, SecretStrInput
+from langflow.field_typing import LanguageModel, Text
+from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, SecretStrInput, StrInput


 class AzureChatOpenAIComponent(LCModelComponent):
@@ -87,7 +87,7 @@ class AzureChatOpenAIComponent(LCModelComponent):
         self.status = result
         return result

-    def model_response(self) -> BaseLanguageModel:
+    def model_response(self) -> LanguageModel:
         model = self.model
         azure_endpoint = self.azure_endpoint
         azure_deployment = self.azure_deployment

@@ -3,8 +3,8 @@ from pydantic.v1 import SecretStr

 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel, Text
-from langflow.io import BoolInput, FloatInput, Output, SecretStrInput, TextInput, DropdownInput
+from langflow.field_typing import LanguageModel, Text
+from langflow.io import BoolInput, DropdownInput, FloatInput, Output, SecretStrInput, TextInput


 class QianfanChatEndpointComponent(LCModelComponent):
@@ -98,7 +98,7 @@ class QianfanChatEndpointComponent(LCModelComponent):
         self.status = result
         return result

-    def build_model(self) -> BaseLanguageModel:
+    def build_model(self) -> LanguageModel:
         model = self.model
         qianfan_ak = self.qianfan_ak
         qianfan_sk = self.qianfan_sk

@@ -1,9 +1,10 @@
 from typing import Optional

 from langchain_community.chat_models.litellm import ChatLiteLLM, ChatLiteLLMException
+
 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel
 from langflow.io import (
     BoolInput,
     DictInput,
@@ -137,7 +138,7 @@ class ChatLiteLLMModelComponent(LCModelComponent):
         self.status = message
         return message

-    def build_model(self) -> BaseLanguageModel:
+    def build_model(self) -> LanguageModel:
         try:
             import litellm  # type: ignore


@@ -4,7 +4,7 @@ from pydantic.v1 import SecretStr

 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text
 from langflow.io import BoolInput, FloatInput, MessageInput, Output, SecretStrInput, StrInput


@@ -46,7 +46,7 @@ class CohereComponent(LCModelComponent):
         self.status = result
         return result

-    def build_model(self) -> BaseLanguageModel | BaseChatModel:
+    def build_model(self) -> LanguageModel | BaseChatModel:
         cohere_api_key = self.cohere_api_key
         temperature = self.temperature


@@ -2,7 +2,7 @@ from pydantic.v1 import SecretStr

 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text
 from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, SecretStrInput, StrInput


@@ -82,7 +82,7 @@ class GoogleGenerativeAIComponent(LCModelComponent):
         self.status = result
         return result

-    def build_model(self) -> BaseLanguageModel:
+    def build_model(self) -> LanguageModel:
         try:
             from langchain_google_genai import ChatGoogleGenerativeAI
         except ImportError:

@@ -4,7 +4,7 @@ from pydantic.v1 import SecretStr
 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.groq_constants import MODEL_NAMES
 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text
 from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, Output, SecretStrInput, TextInput


@@ -82,7 +82,7 @@ class GroqModel(LCModelComponent):
         self.status = result
         return result

-    def build_model(self) -> BaseLanguageModel:
+    def build_model(self) -> LanguageModel:
         groq_api_key = self.groq_api_key
         model_name = self.model_name
         max_tokens = self.max_tokens

@@ -3,7 +3,7 @@ from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint

 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text
 from langflow.io import BoolInput, DictInput, DropdownInput, MessageInput, Output, SecretStrInput, StrInput


@@ -45,7 +45,7 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
         self.status = result
         return result

-    def build_model(self) -> BaseLanguageModel:
+    def build_model(self) -> LanguageModel:
         endpoint_url = self.endpoint_url
         task = self.task
         huggingfacehub_api_token = self.huggingfacehub_api_token

@@ -3,7 +3,7 @@ from pydantic.v1 import SecretStr

 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text
 from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageInput, Output, SecretStrInput, StrInput


@@ -79,7 +79,7 @@ class MistralAIModelComponent(LCModelComponent):
         self.status = result
         return result

-    def build_model(self) -> BaseLanguageModel:
+    def build_model(self) -> LanguageModel:
         mistral_api_key = self.mistral_api_key
         temperature = self.temperature
         model_name = self.model_name

@@ -2,7 +2,7 @@ from langchain_community.chat_models import ChatOllama
 from langchain_core.language_models.chat_models import BaseChatModel

 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text
 from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, Output, StrInput


@@ -177,7 +177,7 @@ class ChatOllamaComponent(LCModelComponent):
         self.status = result
         return result

-    def build_model(self) -> BaseLanguageModel | BaseChatModel:
+    def build_model(self) -> LanguageModel | BaseChatModel:
         # Mapping mirostat settings to their corresponding values
         mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}


@@ -7,7 +7,7 @@ from pydantic.v1 import SecretStr
 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.model import LCModelComponent
 from langflow.base.models.openai_constants import MODEL_NAMES
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel
 from langflow.inputs import (
     BoolInput,
     DictInput,
@@ -89,7 +89,7 @@ class OpenAIModelComponent(LCModelComponent):
         self.status = result
         return result

-    def build_model(self) -> BaseLanguageModel:
+    def build_model(self) -> LanguageModel:
         # self.output_schea is a list of dictionaries
         # let's convert it to a dictionary
         output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})

@@ -1,7 +1,9 @@
 from langchain_google_vertexai import ChatVertexAI

+
+
 from langflow.base.constants import STREAM_INFO_TEXT
 from langflow.base.models.model import LCModelComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text
 from langflow.io import BoolInput, FileInput, FloatInput, IntInput, MessageInput, MultilineInput, Output, StrInput

@@ -60,7 +62,7 @@ class ChatVertexAIComponent(LCModelComponent):
         self.status = result
         return result

-    def build_model(self) -> BaseLanguageModel:
+    def build_model(self) -> LanguageModel:
         credentials = self.credentials
         location = self.location
         max_output_tokens = self.max_output_tokens

@@ -3,7 +3,7 @@ from typing import Optional
 from langchain.retrievers import MultiQueryRetriever

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, BaseRetriever, PromptTemplate, Text
+from langflow.field_typing import BaseRetriever, LanguageModel, PromptTemplate, Text


 class MultiQueryRetrieverComponent(CustomComponent):
@@ -39,7 +39,7 @@ class MultiQueryRetrieverComponent(CustomComponent):

     def build(
         self,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         retriever: BaseRetriever,
         prompt: Optional[Text] = None,
         parser_key: str = "lines",

@@ -4,7 +4,7 @@ from langchain.retrievers.self_query.base import SelfQueryRetriever
 from langchain_core.vectorstores import VectorStore

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, Text
+from langflow.field_typing import LanguageModel, Text
 from langflow.schema import Data
 from langflow.schema.message import Message


@@ -45,7 +45,7 @@ class SelfQueryRetrieverComponent(CustomComponent):
         vectorstore: VectorStore,
         attribute_infos: list[Data],
         document_content_description: Text,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
     ) -> Data:
         metadata_field_infos = [AttributeInfo(**value.data) for value in attribute_infos]
         self_query_retriever = SelfQueryRetriever.from_llm(

@@ -3,11 +3,11 @@ from typing import List

 from langchain.chains.query_constructor.base import AttributeInfo
 from langchain.retrievers.self_query.base import SelfQueryRetriever
-from langchain_core.language_models import BaseLanguageModel
 from langchain_core.retrievers import BaseRetriever
 from langchain_core.vectorstores import VectorStore

 from langflow.custom import CustomComponent
+from langflow.field_typing.constants import LanguageModel


 class VectaraSelfQueryRetriverComponent(CustomComponent):
@@ -38,7 +38,7 @@ class VectaraSelfQueryRetriverComponent(CustomComponent):
         self,
         vectorstore: VectorStore,
         document_content_description: str,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
         metadata_field_info: List[str],
     ) -> BaseRetriever:
         metadata_field_obj = []

@@ -6,7 +6,7 @@ from langchain_community.tools.json.tool import JsonSpec
 from langchain_community.utilities.requests import TextRequestsWrapper

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel
+from langflow.field_typing import LanguageModel


 class OpenAPIToolkitComponent(CustomComponent):
@@ -19,7 +19,7 @@ class OpenAPIToolkitComponent(CustomComponent):
         "requests_wrapper": {"display_name": "Text Requests Wrapper"},
     }

-    def build(self, llm: BaseLanguageModel, path: str, allow_dangerous_requests: bool = False) -> BaseToolkit:
+    def build(self, llm: LanguageModel, path: str, allow_dangerous_requests: bool = False) -> BaseToolkit:
         if path.endswith("yaml") or path.endswith("yml"):
             yaml_dict = yaml.load(open(path, "r"), Loader=yaml.FullLoader)
             spec = JsonSpec(dict_=yaml_dict)

@@ -3,7 +3,7 @@ from typing import List, Union
 from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo, VectorStoreRouterToolkit

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, Tool
+from langflow.field_typing import LanguageModel, Tool


 class VectorStoreRouterToolkitComponent(CustomComponent):
@@ -16,9 +16,7 @@ class VectorStoreRouterToolkitComponent(CustomComponent):
         "llm": {"display_name": "LLM"},
     }

-    def build(
-        self, vectorstores: List[VectorStoreInfo], llm: BaseLanguageModel
-    ) -> Union[Tool, VectorStoreRouterToolkit]:
+    def build(self, vectorstores: List[VectorStoreInfo], llm: LanguageModel) -> Union[Tool, VectorStoreRouterToolkit]:
         print("vectorstores", vectorstores)
         print("llm", llm)
         return VectorStoreRouterToolkit(vectorstores=vectorstores, llm=llm)

@@ -3,7 +3,7 @@ from typing import Union
 from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo, VectorStoreToolkit

 from langflow.custom import CustomComponent
-from langflow.field_typing import BaseLanguageModel, Tool
+from langflow.field_typing import LanguageModel, Tool


 class VectorStoreToolkitComponent(CustomComponent):
@@ -19,6 +19,6 @@ class VectorStoreToolkitComponent(CustomComponent):
     def build(
         self,
         vectorstore_info: VectorStoreInfo,
-        llm: BaseLanguageModel,
+        llm: LanguageModel,
     ) -> Union[Tool, VectorStoreToolkit]:
         return VectorStoreToolkit(vectorstore_info=vectorstore_info, llm=llm)

@@ -15,7 +15,9 @@ from langchain_core.tools import Tool
 from langchain_core.vectorstores import VectorStore
 from langchain_text_splitters import TextSplitter

+
 NestedDict = Dict[str, Union[str, Dict]]
+LanguageModel = Union[BaseLanguageModel, BaseLLM, BaseChatModel]


 class Object:
@@ -58,4 +60,5 @@ CUSTOM_COMPONENT_SUPPORTED_TYPES = {
     "Text": Text,
     "Object": Object,
     "Callable": Callable,
+    "LanguageModel": LanguageModel,
 }

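Note on the alias itself: because LanguageModel is a typing Union, it works in annotations and in the CUSTOM_COMPONENT_SUPPORTED_TYPES registry above, but a runtime check needs its members unpacked. A small illustrative sketch (not code from this commit):

from typing import Union, get_args

from langchain_core.language_models import BaseChatModel, BaseLanguageModel, BaseLLM

LanguageModel = Union[BaseLanguageModel, BaseLLM, BaseChatModel]


def is_language_model(obj: object) -> bool:
    # isinstance() rejects a typing.Union directly; unpack the members first.
    return isinstance(obj, get_args(LanguageModel))
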
@@ -611,9 +611,9 @@
         "display_name": "Language Model",
         "method": "build_model",
         "name": "model_output",
-        "selected": "BaseLanguageModel",
+        "selected": "LanguageModel",
         "types": [
-          "BaseLanguageModel"
+          "LanguageModel"
         ],
         "value": "__UNDEFINED__"
       }
@@ -637,7 +637,7 @@
         "show": true,
         "title_case": false,
         "type": "code",
"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> BaseLanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n 
model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n"
"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> LanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n 
base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n"
       },
       "input_value": {
         "advanced": false,

File diff suppressed because it is too large

@@ -630,7 +630,7 @@
       "description": "A generic file loader.",
       "display_name": "File",
       "documentation": "",
-      "edited": true,
+      "edited": false,
       "field_order": [
         "path",
         "silent_errors"
@@ -785,9 +785,9 @@
         "display_name": "Language Model",
         "method": "build_model",
         "name": "model_output",
-        "selected": "BaseLanguageModel",
+        "selected": "LanguageModel",
         "types": [
-          "BaseLanguageModel"
+          "LanguageModel"
         ],
         "value": "__UNDEFINED__"
       }
@@ -811,7 +811,7 @@
         "show": true,
         "title_case": false,
         "type": "code",
"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> BaseLanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n 
model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n"
"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n\n inputs = [\n MessageInput(name=\"input_value\", display_name=\"Input\"),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"openai_api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n StrInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"System message to pass to the model.\",\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n ]\n\n def text_response(self) -> Message:\n input_value = self.input_value\n stream = self.stream\n system_message = self.system_message\n output = self.build_model()\n result = self.get_chat_result(output, stream, input_value, system_message)\n self.status = result\n return result\n\n def build_model(self) -> LanguageModel:\n # self.output_schea is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.openai_api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict)\n seed = self.seed\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs or {},\n model=model_name,\n 
base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n if json_mode:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n return output\n"
|
||||
},
|
||||
"input_value": {
|
||||
"advanced": false,
|
||||
|
|
|
|||
|
|
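Note: the embedded component code above folds the "Schema" table (a list of single-key dicts from the DictInput) into one dict before deciding whether to enable JSON mode. A minimal runnable sketch of that merge, using hypothetical schema rows that are not taken from this diff:

    import operator
    from functools import reduce

    # Each DictInput row arrives as its own dict; operator.ior is the in-place
    # dict merge operator (|=), so reduce folds the list into a single mapping.
    output_schema = [{"name": "str"}, {"age": "int"}]  # hypothetical rows
    output_schema_dict = reduce(operator.ior, output_schema or {}, {})
    assert output_schema_dict == {"name": "str", "age": "int"}

    # JSON mode is enabled only when the merged schema is non-empty.
    json_mode = bool(output_schema_dict)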
@@ -687,9 +687,9 @@
 "display_name": "Language Model",
 "method": "build_model",
 "name": "model_output",
-"selected": "BaseLanguageModel",
+"selected": "LanguageModel",
 "types": [
-"BaseLanguageModel"
+"LanguageModel"
 ],
 "value": "__UNDEFINED__"
 }
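Note: the frontend "types"/"selected" entries in these flow files must carry the same name that the components now use in their build() annotations, or edge validation stops matching. The exact definition of the new alias is not part of this diff; as a loudly hedged assumption, it is expected to look roughly like:

    from typing import Union

    from langchain_core.language_models import BaseChatModel
    from langchain_core.language_models.llms import LLM

    # Assumed shape of the alias (not quoted from the commit): any LangChain
    # completion or chat model counts as a LanguageModel.
    LanguageModel = Union[LLM, BaseChatModel]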
@@ -713,7 +713,7 @@
 "show": true,
 "title_case": false,
 "type": "code",
-"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n    BoolInput,\n    DictInput,\n    DropdownInput,\n    FloatInput,\n    IntInput,\n    MessageInput,\n    SecretStrInput,\n    StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n    display_name = \"OpenAI\"\n    description = \"Generates text using OpenAI LLMs.\"\n    icon = \"OpenAI\"\n\n    inputs = [\n        MessageInput(name=\"input_value\", display_name=\"Input\"),\n        IntInput(\n            name=\"max_tokens\",\n            display_name=\"Max Tokens\",\n            advanced=True,\n            info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n        ),\n        DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n        DictInput(\n            name=\"output_schema\",\n            is_list=True,\n            display_name=\"Schema\",\n            advanced=True,\n            info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n        ),\n        DropdownInput(\n            name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n        ),\n        StrInput(\n            name=\"openai_api_base\",\n            display_name=\"OpenAI API Base\",\n            advanced=True,\n            info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n        ),\n        SecretStrInput(\n            name=\"openai_api_key\",\n            display_name=\"OpenAI API Key\",\n            info=\"The OpenAI API Key to use for the OpenAI model.\",\n            advanced=False,\n            value=\"OPENAI_API_KEY\",\n        ),\n        FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n        BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n        StrInput(\n            name=\"system_message\",\n            display_name=\"System Message\",\n            info=\"System message to pass to the model.\",\n            advanced=True,\n        ),\n        IntInput(\n            name=\"seed\",\n            display_name=\"Seed\",\n            info=\"The seed controls the reproducibility of the job.\",\n            advanced=True,\n            value=1,\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n        Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n    ]\n\n    def text_response(self) -> Message:\n        input_value = self.input_value\n        stream = self.stream\n        system_message = self.system_message\n        output = self.build_model()\n        result = self.get_chat_result(output, stream, input_value, system_message)\n        self.status = result\n        return result\n\n    def build_model(self) -> BaseLanguageModel:\n        # self.output_schea is a list of dictionaries\n        # let's convert it to a dictionary\n        output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n        openai_api_key = self.openai_api_key\n        temperature = self.temperature\n        model_name: str = self.model_name\n        max_tokens = self.max_tokens\n        model_kwargs = self.model_kwargs\n        openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n        json_mode = bool(output_schema_dict)\n        seed = self.seed\n        if openai_api_key:\n            api_key = SecretStr(openai_api_key)\n        else:\n            api_key = None\n        output = ChatOpenAI(\n            max_tokens=max_tokens or None,\n            model_kwargs=model_kwargs or {},\n            model=model_name,\n            base_url=openai_api_base,\n            api_key=api_key,\n            temperature=temperature or 0.1,\n            seed=seed,\n        )\n        if json_mode:\n            output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n        return output\n"
+"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n    BoolInput,\n    DictInput,\n    DropdownInput,\n    FloatInput,\n    IntInput,\n    MessageInput,\n    SecretStrInput,\n    StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n    display_name = \"OpenAI\"\n    description = \"Generates text using OpenAI LLMs.\"\n    icon = \"OpenAI\"\n\n    inputs = [\n        MessageInput(name=\"input_value\", display_name=\"Input\"),\n        IntInput(\n            name=\"max_tokens\",\n            display_name=\"Max Tokens\",\n            advanced=True,\n            info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n        ),\n        DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n        DictInput(\n            name=\"output_schema\",\n            is_list=True,\n            display_name=\"Schema\",\n            advanced=True,\n            info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n        ),\n        DropdownInput(\n            name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n        ),\n        StrInput(\n            name=\"openai_api_base\",\n            display_name=\"OpenAI API Base\",\n            advanced=True,\n            info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n        ),\n        SecretStrInput(\n            name=\"openai_api_key\",\n            display_name=\"OpenAI API Key\",\n            info=\"The OpenAI API Key to use for the OpenAI model.\",\n            advanced=False,\n            value=\"OPENAI_API_KEY\",\n        ),\n        FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n        BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n        StrInput(\n            name=\"system_message\",\n            display_name=\"System Message\",\n            info=\"System message to pass to the model.\",\n            advanced=True,\n        ),\n        IntInput(\n            name=\"seed\",\n            display_name=\"Seed\",\n            info=\"The seed controls the reproducibility of the job.\",\n            advanced=True,\n            value=1,\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n        Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n    ]\n\n    def text_response(self) -> Message:\n        input_value = self.input_value\n        stream = self.stream\n        system_message = self.system_message\n        output = self.build_model()\n        result = self.get_chat_result(output, stream, input_value, system_message)\n        self.status = result\n        return result\n\n    def build_model(self) -> LanguageModel:\n        # self.output_schea is a list of dictionaries\n        # let's convert it to a dictionary\n        output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n        openai_api_key = self.openai_api_key\n        temperature = self.temperature\n        model_name: str = self.model_name\n        max_tokens = self.max_tokens\n        model_kwargs = self.model_kwargs\n        openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n        json_mode = bool(output_schema_dict)\n        seed = self.seed\n        if openai_api_key:\n            api_key = SecretStr(openai_api_key)\n        else:\n            api_key = None\n        output = ChatOpenAI(\n            max_tokens=max_tokens or None,\n            model_kwargs=model_kwargs or {},\n            model=model_name,\n            base_url=openai_api_base,\n            api_key=api_key,\n            temperature=temperature or 0.1,\n            seed=seed,\n        )\n        if json_mode:\n            output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n        return output\n"
 },
 "input_value": {
 "advanced": false,
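Note: when a schema is provided, build_model wraps ChatOpenAI with with_structured_output(..., method="json_mode"), and the prompt must mention JSON explicitly, as the Schema input's info text warns. A hedged usage sketch, assuming a valid OPENAI_API_KEY in the environment and an illustrative model name:

    from langchain_openai import ChatOpenAI

    # Model name is illustrative, not taken from this diff.
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.1)
    structured = llm.with_structured_output(schema={"answer": "str"}, method="json_mode")
    # Uncommenting the call below performs a live API request:
    # result = structured.invoke("Answer in JSON with a single key 'answer': what is 2 + 2?")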
@@ -1099,9 +1099,9 @@
 "display_name": "Language Model",
 "method": "build_model",
 "name": "model_output",
-"selected": "BaseLanguageModel",
+"selected": "LanguageModel",
 "types": [
-"BaseLanguageModel"
+"LanguageModel"
 ],
 "value": "__UNDEFINED__"
 }
@@ -1124,7 +1124,7 @@
 "show": true,
 "title_case": false,
 "type": "code",
-"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n    BoolInput,\n    DictInput,\n    DropdownInput,\n    FloatInput,\n    IntInput,\n    MessageInput,\n    SecretStrInput,\n    StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n    display_name = \"OpenAI\"\n    description = \"Generates text using OpenAI LLMs.\"\n    icon = \"OpenAI\"\n\n    inputs = [\n        MessageInput(name=\"input_value\", display_name=\"Input\"),\n        IntInput(\n            name=\"max_tokens\",\n            display_name=\"Max Tokens\",\n            advanced=True,\n            info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n        ),\n        DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n        DictInput(\n            name=\"output_schema\",\n            is_list=True,\n            display_name=\"Schema\",\n            advanced=True,\n            info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n        ),\n        DropdownInput(\n            name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n        ),\n        StrInput(\n            name=\"openai_api_base\",\n            display_name=\"OpenAI API Base\",\n            advanced=True,\n            info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n        ),\n        SecretStrInput(\n            name=\"openai_api_key\",\n            display_name=\"OpenAI API Key\",\n            info=\"The OpenAI API Key to use for the OpenAI model.\",\n            advanced=False,\n            value=\"OPENAI_API_KEY\",\n        ),\n        FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n        BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n        StrInput(\n            name=\"system_message\",\n            display_name=\"System Message\",\n            info=\"System message to pass to the model.\",\n            advanced=True,\n        ),\n        IntInput(\n            name=\"seed\",\n            display_name=\"Seed\",\n            info=\"The seed controls the reproducibility of the job.\",\n            advanced=True,\n            value=1,\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n        Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n    ]\n\n    def text_response(self) -> Message:\n        input_value = self.input_value\n        stream = self.stream\n        system_message = self.system_message\n        output = self.build_model()\n        result = self.get_chat_result(output, stream, input_value, system_message)\n        self.status = result\n        return result\n\n    def build_model(self) -> BaseLanguageModel:\n        # self.output_schea is a list of dictionaries\n        # let's convert it to a dictionary\n        output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n        openai_api_key = self.openai_api_key\n        temperature = self.temperature\n        model_name: str = self.model_name\n        max_tokens = self.max_tokens\n        model_kwargs = self.model_kwargs\n        openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n        json_mode = bool(output_schema_dict)\n        seed = self.seed\n        if openai_api_key:\n            api_key = SecretStr(openai_api_key)\n        else:\n            api_key = None\n        output = ChatOpenAI(\n            max_tokens=max_tokens or None,\n            model_kwargs=model_kwargs or {},\n            model=model_name,\n            base_url=openai_api_base,\n            api_key=api_key,\n            temperature=temperature or 0.1,\n            seed=seed,\n        )\n        if json_mode:\n            output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n        return output\n"
+"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n    BoolInput,\n    DictInput,\n    DropdownInput,\n    FloatInput,\n    IntInput,\n    MessageInput,\n    SecretStrInput,\n    StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n    display_name = \"OpenAI\"\n    description = \"Generates text using OpenAI LLMs.\"\n    icon = \"OpenAI\"\n\n    inputs = [\n        MessageInput(name=\"input_value\", display_name=\"Input\"),\n        IntInput(\n            name=\"max_tokens\",\n            display_name=\"Max Tokens\",\n            advanced=True,\n            info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n        ),\n        DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n        DictInput(\n            name=\"output_schema\",\n            is_list=True,\n            display_name=\"Schema\",\n            advanced=True,\n            info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n        ),\n        DropdownInput(\n            name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n        ),\n        StrInput(\n            name=\"openai_api_base\",\n            display_name=\"OpenAI API Base\",\n            advanced=True,\n            info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n        ),\n        SecretStrInput(\n            name=\"openai_api_key\",\n            display_name=\"OpenAI API Key\",\n            info=\"The OpenAI API Key to use for the OpenAI model.\",\n            advanced=False,\n            value=\"OPENAI_API_KEY\",\n        ),\n        FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n        BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n        StrInput(\n            name=\"system_message\",\n            display_name=\"System Message\",\n            info=\"System message to pass to the model.\",\n            advanced=True,\n        ),\n        IntInput(\n            name=\"seed\",\n            display_name=\"Seed\",\n            info=\"The seed controls the reproducibility of the job.\",\n            advanced=True,\n            value=1,\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n        Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n    ]\n\n    def text_response(self) -> Message:\n        input_value = self.input_value\n        stream = self.stream\n        system_message = self.system_message\n        output = self.build_model()\n        result = self.get_chat_result(output, stream, input_value, system_message)\n        self.status = result\n        return result\n\n    def build_model(self) -> LanguageModel:\n        # self.output_schea is a list of dictionaries\n        # let's convert it to a dictionary\n        output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n        openai_api_key = self.openai_api_key\n        temperature = self.temperature\n        model_name: str = self.model_name\n        max_tokens = self.max_tokens\n        model_kwargs = self.model_kwargs\n        openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n        json_mode = bool(output_schema_dict)\n        seed = self.seed\n        if openai_api_key:\n            api_key = SecretStr(openai_api_key)\n        else:\n            api_key = None\n        output = ChatOpenAI(\n            max_tokens=max_tokens or None,\n            model_kwargs=model_kwargs or {},\n            model=model_name,\n            base_url=openai_api_base,\n            api_key=api_key,\n            temperature=temperature or 0.1,\n            seed=seed,\n        )\n        if json_mode:\n            output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n        return output\n"
 },
 "input_value": {
 "advanced": false,
@@ -1512,9 +1512,9 @@
 "display_name": "Language Model",
 "method": "build_model",
 "name": "model_output",
-"selected": "BaseLanguageModel",
+"selected": "LanguageModel",
 "types": [
-"BaseLanguageModel"
+"LanguageModel"
 ],
 "value": "__UNDEFINED__"
 }
@@ -1537,7 +1537,7 @@
 "show": true,
 "title_case": false,
 "type": "code",
-"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n    BoolInput,\n    DictInput,\n    DropdownInput,\n    FloatInput,\n    IntInput,\n    MessageInput,\n    SecretStrInput,\n    StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n    display_name = \"OpenAI\"\n    description = \"Generates text using OpenAI LLMs.\"\n    icon = \"OpenAI\"\n\n    inputs = [\n        MessageInput(name=\"input_value\", display_name=\"Input\"),\n        IntInput(\n            name=\"max_tokens\",\n            display_name=\"Max Tokens\",\n            advanced=True,\n            info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n        ),\n        DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n        DictInput(\n            name=\"output_schema\",\n            is_list=True,\n            display_name=\"Schema\",\n            advanced=True,\n            info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n        ),\n        DropdownInput(\n            name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n        ),\n        StrInput(\n            name=\"openai_api_base\",\n            display_name=\"OpenAI API Base\",\n            advanced=True,\n            info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n        ),\n        SecretStrInput(\n            name=\"openai_api_key\",\n            display_name=\"OpenAI API Key\",\n            info=\"The OpenAI API Key to use for the OpenAI model.\",\n            advanced=False,\n            value=\"OPENAI_API_KEY\",\n        ),\n        FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n        BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n        StrInput(\n            name=\"system_message\",\n            display_name=\"System Message\",\n            info=\"System message to pass to the model.\",\n            advanced=True,\n        ),\n        IntInput(\n            name=\"seed\",\n            display_name=\"Seed\",\n            info=\"The seed controls the reproducibility of the job.\",\n            advanced=True,\n            value=1,\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n        Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n    ]\n\n    def text_response(self) -> Message:\n        input_value = self.input_value\n        stream = self.stream\n        system_message = self.system_message\n        output = self.build_model()\n        result = self.get_chat_result(output, stream, input_value, system_message)\n        self.status = result\n        return result\n\n    def build_model(self) -> BaseLanguageModel:\n        # self.output_schea is a list of dictionaries\n        # let's convert it to a dictionary\n        output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n        openai_api_key = self.openai_api_key\n        temperature = self.temperature\n        model_name: str = self.model_name\n        max_tokens = self.max_tokens\n        model_kwargs = self.model_kwargs\n        openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n        json_mode = bool(output_schema_dict)\n        seed = self.seed\n        if openai_api_key:\n            api_key = SecretStr(openai_api_key)\n        else:\n            api_key = None\n        output = ChatOpenAI(\n            max_tokens=max_tokens or None,\n            model_kwargs=model_kwargs or {},\n            model=model_name,\n            base_url=openai_api_base,\n            api_key=api_key,\n            temperature=temperature or 0.1,\n            seed=seed,\n        )\n        if json_mode:\n            output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n        return output\n"
+"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n    BoolInput,\n    DictInput,\n    DropdownInput,\n    FloatInput,\n    IntInput,\n    MessageInput,\n    SecretStrInput,\n    StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n    display_name = \"OpenAI\"\n    description = \"Generates text using OpenAI LLMs.\"\n    icon = \"OpenAI\"\n\n    inputs = [\n        MessageInput(name=\"input_value\", display_name=\"Input\"),\n        IntInput(\n            name=\"max_tokens\",\n            display_name=\"Max Tokens\",\n            advanced=True,\n            info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n        ),\n        DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n        DictInput(\n            name=\"output_schema\",\n            is_list=True,\n            display_name=\"Schema\",\n            advanced=True,\n            info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n        ),\n        DropdownInput(\n            name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n        ),\n        StrInput(\n            name=\"openai_api_base\",\n            display_name=\"OpenAI API Base\",\n            advanced=True,\n            info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n        ),\n        SecretStrInput(\n            name=\"openai_api_key\",\n            display_name=\"OpenAI API Key\",\n            info=\"The OpenAI API Key to use for the OpenAI model.\",\n            advanced=False,\n            value=\"OPENAI_API_KEY\",\n        ),\n        FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n        BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n        StrInput(\n            name=\"system_message\",\n            display_name=\"System Message\",\n            info=\"System message to pass to the model.\",\n            advanced=True,\n        ),\n        IntInput(\n            name=\"seed\",\n            display_name=\"Seed\",\n            info=\"The seed controls the reproducibility of the job.\",\n            advanced=True,\n            value=1,\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n        Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n    ]\n\n    def text_response(self) -> Message:\n        input_value = self.input_value\n        stream = self.stream\n        system_message = self.system_message\n        output = self.build_model()\n        result = self.get_chat_result(output, stream, input_value, system_message)\n        self.status = result\n        return result\n\n    def build_model(self) -> LanguageModel:\n        # self.output_schea is a list of dictionaries\n        # let's convert it to a dictionary\n        output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n        openai_api_key = self.openai_api_key\n        temperature = self.temperature\n        model_name: str = self.model_name\n        max_tokens = self.max_tokens\n        model_kwargs = self.model_kwargs\n        openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n        json_mode = bool(output_schema_dict)\n        seed = self.seed\n        if openai_api_key:\n            api_key = SecretStr(openai_api_key)\n        else:\n            api_key = None\n        output = ChatOpenAI(\n            max_tokens=max_tokens or None,\n            model_kwargs=model_kwargs or {},\n            model=model_name,\n            base_url=openai_api_base,\n            api_key=api_key,\n            temperature=temperature or 0.1,\n            seed=seed,\n        )\n        if json_mode:\n            output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n        return output\n"
 },
 "input_value": {
 "advanced": false,
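Note: the API key handling in build_model wraps the raw string in pydantic's SecretStr, so the key is masked in repr() and logs, and None is passed through when no key is set. A minimal sketch with a fake key:

    from pydantic.v1 import SecretStr

    openai_api_key = "sk-fake-key"  # fake value for illustration
    api_key = SecretStr(openai_api_key) if openai_api_key else None
    print(api_key)  # prints **********, not the key itself
    print(api_key.get_secret_value() == "sk-fake-key")  # True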
@@ -495,7 +495,7 @@
 "description": "Display a text output in the Playground.",
 "display_name": "Extracted Chunks",
 "documentation": "",
-"edited": true,
+"edited": false,
 "field_order": [
 "input_value"
 ],
@@ -1042,9 +1042,9 @@
 "display_name": "Language Model",
 "method": "build_model",
 "name": "model_output",
-"selected": "BaseLanguageModel",
+"selected": "LanguageModel",
 "types": [
-"BaseLanguageModel"
+"LanguageModel"
 ],
 "value": "__UNDEFINED__"
 }
@@ -1067,7 +1067,7 @@
 "show": true,
 "title_case": false,
 "type": "code",
-"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import BaseLanguageModel\nfrom langflow.inputs import (\n    BoolInput,\n    DictInput,\n    DropdownInput,\n    FloatInput,\n    IntInput,\n    MessageInput,\n    SecretStrInput,\n    StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n    display_name = \"OpenAI\"\n    description = \"Generates text using OpenAI LLMs.\"\n    icon = \"OpenAI\"\n\n    inputs = [\n        MessageInput(name=\"input_value\", display_name=\"Input\"),\n        IntInput(\n            name=\"max_tokens\",\n            display_name=\"Max Tokens\",\n            advanced=True,\n            info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n        ),\n        DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n        DictInput(\n            name=\"output_schema\",\n            is_list=True,\n            display_name=\"Schema\",\n            advanced=True,\n            info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n        ),\n        DropdownInput(\n            name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n        ),\n        StrInput(\n            name=\"openai_api_base\",\n            display_name=\"OpenAI API Base\",\n            advanced=True,\n            info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n        ),\n        SecretStrInput(\n            name=\"openai_api_key\",\n            display_name=\"OpenAI API Key\",\n            info=\"The OpenAI API Key to use for the OpenAI model.\",\n            advanced=False,\n            value=\"OPENAI_API_KEY\",\n        ),\n        FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n        BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n        StrInput(\n            name=\"system_message\",\n            display_name=\"System Message\",\n            info=\"System message to pass to the model.\",\n            advanced=True,\n        ),\n        IntInput(\n            name=\"seed\",\n            display_name=\"Seed\",\n            info=\"The seed controls the reproducibility of the job.\",\n            advanced=True,\n            value=1,\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n        Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n    ]\n\n    def text_response(self) -> Message:\n        input_value = self.input_value\n        stream = self.stream\n        system_message = self.system_message\n        output = self.build_model()\n        result = self.get_chat_result(output, stream, input_value, system_message)\n        self.status = result\n        return result\n\n    def build_model(self) -> BaseLanguageModel:\n        # self.output_schea is a list of dictionaries\n        # let's convert it to a dictionary\n        output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n        openai_api_key = self.openai_api_key\n        temperature = self.temperature\n        model_name: str = self.model_name\n        max_tokens = self.max_tokens\n        model_kwargs = self.model_kwargs\n        openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n        json_mode = bool(output_schema_dict)\n        seed = self.seed\n        if openai_api_key:\n            api_key = SecretStr(openai_api_key)\n        else:\n            api_key = None\n        output = ChatOpenAI(\n            max_tokens=max_tokens or None,\n            model_kwargs=model_kwargs or {},\n            model=model_name,\n            base_url=openai_api_base,\n            api_key=api_key,\n            temperature=temperature or 0.1,\n            seed=seed,\n        )\n        if json_mode:\n            output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n        return output\n"
+"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.constants import STREAM_INFO_TEXT\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n    BoolInput,\n    DictInput,\n    DropdownInput,\n    FloatInput,\n    IntInput,\n    MessageInput,\n    SecretStrInput,\n    StrInput,\n)\nfrom langflow.schema.message import Message\nfrom langflow.template import Output\n\n\nclass OpenAIModelComponent(LCModelComponent):\n    display_name = \"OpenAI\"\n    description = \"Generates text using OpenAI LLMs.\"\n    icon = \"OpenAI\"\n\n    inputs = [\n        MessageInput(name=\"input_value\", display_name=\"Input\"),\n        IntInput(\n            name=\"max_tokens\",\n            display_name=\"Max Tokens\",\n            advanced=True,\n            info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n        ),\n        DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n        DictInput(\n            name=\"output_schema\",\n            is_list=True,\n            display_name=\"Schema\",\n            advanced=True,\n            info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n        ),\n        DropdownInput(\n            name=\"model_name\", display_name=\"Model Name\", advanced=False, options=MODEL_NAMES, value=MODEL_NAMES[0]\n        ),\n        StrInput(\n            name=\"openai_api_base\",\n            display_name=\"OpenAI API Base\",\n            advanced=True,\n            info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n        ),\n        SecretStrInput(\n            name=\"openai_api_key\",\n            display_name=\"OpenAI API Key\",\n            info=\"The OpenAI API Key to use for the OpenAI model.\",\n            advanced=False,\n            value=\"OPENAI_API_KEY\",\n        ),\n        FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n        BoolInput(name=\"stream\", display_name=\"Stream\", info=STREAM_INFO_TEXT, advanced=True),\n        StrInput(\n            name=\"system_message\",\n            display_name=\"System Message\",\n            info=\"System message to pass to the model.\",\n            advanced=True,\n        ),\n        IntInput(\n            name=\"seed\",\n            display_name=\"Seed\",\n            info=\"The seed controls the reproducibility of the job.\",\n            advanced=True,\n            value=1,\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Text\", name=\"text_output\", method=\"text_response\"),\n        Output(display_name=\"Language Model\", name=\"model_output\", method=\"build_model\"),\n    ]\n\n    def text_response(self) -> Message:\n        input_value = self.input_value\n        stream = self.stream\n        system_message = self.system_message\n        output = self.build_model()\n        result = self.get_chat_result(output, stream, input_value, system_message)\n        self.status = result\n        return result\n\n    def build_model(self) -> LanguageModel:\n        # self.output_schea is a list of dictionaries\n        # let's convert it to a dictionary\n        output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n        openai_api_key = self.openai_api_key\n        temperature = self.temperature\n        model_name: str = self.model_name\n        max_tokens = self.max_tokens\n        model_kwargs = self.model_kwargs\n        openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n        json_mode = bool(output_schema_dict)\n        seed = self.seed\n        if openai_api_key:\n            api_key = SecretStr(openai_api_key)\n        else:\n            api_key = None\n        output = ChatOpenAI(\n            max_tokens=max_tokens or None,\n            model_kwargs=model_kwargs or {},\n            model=model_name,\n            base_url=openai_api_base,\n            api_key=api_key,\n            temperature=temperature or 0.1,\n            seed=seed,\n        )\n        if json_mode:\n            output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n\n        return output\n"
 },
 "input_value": {
 "advanced": false,
@@ -2168,7 +2168,7 @@
 "description": "Split text into chunks of a specified length.",
 "display_name": "Recursive Character Text Splitter",
 "documentation": "https://docs.langflow.org/components/text-splitters#recursivecharactertextsplitter",
-"edited": true,
+"edited": false,
 "field_order": [
 "chunk_size",
 "chunk_overlap",
@@ -284,6 +284,7 @@ export const nodeColors: { [char: string]: string } = {
 Prompt: "#7c3aed",
 Embeddings: "#10b981",
 BaseLanguageModel: "#c026d3",
+LanguageModel: "#c026d3",
 };

 export const nodeNames: { [char: string]: string } = {