Allow models to receive Record as well

This commit is contained in:
ogabrielluiz 2024-06-07 15:32:45 -03:00
commit 5a7263a7cb
11 changed files with 18 additions and 15 deletions

View file

@@ -58,7 +58,7 @@ class AmazonBedrockComponent(LCModelComponent):
"advanced": True,
},
"cache": {"display_name": "Cache"},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"system_message": {
"display_name": "System Message",
"info": "System message to pass to the model.",

View file

@@ -63,7 +63,7 @@ class AnthropicLLM(LCModelComponent):
"info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"advanced": True,

View file

@@ -78,7 +78,7 @@ class AzureChatOpenAIComponent(LCModelComponent):
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@@ -81,7 +81,7 @@ class QianfanChatEndpointComponent(LCModelComponent):
"info": "Endpoint of the Qianfan LLM, required if custom model used.",
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@@ -111,7 +111,7 @@ class ChatLiteLLMModelComponent(LCModelComponent):
"required": False,
"default": False,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@@ -1,10 +1,11 @@
from typing import Optional
from langchain_cohere import ChatCohere
from pydantic.v1 import SecretStr
from langflow.field_typing import Text
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langchain_cohere import ChatCohere
from langflow.field_typing import Text
class CohereComponent(LCModelComponent):
@@ -42,7 +43,7 @@ class CohereComponent(LCModelComponent):
"type": "float",
"show": True,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
@@ -69,3 +70,4 @@ class CohereComponent(LCModelComponent):
temperature=temperature,
)
return self.get_chat_result(output, stream, input_value, system_message)
+return self.get_chat_result(output, stream, input_value, system_message)

View file

@@ -2,9 +2,10 @@ from typing import Optional
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langflow.field_typing import Text
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
class HuggingFaceEndpointsComponent(LCModelComponent):
@@ -36,7 +37,7 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
"advanced": True,
},
"code": {"show": False},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,
@@ -72,3 +73,4 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
raise ValueError("Could not connect to HuggingFace Endpoints API.") from e
output = ChatHuggingFace(llm=llm)
return self.get_chat_result(output, stream, input_value, system_message)
+return self.get_chat_result(output, stream, input_value, system_message)

View file

@@ -27,7 +27,7 @@ class MistralAIModelComponent(LCModelComponent):
def build_config(self):
return {
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,

View file

@@ -194,7 +194,7 @@ class ChatOllamaComponent(LCModelComponent):
"info": "Template to use for generating text.",
"advanced": True,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,

View file

@@ -28,7 +28,7 @@ class OpenAIModelComponent(LCModelComponent):
def build_config(self):
return {
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"max_tokens": {
"display_name": "Max Tokens",
"advanced": True,

View file

@@ -1,6 +1,5 @@
from typing import Optional
from langflow.base.constants import STREAM_INFO_TEXT
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Text
@@ -74,7 +73,7 @@ class ChatVertexAIComponent(LCModelComponent):
"value": False,
"advanced": True,
},
"input_value": {"display_name": "Input"},
"input_value": {"display_name": "Input", "input_types": ["Text", "Record"]},
"stream": {
"display_name": "Stream",
"info": STREAM_INFO_TEXT,