Configurable cut-off response
This commit is contained in:
parent
472f553ea0
commit
fd9c246cc9
3 changed files with 11 additions and 5 deletions
|
|
@ -45,12 +45,17 @@ class AgentConfig(TypedModel, type=AgentType.BASE):
|
|||
send_filler_audio: Union[bool, FillerAudioConfig] = False
|
||||
|
||||
|
||||
class CutOffResponse(BaseModel):
    """What the agent says after being interrupted (cut off) mid-utterance.

    NOTE(review): assumes `BaseModel` is pydantic's — pydantic deep-copies
    field defaults per instance, so the mutable list default below is safe
    there; confirm if this ever stops subclassing pydantic's BaseModel.
    """

    # Candidate messages to respond with after an interruption; one is
    # presumably chosen by the agent at runtime — verify against callers.
    messages: list[BaseMessage] = [BaseMessage(text="Sorry?")]
class LLMAgentConfig(AgentConfig, type=AgentType.LLM):
    """Configuration for an LLM-completion-backed agent.

    Registered under ``AgentType.LLM`` via the ``TypedModel`` machinery
    (the ``type=`` keyword in the class signature); inherits base agent
    settings such as ``send_filler_audio`` from :class:`AgentConfig`.
    """

    # Required free-form text; presumably prepended to the conversation
    # prompt as system/context framing — confirm against the agent impl.
    prompt_preamble: str
    # Optional; NOTE(review): name suggests it overrides/anticipates the
    # first prompt sent to the model — verify usage before relying on it.
    expected_first_prompt: Optional[str] = None
    # Model identifier passed to the completion API.
    model_name: str = LLM_AGENT_DEFAULT_MODEL_NAME
    # Sampling temperature for completions.
    temperature: float = LLM_AGENT_DEFAULT_TEMPERATURE
    # Maximum number of tokens to generate per completion.
    max_tokens: int = LLM_AGENT_DEFAULT_MAX_TOKENS
    # Response used when the human cuts the bot off mid-utterance;
    # None leaves the cut-off-response feature disabled.
    cut_off_response: Optional[CutOffResponse] = None
class ChatGPTAlphaAgentConfig(AgentConfig, type=AgentType.CHAT_GPT_ALPHA):
|
||||
|
|
@ -66,6 +71,7 @@ class ChatGPTAgentConfig(AgentConfig, type=AgentType.CHAT_GPT):
|
|||
generate_responses: bool = False
|
||||
temperature: float = LLM_AGENT_DEFAULT_TEMPERATURE
|
||||
max_tokens: int = LLM_AGENT_DEFAULT_MAX_TOKENS
|
||||
cut_off_response: Optional[CutOffResponse] = None
|
||||
|
||||
|
||||
class InformationRetrievalAgentConfig(
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue