configurable cut off response

This commit is contained in:
Ajay Raj 2023-03-14 18:11:22 -07:00
commit fd9c246cc9
3 changed files with 11 additions and 5 deletions

View file

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "vocode"
-version = "0.1.34"
+version = "0.1.35"
 description = "The all-in-one voice SDK"
 authors = ["Ajay Raj <ajay@vocode.dev>"]
 license = "MIT License"

View file

@@ -19,6 +19,7 @@ from vocode.models.transcriber import (
 )
 from vocode.models.agent import (
     ChatGPTAgentConfig,
+    CutOffResponse,
     FillerAudioConfig,
     RESTfulUserImplementedAgentConfig,
     WebSocketUserImplementedAgentConfig,
@@ -46,12 +47,11 @@ if __name__ == "__main__":
         transcriber_config=DeepgramTranscriberConfig.from_input_device(
             microphone_input
         ),
-        agent_config=WebSocketUserImplementedAgentConfig(
+        agent_config=ChatGPTAgentConfig(
             initial_message=BaseMessage(text="Hello!"),
+            prompt_preamble="The AI is having a pleasant conversation about life",
             generate_responses=True,
-            respond=WebSocketUserImplementedAgentConfig.RouteConfig(
-                url="wss://9b1ff0eee874.ngrok.app/respond",
-            ),
+            cut_off_response=CutOffResponse(),
         ),
         synthesizer_config=AzureSynthesizerConfig.from_output_device(speaker_output),
     )

View file

@@ -45,12 +45,17 @@ class AgentConfig(TypedModel, type=AgentType.BASE):
     send_filler_audio: Union[bool, FillerAudioConfig] = False


+class CutOffResponse(BaseModel):
+    messages: list[BaseMessage] = [BaseMessage(text="Sorry?")]
+
+
 class LLMAgentConfig(AgentConfig, type=AgentType.LLM):
     prompt_preamble: str
     expected_first_prompt: Optional[str] = None
     model_name: str = LLM_AGENT_DEFAULT_MODEL_NAME
     temperature: float = LLM_AGENT_DEFAULT_TEMPERATURE
     max_tokens: int = LLM_AGENT_DEFAULT_MAX_TOKENS
+    cut_off_response: Optional[CutOffResponse] = None


 class ChatGPTAlphaAgentConfig(AgentConfig, type=AgentType.CHAT_GPT_ALPHA):
@@ -66,6 +71,7 @@ class ChatGPTAgentConfig(AgentConfig, type=AgentType.CHAT_GPT):
     generate_responses: bool = False
     temperature: float = LLM_AGENT_DEFAULT_TEMPERATURE
     max_tokens: int = LLM_AGENT_DEFAULT_MAX_TOKENS
+    cut_off_response: Optional[CutOffResponse] = None


 class InformationRetrievalAgentConfig(