configurable cut off response
This commit is contained in:
parent
472f553ea0
commit
fd9c246cc9
3 changed files with 11 additions and 5 deletions
|
|
@ -1,6 +1,6 @@
|
|||
[tool.poetry]
|
||||
name = "vocode"
|
||||
version = "0.1.34"
|
||||
version = "0.1.35"
|
||||
description = "The all-in-one voice SDK"
|
||||
authors = ["Ajay Raj <ajay@vocode.dev>"]
|
||||
license = "MIT License"
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ from vocode.models.transcriber import (
|
|||
)
|
||||
from vocode.models.agent import (
|
||||
ChatGPTAgentConfig,
|
||||
CutOffResponse,
|
||||
FillerAudioConfig,
|
||||
RESTfulUserImplementedAgentConfig,
|
||||
WebSocketUserImplementedAgentConfig,
|
||||
|
|
@ -46,12 +47,11 @@ if __name__ == "__main__":
|
|||
transcriber_config=DeepgramTranscriberConfig.from_input_device(
|
||||
microphone_input
|
||||
),
|
||||
agent_config=WebSocketUserImplementedAgentConfig(
|
||||
agent_config=ChatGPTAgentConfig(
|
||||
initial_message=BaseMessage(text="Hello!"),
|
||||
prompt_preamble="The AI is having a pleasant conversation about life",
|
||||
generate_responses=True,
|
||||
respond=WebSocketUserImplementedAgentConfig.RouteConfig(
|
||||
url="wss://9b1ff0eee874.ngrok.app/respond",
|
||||
),
|
||||
cut_off_response=CutOffResponse(),
|
||||
),
|
||||
synthesizer_config=AzureSynthesizerConfig.from_output_device(speaker_output),
|
||||
)
|
||||
|
|
|
|||
|
|
@ -45,12 +45,17 @@ class AgentConfig(TypedModel, type=AgentType.BASE):
|
|||
send_filler_audio: Union[bool, FillerAudioConfig] = False
|
||||
|
||||
|
||||
class CutOffResponse(BaseModel):
    """Response message(s) the agent can play back when it is cut off.

    NOTE(review): purpose inferred from the commit title ("configurable cut
    off response") and the field name — presumably spoken when the user
    interrupts the agent mid-utterance; confirm against the agent
    implementation that consumes `cut_off_response`.
    """

    # Pydantic deep-copies field defaults per instance, so this mutable
    # list default is safe here (unlike a plain class attribute).
    messages: list[BaseMessage] = [BaseMessage(text="Sorry?")]
|
||||
|
||||
|
||||
class LLMAgentConfig(AgentConfig, type=AgentType.LLM):
    """Configuration for the LLM-backed agent (type=AgentType.LLM)."""

    # Preamble prepended to the prompt sent to the model.
    prompt_preamble: str
    # Optional first prompt the agent expects — semantics defined by the
    # agent implementation; not visible from this model alone.
    expected_first_prompt: Optional[str] = None
    model_name: str = LLM_AGENT_DEFAULT_MODEL_NAME
    temperature: float = LLM_AGENT_DEFAULT_TEMPERATURE
    max_tokens: int = LLM_AGENT_DEFAULT_MAX_TOKENS
    # None disables the cut-off response feature; supply a CutOffResponse
    # to enable it (added in this commit alongside ChatGPTAgentConfig's
    # matching field).
    cut_off_response: Optional[CutOffResponse] = None
|
||||
|
||||
|
||||
class ChatGPTAlphaAgentConfig(AgentConfig, type=AgentType.CHAT_GPT_ALPHA):
|
||||
|
|
@ -66,6 +71,7 @@ class ChatGPTAgentConfig(AgentConfig, type=AgentType.CHAT_GPT):
|
|||
generate_responses: bool = False
|
||||
temperature: float = LLM_AGENT_DEFAULT_TEMPERATURE
|
||||
max_tokens: int = LLM_AGENT_DEFAULT_MAX_TOKENS
|
||||
cut_off_response: Optional[CutOffResponse] = None
|
||||
|
||||
|
||||
class InformationRetrievalAgentConfig(
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue