diff --git a/pyproject.toml b/pyproject.toml
index 725f106..1c8ca49 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "vocode"
-version = "0.1.32"
+version = "0.1.33"
 description = "The all-in-one voice SDK"
 authors = ["Ajay Raj "]
 license = "MIT License"
diff --git a/vocode/models/agent.py b/vocode/models/agent.py
index 39d31ee..10845e8 100644
--- a/vocode/models/agent.py
+++ b/vocode/models/agent.py
@@ -5,6 +5,9 @@ from vocode.models.message import BaseMessage
 from .model import TypedModel, BaseModel
 
 FILLER_AUDIO_DEFAULT_SILENCE_THRESHOLD_SECONDS = 0.5
+LLM_AGENT_DEFAULT_TEMPERATURE = 1.0
+LLM_AGENT_DEFAULT_MAX_TOKENS = 256
+LLM_AGENT_DEFAULT_MODEL_NAME = "text-curie-001"
 
 
 class AgentType(str, Enum):
@@ -33,17 +36,24 @@ class AgentConfig(TypedModel, type=AgentType.BASE):
 class LLMAgentConfig(AgentConfig, type=AgentType.LLM):
     prompt_preamble: str
     expected_first_prompt: Optional[str] = None
+    model_name: str = LLM_AGENT_DEFAULT_MODEL_NAME
+    temperature: float = LLM_AGENT_DEFAULT_TEMPERATURE
+    max_tokens: int = LLM_AGENT_DEFAULT_MAX_TOKENS
 
 
 class ChatGPTAlphaAgentConfig(AgentConfig, type=AgentType.CHAT_GPT_ALPHA):
     prompt_preamble: str
     expected_first_prompt: Optional[str] = None
+    temperature: float = LLM_AGENT_DEFAULT_TEMPERATURE
+    max_tokens: int = LLM_AGENT_DEFAULT_MAX_TOKENS
 
 
 class ChatGPTAgentConfig(AgentConfig, type=AgentType.CHAT_GPT):
     prompt_preamble: str
     expected_first_prompt: Optional[str] = None
     generate_responses: bool = False
+    temperature: float = LLM_AGENT_DEFAULT_TEMPERATURE
+    max_tokens: int = LLM_AGENT_DEFAULT_MAX_TOKENS
 
 
 class InformationRetrievalAgentConfig(