🤖 Build voice-based LLM agents. Modular + open source. https://vocode.dev
Find a file
2023-02-27 21:52:41 -08:00
vocode ajay comments 2023-02-27 11:21:50 -08:00
.gitignore poetry 2023-02-25 20:57:55 -08:00
poetry.lock poetry 2023-02-25 20:57:55 -08:00
pyproject.toml update readme 2023-02-27 21:52:41 -08:00
README.md update readme 2023-02-27 21:52:41 -08:00
requirements.txt python SDK 2023-02-24 10:47:17 -08:00
simple_conversation.py use queue for output and put it in a thread, threads are back baby 2023-02-27 00:57:11 -08:00

vocode Python SDK

import asyncio
import signal

from vocode.conversation import Conversation
from vocode.helpers import create_microphone_input_and_speaker_output
from vocode.models.transcriber import DeepgramTranscriberConfig
from vocode.models.agent import LLMAgentConfig
from vocode.models.synthesizer import AzureSynthesizerConfig

if __name__ == "__main__":
    # Wire a microphone and a speaker into a streaming conversation:
    # mic audio -> Deepgram transcription -> LLM agent -> Azure TTS -> speaker.
    mic, speaker = create_microphone_input_and_speaker_output(
        use_first_available_device=True
    )

    conversation = Conversation(
        input_device=mic,
        output_device=speaker,
        transcriber_config=DeepgramTranscriberConfig.from_input_device(mic),
        agent_config=LLMAgentConfig(
            prompt_preamble="The AI is having a pleasant conversation about life."
        ),
        synthesizer_config=AzureSynthesizerConfig.from_output_device(speaker),
    )

    # Ctrl-C deactivates the conversation rather than killing the process,
    # letting the run loop below wind down on its own.
    def _handle_sigint(signum, frame):
        conversation.deactivate()

    signal.signal(signal.SIGINT, _handle_sigint)
    asyncio.run(conversation.start())