fix openai env vars

This commit is contained in:
Ajay Raj 2023-03-28 11:08:19 -07:00
commit 389e2b2515
4 changed files with 9 additions and 4 deletions

View file

@@ -46,17 +46,17 @@ async def main():
     conversation = StreamingConversation(
         output_device=speaker_output,
-        transcriber=DeepgramTranscriberConfig.from_input_device(
+        transcriber_config=DeepgramTranscriberConfig.from_input_device(
             microphone_input, endpointing_config=PunctuationEndpointingConfig()
         ),
-        agent=ChatGPTAgentConfig(
+        agent_config=ChatGPTAgentConfig(
             initial_message=BaseMessage(text="What up"),
             prompt_preamble="""You are a helpful gen Z AI assistant. You use slang like um, but, and like a LOT. All of your responses are 10 words or less. Be super chill, use slang like
             hella, down, fire, totally, but like, slay, vibing, queen, go off, bet, sus, simp, cap, big yikes, main character, dank""",
             generate_responses=True,
             cut_off_response=CutOffResponse(),
         ),
-        synthesizer=AzureSynthesizerConfig.from_output_device(speaker_output),
+        synthesizer_config=AzureSynthesizerConfig.from_output_device(speaker_output),
         logger=logger,
     )
     await conversation.start()

View file

@@ -3,6 +3,8 @@
 from langchain.llms import OpenAI
 from langchain.prompts import PromptTemplate
 from pydantic import BaseModel
+from vocode import getenv
+
 TEMPLATE = """
 Read the following conversation classify the final emotion of the Bot as one of [{emotions}].
 Output the degree of emotion as a value between 0 and 1 in the format EMOTION,DEGREE: ex. {example_emotion},0.5
@@ -22,7 +24,7 @@ class BotSentimentAnalyser:
     def __init__(self, emotions: list[str], model_name: str = "text-davinci-003"):
         self.model_name = model_name
         self.llm = OpenAI(
-            model_name=self.model_name,
+            model_name=self.model_name, openai_api_key=getenv("OPENAI_API_KEY")
         )
         assert len(emotions) > 0
         self.emotions = [e.lower() for e in emotions]

View file

@@ -57,6 +57,7 @@ class ChatGPTAgent(BaseAgent):
             model_name=self.agent_config.model_name,
             temperature=self.agent_config.temperature,
             max_tokens=self.agent_config.max_tokens,
+            openai_api_key=openai.api_key,
         )
         self.conversation = ConversationChain(
             memory=self.memory, prompt=self.prompt, llm=self.llm

View file

@@ -5,6 +5,7 @@
 from langchain import OpenAI
 from langchain.llms import OpenAIChat
 from typing import Generator
 import logging
+from vocode import getenv
 from vocode.streaming.agent.base_agent import BaseAgent
 from vocode.streaming.agent.utils import stream_llm_response
@@ -43,6 +44,7 @@ class LLMAgent(BaseAgent):
             model_name=self.agent_config.model_name,
             temperature=self.agent_config.temperature,
             max_tokens=self.agent_config.max_tokens,
+            openai_api_key=getenv("OPENAI_API_KEY"),
        )
         self.stop_tokens = [f"{recipient}:"]
         self.first_response = (