diff --git a/setup.txt b/setup.txt
new file mode 100644
index 0000000..e69de29
diff --git a/voice_assistant/config.py b/voice_assistant/config.py
index f24695c..7645d91 100644
--- a/voice_assistant/config.py
+++ b/voice_assistant/config.py
@@ -21,9 +21,14 @@ class Config:
         LOCAL_MODEL_PATH (str): Path to the local model.
     """
     # Model selection
+
+    TRANSCRIPTION_MODEL = 'deepgram'  # possible values: openai, groq, deepgram, fastwhisperapi
-    RESPONSE_MODEL = 'openai'  # possible values: openai, groq, ollama
-    TTS_MODEL = 'openai'  # possible values: openai, deepgram, elevenlabs, melotts, cartesia
+
+    RESPONSE_MODEL = 'ollama'  # possible values: openai, groq, ollama
+    TTS_MODEL = 'deepgram'  # possible values: openai, deepgram, elevenlabs, melotts, cartesia
+
+    # currently using the MeloTTS for local models. here is how to get started:
     # https://github.com/myshell-ai/MeloTTS/blob/main/docs/install.md#linux-and-macos-install
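
Note (not part of the patch itself): the flags changed above select which provider handles transcription, response generation, and TTS. Below is a minimal Python sketch of how such string flags could be dispatched on at runtime. The Config stub only mirrors the values set in this diff; select_tts_backend and the per-provider lambdas are hypothetical placeholders for illustration, not the project's actual API.

# Hypothetical sketch of dispatching on the model-selection flags.
# Names below (select_tts_backend, the lambda backends) are illustrative only.

class Config:
    # Mirrors the values set in this diff.
    TRANSCRIPTION_MODEL = 'deepgram'  # possible values: openai, groq, deepgram, fastwhisperapi
    RESPONSE_MODEL = 'ollama'         # possible values: openai, groq, ollama
    TTS_MODEL = 'deepgram'            # possible values: openai, deepgram, elevenlabs, melotts, cartesia


def select_tts_backend(model_name: str):
    """Return a placeholder callable for the configured TTS backend."""
    backends = {
        'openai':     lambda text: f"[openai tts] {text}",
        'deepgram':   lambda text: f"[deepgram tts] {text}",
        'elevenlabs': lambda text: f"[elevenlabs tts] {text}",
        'melotts':    lambda text: f"[melotts local tts] {text}",
        'cartesia':   lambda text: f"[cartesia tts] {text}",
    }
    try:
        return backends[model_name]
    except KeyError:
        raise ValueError(f"Unsupported TTS_MODEL: {model_name!r}")


if __name__ == '__main__':
    # Usage example: pick the backend named in Config and synthesize a line.
    tts = select_tts_backend(Config.TTS_MODEL)
    print(tts("Hello from the voice assistant"))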