John Langley committed
Commit 97c030c · 1 Parent(s): b923173

change to cpu

Files changed (1)
  1. app.py +21 -19
app.py CHANGED
@@ -53,21 +53,22 @@ mistral_llm = Llama(model_path=mistral_model_path,n_gpu_layers=35,max_new_tokens
 
 # Load XTTS Model
 print("Loading XTTS model")
-os.environ["COQUI_TOS_AGREED"] = "1"
-tts_model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
-ModelManager().download_model(tts_model_name)
-tts_model_path = os.path.join(get_user_data_dir("tts"), tts_model_name.replace("/", "--"))
-config = XttsConfig()
-config.load_json(os.path.join(tts_model_path, "config.json"))
-xtts_model = Xtts.init_from_config(config)
-xtts_model.load_checkpoint(
-    config,
-    checkpoint_path=os.path.join(tts_model_path, "model.pth"),
-    vocab_path=os.path.join(tts_model_path, "vocab.json"),
-    eval=True,
-    use_deepspeed=True,
-)
-xtts_model.cuda()
+#os.environ["COQUI_TOS_AGREED"] = "1"
+#tts_model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
+#ModelManager().download_model(tts_model_name)
+#tts_model_path = os.path.join(get_user_data_dir("tts"), tts_model_name.replace("/", "--"))
+#config = XttsConfig()
+#config.load_json(os.path.join(tts_model_path, "config.json"))
+#xtts_model = Xtts.init_from_config(config)
+#xtts_model.load_checkpoint(
+#    config,
+#    checkpoint_path=os.path.join(tts_model_path, "model.pth"),
+#    vocab_path=os.path.join(tts_model_path, "vocab.json"),
+#    eval=True,
+#    use_deepspeed=True,
+#)
+#xtts_model.cuda()
+print("UN-Loading XTTS model")
 
 ###### Set up Gradio Interface ######
 
@@ -141,10 +142,11 @@ with gr.Blocks(title="Voice chat with LLM") as demo:
     def handle_speech_generation(sentence, chatbot_history, chatbot_voice):
        if sentence != "":
            print("Processing sentence")
-           generated_speech = generate_speech_for_sentence(chatbot_history, chatbot_voice, sentence, xtts_model, xtts_supported_languages=config.languages, return_as_byte=True)
-           if generated_speech is not None:
-               _, audio_dict = generated_speech
-               yield (sentence, chatbot_history, audio_dict["value"])
+           yield (sentence, chatbot_history, None)
+           # generated_speech = generate_speech_for_sentence(chatbot_history, chatbot_voice, sentence, xtts_model, xtts_supported_languages=config.languages, return_as_byte=True)
+           # if generated_speech is not None:
+           #     _, audio_dict = generated_speech
+           #     yield (sentence, chatbot_history, audio_dict["value"])
 
     if initial_greeting:
        # Process only the initial greeting if specified
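Note that while the commit message says "change to cpu", the diff above disables XTTS entirely: the model is never loaded and the speech handler now yields no audio. For reference, a minimal sketch of what an actual CPU-only load could look like, reusing the same Coqui TTS calls that were commented out. The imports are the usual Coqui TTS ones (not shown in this diff), and disabling DeepSpeed with use_deepspeed=False is an assumption; none of this is part of the commit.

```python
# Hypothetical CPU-only variant of the block commented out above (not part of this commit).
import os
from TTS.utils.manage import ModelManager
from TTS.utils.generic_utils import get_user_data_dir
from TTS.tts.configs.xtts_config import XttsConfig
from TTS.tts.models.xtts import Xtts

os.environ["COQUI_TOS_AGREED"] = "1"
tts_model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
ModelManager().download_model(tts_model_name)
tts_model_path = os.path.join(get_user_data_dir("tts"), tts_model_name.replace("/", "--"))

config = XttsConfig()
config.load_json(os.path.join(tts_model_path, "config.json"))
xtts_model = Xtts.init_from_config(config)
xtts_model.load_checkpoint(
    config,
    checkpoint_path=os.path.join(tts_model_path, "model.pth"),
    vocab_path=os.path.join(tts_model_path, "vocab.json"),
    eval=True,
    use_deepspeed=False,  # DeepSpeed inference targets GPUs; keep it off on CPU
)
# No xtts_model.cuda() call, so the model stays on the CPU.
```

With a model loaded this way, the commented-out generate_speech_for_sentence call in handle_speech_generation could be restored unchanged, with the caveat that XTTS inference on CPU is noticeably slower than on a GPU.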