John Langley committed
Commit 6ce11f9 · 1 Parent(s): f59fc96

trying things with cpu

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -56,7 +56,7 @@ mistral_llm = Llama(model_path=mistral_model_path,n_gpu_layers=35,max_new_tokens
 # Load XTTS Model
 print("Loading XTTS model")
 model_name = "tts_models/multilingual/multi-dataset/xtts_v2" # move in v2, since xtts_v1 is generated keyerror, I guess you can select it with old github's release.
-
+os.environ["COQUI_TOS_AGREED"] = "1"
 #m = ModelManager().download_model(model_name)
 #print(m)
 m = model_name
@@ -64,7 +64,7 @@ m = model_name
 xtts_model = TTS(model_name, gpu=False)
 xtts_model.to("cpu") # no GPU or Amd
 #tts.to("cuda") # cuda only
-#os.environ["COQUI_TOS_AGREED"] = "1"
+
 #tts_model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
 #ModelManager().download_model(tts_model_name)
 #tts_model_path = os.path.join(get_user_data_dir("tts"), tts_model_name.replace("/", "--"))
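
For context, a minimal sketch of the CPU-only XTTS loading path this commit moves toward, assuming the coqui-ai TTS package (TTS.api) is installed; the reference.wav and output.wav paths in the usage call are hypothetical, not part of app.py:

import os

# Accept the Coqui CPML terms via the environment before the model is created,
# so the first download/load does not block on an interactive license prompt.
os.environ["COQUI_TOS_AGREED"] = "1"

from TTS.api import TTS

model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
xtts_model = TTS(model_name, gpu=False)  # load XTTS v2 without requesting a GPU
xtts_model.to("cpu")                     # keep inference on CPU (no CUDA / AMD GPU)

# Hypothetical usage: clone a voice from a reference clip and write a wav file.
xtts_model.tts_to_file(
    text="Hello from XTTS running on CPU.",
    speaker_wav="reference.wav",  # hypothetical reference speaker sample
    language="en",
    file_path="output.wav",
)

The key point of the diff is ordering: COQUI_TOS_AGREED is set before the TTS(...) call rather than sitting commented out after it, so the model download and load proceed unattended on a CPU-only Space.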