Fabrice-TIERCELIN committed on
Commit 8af8cfc · verified · 1 Parent(s): a5aaaf2

Up to 9 generations

Files changed (1)
  1. app.py +108 -3
app.py CHANGED
@@ -40,7 +40,11 @@ def update_output(output_number):
  gr.update(visible = (2 <= output_number)),
  gr.update(visible = (3 <= output_number)),
  gr.update(visible = (4 <= output_number)),
- gr.update(visible = (5 <= output_number))
+ gr.update(visible = (5 <= output_number)),
+ gr.update(visible = (6 <= output_number)),
+ gr.update(visible = (7 <= output_number)),
+ gr.update(visible = (8 <= output_number)),
+ gr.update(visible = (9 <= output_number))
  ]

  def predict0(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
@@ -58,6 +62,18 @@ def predict3(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, g
  def predict4(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
  return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 4, generation_number, temperature, is_randomize_seed, seed, progress)

+ def predict5(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
+ return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 5, generation_number, temperature, is_randomize_seed, seed, progress)
+
+ def predict6(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
+ return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 6, generation_number, temperature, is_randomize_seed, seed, progress)
+
+ def predict7(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
+ return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 7, generation_number, temperature, is_randomize_seed, seed, progress)
+
+ def predict8(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
+ return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 8, generation_number, temperature, is_randomize_seed, seed, progress)
+
  def predict(
  prompt,
  language,
@@ -251,7 +267,7 @@ Leave a star on the Github <a href="https://github.com/coqui-ai/TTS">TTS</a>, wh
  )
  generation_number = gr.Slider(
  minimum = 1,
- maximum = 5,
+ maximum = 9,
  step = 1,
  value = 1,
  label = "Generation number",
@@ -265,6 +281,7 @@ Leave a star on the Github <a href="https://github.com/coqui-ai/TTS">TTS</a>, wh
  step = .1,
  value = .75,
  label = "Temperature",
+ info = "Maybe useless",
  elem_id = "temperature-id"
  )
  randomize_seed = gr.Checkbox(
@@ -321,6 +338,34 @@ Leave a star on the Github <a href="https://github.com/coqui-ai/TTS">TTS</a>, wh
  elem_id = "synthesised-audio-5-id",
  visible = False
  )
+
+ synthesised_audio_6 = gr.Audio(
+ label="Synthesised Audio #6",
+ autoplay = False,
+ elem_id = "synthesised-audio-6-id",
+ visible = False
+ )
+
+ synthesised_audio_7 = gr.Audio(
+ label="Synthesised Audio #7",
+ autoplay = False,
+ elem_id = "synthesised-audio-7-id",
+ visible = False
+ )
+
+ synthesised_audio_8 = gr.Audio(
+ label="Synthesised Audio #8",
+ autoplay = False,
+ elem_id = "synthesised-audio-8-id",
+ visible = False
+ )
+
+ synthesised_audio_9 = gr.Audio(
+ label="Synthesised Audio #9",
+ autoplay = False,
+ elem_id = "synthesised-audio-9-id",
+ visible = False
+ )
  information = gr.HTML()

  submit.click(fn = update_output, inputs = [
@@ -329,7 +374,11 @@ Leave a star on the Github <a href="https://github.com/coqui-ai/TTS">TTS</a>, wh
  synthesised_audio_2,
  synthesised_audio_3,
  synthesised_audio_4,
- synthesised_audio_5
+ synthesised_audio_5,
+ synthesised_audio_6,
+ synthesised_audio_7,
+ synthesised_audio_8,
+ synthesised_audio_9
  ], queue = False, show_progress = False).success(predict0, inputs = [
  prompt,
  language,
@@ -400,6 +449,62 @@ Leave a star on the Github <a href="https://github.com/coqui-ai/TTS">TTS</a>, wh
  ], outputs = [
  synthesised_audio_5,
  information
+ ], scroll_to_output = True).success(predict5, inputs = [
+ prompt,
+ language,
+ gender,
+ audio_file_pth,
+ mic_file_path,
+ use_mic,
+ generation_number,
+ temperature,
+ randomize_seed,
+ seed
+ ], outputs = [
+ synthesised_audio_6,
+ information
+ ], scroll_to_output = True).success(predict6, inputs = [
+ prompt,
+ language,
+ gender,
+ audio_file_pth,
+ mic_file_path,
+ use_mic,
+ generation_number,
+ temperature,
+ randomize_seed,
+ seed
+ ], outputs = [
+ synthesised_audio_7,
+ information
+ ], scroll_to_output = True).success(predict7, inputs = [
+ prompt,
+ language,
+ gender,
+ audio_file_pth,
+ mic_file_path,
+ use_mic,
+ generation_number,
+ temperature,
+ randomize_seed,
+ seed
+ ], outputs = [
+ synthesised_audio_8,
+ information
+ ], scroll_to_output = True).success(predict8, inputs = [
+ prompt,
+ language,
+ gender,
+ audio_file_pth,
+ mic_file_path,
+ use_mic,
+ generation_number,
+ temperature,
+ randomize_seed,
+ seed
+ ], outputs = [
+ synthesised_audio_9,
+ information
  ], scroll_to_output = True)

  interface.queue(max_size = 5).launch(debug=True)
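
Side note (not part of the commit): the new predict5 to predict8 wrappers and the chained .success() handlers differ only in the generation index and the target audio component, so the same wiring could also be generated in a loop. The sketch below is a hypothetical alternative, not the code of this change. It assumes the predict() and update_output() functions, the submit button, the information component and the input widgets defined elsewhere in app.py; the names MAX_GENERATIONS, make_predict, synthesised_audios and predict_inputs are illustrative, and the inputs passed to update_output are a guess, since that part of the file is not shown in the diff.

import gradio as gr

MAX_GENERATIONS = 9

def make_predict(index):
    # Pin the generation slot, mirroring predict0 ... predict8 above.
    def handler(prompt, language, gender, audio_file_pth, mic_file_path, use_mic,
                generation_number, temperature, is_randomize_seed, seed,
                progress = gr.Progress()):
        return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic,
                       index, generation_number, temperature, is_randomize_seed, seed,
                       progress)
    return handler

# Inside the gr.Blocks() context, after the input widgets are created:
synthesised_audios = [
    gr.Audio(
        label = "Synthesised Audio #" + str(i + 1),
        autoplay = False,
        elem_id = "synthesised-audio-" + str(i + 1) + "-id",
        visible = (i == 0)  # assumption: only the first output starts visible
    )
    for i in range(MAX_GENERATIONS)
]

predict_inputs = [prompt, language, gender, audio_file_pth, mic_file_path, use_mic,
                  generation_number, temperature, randomize_seed, seed]

# First toggle visibility of outputs 2..9, then run one generation per slot.
event = submit.click(fn = update_output,
                     inputs = [generation_number],  # assumption about update_output's input
                     outputs = synthesised_audios[1:],
                     queue = False, show_progress = False)
for index, audio in enumerate(synthesised_audios):
    event = event.success(make_predict(index), inputs = predict_inputs,
                          outputs = [audio, information], scroll_to_output = True)

With this layout, raising the limit beyond 9 would only require changing MAX_GENERATIONS and the matching range of gr.update() calls in update_output.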