Blane187 committed
Commit
c7c81d6
1 Parent(s): 4e7cf0c

Update app.py

Files changed (1)
  1. app.py +14 -168
app.py CHANGED
@@ -172,19 +172,6 @@ else:
         parallel_workers=8
     )

-def calculate_remaining_time(epochs, seconds_per_epoch):
-    total_seconds = epochs * seconds_per_epoch
-
-    hours = total_seconds // 3600
-    minutes = (total_seconds % 3600) // 60
-    seconds = total_seconds % 60
-
-    if hours == 0:
-        return f"{int(minutes)} minutes"
-    elif hours == 1:
-        return f"{int(hours)} hour and {int(minutes)} minutes"
-    else:
-        return f"{int(hours)} hours and {int(minutes)} minutes"

 def inf_handler(audio, model_name):
     model_found = False
@@ -274,9 +261,18 @@ def upload_model(index_file, pth_file, model_name):
     MODELS.append({"model": pth_file, "index": index_file, "model_name": model_name})
     return "Uploaded!"

-with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose"), title="Ilaria RVC 💖") as app:
-    gr.Markdown("## Ilaria RVC 💖")
-    gr.Markdown("**Help keeping up the GPU donating on [Ko-Fi](https://ko-fi.com/ilariaowo)**")
+with gr.Blocks(
+    title="CoverGen Lite - Politrees",
+    css="footer{display:none !important}",
+    theme=gr.themes.Soft(
+        primary_hue="green",
+        secondary_hue="green",
+        neutral_hue="neutral",
+        spacing_size="sm",
+        radius_size="lg",
+    )) as app:
+
+    gr.Markdown("## Ilaria RVC Mod💖")
     with gr.Tab("Inference"):
         sound_gui = gr.Audio(value=None,type="filepath",autoplay=False,visible=True,)
         def update():
@@ -358,157 +354,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose")

     uvr5_button.click(inference, [uvr5_audio_file, uvr5_model], [uvr5_output_voc, uvr5_output_inst])

-    with gr.Tab("Extra"):
-        with gr.Accordion("Model Information", open=False):
-            def json_to_markdown_table(json_data):
-                table = "| Key | Value |\n| --- | --- |\n"
-                for key, value in json_data.items():
-                    table += f"| {key} | {value} |\n"
-                return table
-            def model_info(name):
-                for model in MODELS:
-                    if model["model_name"] == name:
-                        print(model["model"])
-                        info = model_handler.model_info(model["model"])
-                        info2 = {
-                            "Model Name": model["model_name"],
-                            "Model Config": info['config'],
-                            "Epochs Trained": info['epochs'],
-                            "Sample Rate": info['sr'],
-                            "Pitch Guidance": info['f0'],
-                            "Model Precision": info['size'],
-                        }
-                        return gr.Markdown(json_to_markdown_table(info2))
-
-                return "Model not found"
-            def update():
-                print(MODELS)
-                return gr.Dropdown(label="Model", choices=[model["model_name"] for model in MODELS])
-            with gr.Row():
-                model_info_dropdown = gr.Dropdown(label="Model", choices=[model["model_name"] for model in MODELS])
-                refresh_button = gr.Button("Refresh Models")
-                refresh_button.click(update, outputs=[model_info_dropdown])
-            model_info_button = gr.Button("Get Model Information")
-            model_info_output = gr.Textbox(value="Waiting...",label="Output", interactive=False)
-            model_info_button.click(model_info, [model_info_dropdown], [model_info_output])
-
-
-
-        with gr.Accordion("Training Time Calculator", open=False):
-            with gr.Column():
-                epochs_input = gr.Number(label="Number of Epochs")
-                seconds_input = gr.Number(label="Seconds per Epoch")
-                calculate_button = gr.Button("Calculate Time Remaining")
-                remaining_time_output = gr.Textbox(label="Remaining Time", interactive=False)
-
-            calculate_button.click(calculate_remaining_time,inputs=[epochs_input, seconds_input],outputs=[remaining_time_output])
-
-        with gr.Accordion("Model Fusion", open=False):
-            with gr.Group():
-                def merge(ckpt_a, ckpt_b, alpha_a, sr_, if_f0_, info__, name_to_save0, version_2):
-                    for model in MODELS:
-                        if model["model_name"] == ckpt_a:
-                            ckpt_a = model["model"]
-                        if model["model_name"] == ckpt_b:
-                            ckpt_b = model["model"]
-
-                    path = model_handler.merge(ckpt_a, ckpt_b, alpha_a, sr_, if_f0_, info__, name_to_save0, version_2)
-                    if path == "Fail to merge the models. The model architectures are not the same.":
-                        return "Fail to merge the models. The model architectures are not the same."
-                    else:
-                        MODELS.append({"model": path, "index": None, "model_name": name_to_save0})
-                        return "Merged, saved as " + name_to_save0
-
-                gr.Markdown(value="Strongly suggested to use only very clean models.")
-            with gr.Row():
-                def update():
-                    print(MODELS)
-                    return gr.Dropdown(label="Model A", choices=[model["model_name"] for model in MODELS]), gr.Dropdown(label="Model B", choices=[model["model_name"] for model in MODELS])
-                refresh_button_fusion = gr.Button("Refresh Models")
-                ckpt_a = gr.Dropdown(label="Model A", choices=[model["model_name"] for model in MODELS])
-                ckpt_b = gr.Dropdown(label="Model B", choices=[model["model_name"] for model in MODELS])
-                refresh_button_fusion.click(update, outputs=[ckpt_a, ckpt_b])
-                alpha_a = gr.Slider(
-                    minimum=0,
-                    maximum=1,
-                    label="Weight of the first model over the second",
-                    value=0.5,
-                    interactive=True,
-                )
-            with gr.Group():
-                with gr.Row():
-                    sr_ = gr.Radio(
-                        label="Sample rate of both models",
-                        choices=["32k","40k", "48k"],
-                        value="32k",
-                        interactive=True,
-                    )
-                    if_f0_ = gr.Radio(
-                        label="Pitch Guidance",
-                        choices=["Yes", "Nah"],
-                        value="Yes",
-                        interactive=True,
-                    )
-                    info__ = gr.Textbox(
-                        label="Add informations to the model",
-                        value="",
-                        max_lines=8,
-                        interactive=True,
-                        visible=False
-                    )
-                    name_to_save0 = gr.Textbox(
-                        label="Final Model name",
-                        value="",
-                        max_lines=1,
-                        interactive=True,
-                    )
-                    version_2 = gr.Radio(
-                        label="Versions of the models",
-                        choices=["v1", "v2"],
-                        value="v2",
-                        interactive=True,
-                    )
-            with gr.Group():
-                with gr.Row():
-                    but6 = gr.Button("Fuse the two models", variant="primary")
-                    info4 = gr.Textbox(label="Output", value="", max_lines=8)
-            but6.click(
-                merge,
-                [ckpt_a,ckpt_b,alpha_a,sr_,if_f0_,info__,name_to_save0,version_2,],info4,api_name="ckpt_merge",)
-
-        with gr.Accordion("Model Quantization", open=False):
-            gr.Markdown("Quantize the model to a lower precision. - soon™ or never™ 😎")
-
-        with gr.Accordion("Debug", open=False):
-            def json_to_markdown_table(json_data):
-                table = "| Key | Value |\n| --- | --- |\n"
-                for key, value in json_data.items():
-                    table += f"| {key} | {value} |\n"
-                return table
-            gr.Markdown("View the models that are currently loaded in the instance.")
-
-            gr.Markdown(json_to_markdown_table({"Models": len(MODELS), "UVR Models": len(UVR_5_MODELS)}))
-
-            gr.Markdown("View the current status of the instance.")
-            status = {
-                "Status": "Running", # duh lol
-                "Models": len(MODELS),
-                "UVR Models": len(UVR_5_MODELS),
-                "CPU Usage": f"{psutil.cpu_percent()}%",
-                "RAM Usage": f"{psutil.virtual_memory().percent}%",
-                "CPU": f"{cpuinfo.get_cpu_info()['brand_raw']}",
-                "System Uptime": f"{round(time.time() - psutil.boot_time(), 2)} seconds",
-                "System Load Average": f"{psutil.getloadavg()}",
-                "====================": "====================",
-                "CPU Cores": psutil.cpu_count(),
-                "CPU Threads": psutil.cpu_count(logical=True),
-                "RAM Total": f"{round(psutil.virtual_memory().total / 1024**3, 2)} GB",
-                "RAM Used": f"{round(psutil.virtual_memory().used / 1024**3, 2)} GB",
-                "CPU Frequency": f"{psutil.cpu_freq().current} MHz",
-                "====================": "====================",
-                "GPU": "A100 - Do a request (Inference, you won't see it either way)",
-            }
-            gr.Markdown(json_to_markdown_table(status))
+

     with gr.Tab("Credits"):
         gr.Markdown(
@@ -527,4 +373,4 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose")
         ![ilaria](https://i.ytimg.com/vi/5PWqt2Wg-us/maxresdefault.jpg)
         ''')

-app.queue(api_open=False).launch(show_api=False)
+app.launch(share=True)
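For reference, the resulting entry point boils down to building the Blocks app with a `gr.themes.Soft` theme and launching it with a public share link. Below is a minimal standalone sketch of that pattern, not the actual app.py: the tab body is a placeholder rather than the real inference UI.

```python
# Minimal sketch of the launch pattern this commit moves to.
# The tab content is a placeholder; the real app wires the RVC inference widgets here.
import gradio as gr

with gr.Blocks(
    title="CoverGen Lite - Politrees",
    css="footer{display:none !important}",
    theme=gr.themes.Soft(
        primary_hue="green",
        secondary_hue="green",
        neutral_hue="neutral",
        spacing_size="sm",
        radius_size="lg",
    ),
) as app:
    gr.Markdown("## Ilaria RVC Mod💖")
    with gr.Tab("Inference"):
        gr.Audio(type="filepath")  # placeholder input

# share=True requests a temporary public *.gradio.live link in addition to the local server;
# the previous call (app.queue(api_open=False).launch(show_api=False)) also enabled queuing, which this version drops.
app.launch(share=True)
```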