Spaces:
Running
on
Zero
Running
on
Zero
Nithya
committed on
Commit
·
fdfa43a
1
Parent(s):
3d6b478
small updates
Browse files
app.py
CHANGED
@@ -194,17 +194,18 @@ def load_pitch_model(model_selection):
|
|
194 |
qt_path = os.path.join(pitch_path, 'qt.joblib') if model_type == 'diffusion' else None, \
|
195 |
device = 'cpu'
|
196 |
)
|
197 |
-
return pitch_model, pitch_qt, pitch_task_fn, invert_pitch_fn
|
198 |
|
199 |
@debug_profile
|
200 |
def container_generate(model_selection, task_selection, audio, singer_id, t0):
|
201 |
global pitch_model, pitch_qt, pitch_task_fn, invert_pitch_fn, model_loaded
|
202 |
# load pitch model
|
203 |
if model_loaded is None or model_loaded != model_selection:
|
204 |
-
pitch_model, pitch_qt, pitch_task_fn, invert_pitch_fn = load_pitch_model(model_selection)
|
205 |
model_loaded = model_selection
|
206 |
else:
|
207 |
logging.log(logging.INFO, f'using existing model: {model_selection}')
|
|
|
208 |
# extract pitch from input
|
209 |
if audio is None:
|
210 |
return None, None
|
@@ -247,9 +248,9 @@ def container_generate(model_selection, task_selection, audio, singer_id, t0):
|
|
247 |
elif singer_id == 'Singer 2':
|
248 |
singer = [27]
|
249 |
if task_selection == 'Call and Response':
|
250 |
-
partial_generate = partial(generate, num_samples=1, num_steps=100, singers=singer, outfolder=None, pitch_qt=pitch_qt, type='response', invert_pitch_fn=invert_pitch_fn, model_type=
|
251 |
else:
|
252 |
-
partial_generate = partial(generate, num_samples=1, num_steps=100, singers=singer, outfolder=None, pitch_qt=pitch_qt, type='reinterp', invert_pitch_fn=invert_pitch_fn, t0=t0, model_type=
|
253 |
audio, output_plot = partial_generate(f0)
|
254 |
return audio, user_input_plot, output_plot
|
255 |
|
@@ -270,8 +271,8 @@ def toggle_visibility(selection):
|
|
270 |
return gr.update(visible=False)
|
271 |
|
272 |
def toggle_options(selection, options = ['Call and Response', 'Melodic Reinterpretation']):
|
273 |
-
|
274 |
-
if selection == "
|
275 |
return gr.update(choices=options)
|
276 |
else:
|
277 |
return gr.update(choices=options[:-1])
|
@@ -308,7 +309,7 @@ with gr.Blocks(css=css) as demo:
|
|
308 |
""")
|
309 |
model_dropdown = gr.Dropdown(["Diffusion Pitch Generator", "Autoregressive Pitch Generator"], label="Select a model type")
|
310 |
task_dropdown = gr.Dropdown(label="Select a task", choices=["Call and Response", "Melodic Reinterpretation"])
|
311 |
-
model_dropdown.change(toggle_options, outputs=task_dropdown)
|
312 |
t0 = gr.Slider(label="Faithfulness to the input (For melodic reinterpretation task only)", minimum=0.0, maximum=1.0, step=0.01, value=0.3, visible=False)
|
313 |
task_dropdown.change(toggle_visibility, inputs=task_dropdown, outputs=t0)
|
314 |
singer_dropdown = gr.Dropdown(label="Select a singer", choices=["Singer 1", "Singer 2"])
|
|
|
194 |
qt_path = os.path.join(pitch_path, 'qt.joblib') if model_type == 'diffusion' else None, \
|
195 |
device = 'cpu'
|
196 |
)
|
197 |
+
return pitch_model, pitch_qt, pitch_task_fn, invert_pitch_fn, model_type
|
198 |
|
199 |
@debug_profile
|
200 |
def container_generate(model_selection, task_selection, audio, singer_id, t0):
|
201 |
global pitch_model, pitch_qt, pitch_task_fn, invert_pitch_fn, model_loaded
|
202 |
# load pitch model
|
203 |
if model_loaded is None or model_loaded != model_selection:
|
204 |
+
pitch_model, pitch_qt, pitch_task_fn, invert_pitch_fn, model_type = load_pitch_model(model_selection)
|
205 |
model_loaded = model_selection
|
206 |
else:
|
207 |
logging.log(logging.INFO, f'using existing model: {model_selection}')
|
208 |
+
model_type = pitch_paths[model_selection][0]
|
209 |
# extract pitch from input
|
210 |
if audio is None:
|
211 |
return None, None
|
|
|
248 |
elif singer_id == 'Singer 2':
|
249 |
singer = [27]
|
250 |
if task_selection == 'Call and Response':
|
251 |
+
partial_generate = partial(generate, num_samples=1, num_steps=100, singers=singer, outfolder=None, pitch_qt=pitch_qt, type='response', invert_pitch_fn=invert_pitch_fn, model_type=model_type)
|
252 |
else:
|
253 |
+
partial_generate = partial(generate, num_samples=1, num_steps=100, singers=singer, outfolder=None, pitch_qt=pitch_qt, type='reinterp', invert_pitch_fn=invert_pitch_fn, t0=t0, model_type=model_type)
|
254 |
audio, output_plot = partial_generate(f0)
|
255 |
return audio, user_input_plot, output_plot
|
256 |
|
|
|
271 |
return gr.update(visible=False)
|
272 |
|
273 |
def toggle_options(selection, options = ['Call and Response', 'Melodic Reinterpretation']):
|
274 |
+
|
275 |
+
if selection == "Diffusion Pitch Generator":
|
276 |
return gr.update(choices=options)
|
277 |
else:
|
278 |
return gr.update(choices=options[:-1])
|
|
|
309 |
""")
|
310 |
model_dropdown = gr.Dropdown(["Diffusion Pitch Generator", "Autoregressive Pitch Generator"], label="Select a model type")
|
311 |
task_dropdown = gr.Dropdown(label="Select a task", choices=["Call and Response", "Melodic Reinterpretation"])
|
312 |
+
model_dropdown.change(toggle_options, inputs=model_dropdown, outputs=task_dropdown)
|
313 |
t0 = gr.Slider(label="Faithfulness to the input (For melodic reinterpretation task only)", minimum=0.0, maximum=1.0, step=0.01, value=0.3, visible=False)
|
314 |
task_dropdown.change(toggle_visibility, inputs=task_dropdown, outputs=t0)
|
315 |
singer_dropdown = gr.Dropdown(label="Select a singer", choices=["Singer 1", "Singer 2"])
|