Spaces commit: UI update

app.py CHANGED
@@ -48,6 +48,7 @@ model_repos = {"e4e": ("akhaliq/JoJoGAN_e4e_ffhq_encode", "e4e_ffhq_encode.pt"),
                "shrek": ("rinong/stylegan-nada-models", "shrek.pt"),
                "thanos": ("rinong/stylegan-nada-models", "thanos.pt"),
                "ukiyoe": ("rinong/stylegan-nada-models", "ukiyoe.pt"),
+               "groot": ("rinong/stylegan-nada-models", "groot.pt"),
                "witcher": ("rinong/stylegan-nada-models", "witcher.pt"),
                "grafitti_on_wall": ("rinong/stylegan-nada-models", "grafitti_on_wall.pt"),
                "modernism": ("rinong/stylegan-nada-models", "modernism.pt"),
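This hunk adds a "groot" checkpoint to the style registry. Each entry maps a style name to a (repo_id, filename) pair on the Hugging Face Hub; the download code is not part of this diff, but a minimal sketch of how such a pair is typically resolved with huggingface_hub (the fetch_checkpoint helper is hypothetical):

    from huggingface_hub import hf_hub_download

    model_repos = {"groot": ("rinong/stylegan-nada-models", "groot.pt")}  # excerpt

    def fetch_checkpoint(style: str) -> str:
        # Hypothetical helper: download the checkpoint for a style
        # (or reuse the locally cached copy) and return its path.
        repo_id, filename = model_repos[style]
        return hf_hub_download(repo_id=repo_id, filename=filename)

    print(fetch_checkpoint("groot"))  # local path to groot.pt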
@@ -321,7 +322,22 @@ with blocks:
     gr.Markdown("<h4 style='font-size: 110%;margin-top:.5em'>A note on social impact</h4><div>This model relies on StyleGAN and CLIP, both of which are prone to biases inherited from their training data and their architecture. These may include (but are not limited to) poor representation of minorities or the perpetuation of societal biases, such as gender norms. In particular, StyleGAN editing may induce undesired changes in skin tones. Moreover, generative models can, and have, been used to create deepfake imagery which may assist in the spread of propaganda. However, <a href='https://github.com/NVlabs/stylegan3-detector' target='_blank'>tools are available</a> for identifying StyleGAN generated imagery, and any 'realistic' results produced by this model should be easily identifiable through such tools.</div>")
 
     with gr.Row():
-        input_img = gr.inputs.Image(type="filepath", label="Input image")
+        with gr.Column():
+            input_img = gr.inputs.Image(type="filepath", label="Input image")
+
+            with gr.Tabs():
+                with gr.TabItem("Edit Images"):
+                    img_button = gr.Button("Edit Image")
+                    img_output = gr.Gallery(label="Output Images")
+
+                with gr.TabItem("Create Video"):
+                    with gr.Row():
+                        vid_button = gr.Button("Generate Video")
+                        loop_styles = gr.inputs.Checkbox(default=True, label="Loop video back to the initial style?")
+                    with gr.Row():
+                        gr.Markdown("Warning: Video generation requires the synthesis of hundreds of frames and is expected to take several minutes.")
+                        gr.Markdown("To reduce queue times, we significantly reduced the number of video frames. Using more than 3 styles will further reduce the frames per style, leading to quicker transitions. For better control, we recommend cloning the gradio app, adjusting <b>num_alphas</b> in <b>generate_videos.py</b>, and running the code locally.")
+                    vid_output = gr.outputs.Video(label="Output Video")
 
         with gr.Column():
             style_choice = gr.inputs.CheckboxGroup(choices=editor.get_style_list(), type="value", label="Choose your styles!")
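Both the layout added above and the one removed below use the gr.inputs.* / gr.outputs.* namespaces, which later Gradio releases deprecated and then removed. A sketch of the same added layout in current Gradio syntax (this equivalence is an assumption of mine, not part of the commit):

    import gradio as gr

    with gr.Blocks() as blocks:
        with gr.Row():
            with gr.Column():
                input_img = gr.Image(type="filepath", label="Input image")
                with gr.Tabs():
                    with gr.TabItem("Edit Images"):
                        img_button = gr.Button("Edit Image")
                        img_output = gr.Gallery(label="Output Images")
                    with gr.TabItem("Create Video"):
                        with gr.Row():
                            vid_button = gr.Button("Generate Video")
                            # gr.Checkbox takes value= where the legacy API took default=
                            loop_styles = gr.Checkbox(value=True, label="Loop video back to the initial style?")
                        vid_output = gr.Video(label="Output Video")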
@@ -357,28 +373,6 @@ with blocks:
 
     sc_edit_choices = [src_text_styleclip, tar_text_styleclip, alpha_styleclip, beta_styleclip]
 
-    with gr.Tabs():
-        with gr.TabItem("Edit Images"):
-            with gr.Row():
-                with gr.Column():
-                    with gr.Row():
-                        img_button = gr.Button("Edit Image")
-                with gr.Column():
-                    img_output = gr.Gallery(label="Output Images")
-
-        with gr.TabItem("Create Video"):
-            with gr.Row():
-                with gr.Column():
-                    with gr.Row():
-                        vid_button = gr.Button("Generate Video")
-                        loop_styles = gr.inputs.Checkbox(default=True, label="Loop video back to the initial style?")
-            with gr.Row():
-                with gr.Column():
-                    gr.Markdown("Warning: Video generation requires the synthesis of hundreds of frames and is expected to take several minutes.")
-                    gr.Markdown("To reduce queue times, we significantly reduced the number of video frames. Using more than 3 styles will further reduce the frames per style, leading to quicker transitions. For better control, we recommend cloning the gradio app, adjusting <b>num_alphas</b> in <b>generate_videos.py</b>, and running the code locally.")
-                with gr.Column():
-                    vid_output = gr.outputs.Video(label="Output Video")
-
     edit_inputs = [editing_type_choice] + ig_edit_choices + sc_edit_choices
     img_button.click(fn=editor.edit_image, inputs=edit_inputs + [input_img, style_choice], outputs=img_output)
     vid_button.click(fn=editor.edit_video, inputs=edit_inputs + [input_img, style_choice, loop_styles], outputs=vid_output)
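The two .click() calls kept as context here are the app's only event wiring: Gradio reads the current value of every component in the inputs list at click time, passes them to fn positionally in list order, and routes the return value to outputs. A self-contained sketch of that pattern (toy components, not the app's):

    import gradio as gr

    def shout(text: str, times: int) -> str:
        # Values arrive positionally, in the same order as the inputs list.
        return (text.upper() + " ") * int(times)

    with gr.Blocks() as demo:
        text = gr.Textbox(label="Text")
        times = gr.Slider(1, 5, step=1, value=2, label="Repetitions")
        btn = gr.Button("Shout")
        out = gr.Textbox(label="Result")
        btn.click(fn=shout, inputs=[text, times], outputs=out)

    demo.launch()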
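The queue-time note points readers at num_alphas in generate_videos.py, a file not included in this diff. The sketch below only illustrates the idea the note implies, namely that the parameter sets how many interpolation frames are synthesized per style transition; the function name and the linear blending are my assumptions, not the repo's actual code:

    import numpy as np

    def transition_frames(latent_a: np.ndarray, latent_b: np.ndarray, num_alphas: int):
        # Hypothetical: one synthesized frame per blending weight, so a larger
        # num_alphas means more frames and a smoother, slower style transition.
        return [(1 - a) * latent_a + a * latent_b
                for a in np.linspace(0.0, 1.0, num_alphas)]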