Spaces
Changed to blocks API
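This commit moves the demo from Gradio's single-call gr.Interface setup to the Blocks API, which lays out the components explicitly and wires events by hand. A minimal, hedged sketch of that migration pattern (generic names, not this Space's actual code), assuming a Gradio 3.x install:

import gradio as gr

def stylize(image_path):
    # Placeholder handler: a real app would run the model here.
    return image_path

# Before: gr.Interface builds the whole UI from one function signature.
# demo = gr.Interface(fn=stylize, inputs=gr.Image(type="filepath"), outputs=gr.Image())

# After: Blocks gives explicit layout plus per-event wiring.
with gr.Blocks() as demo:
    inp = gr.Image(type="filepath", label="Input image")
    out = gr.Image(label="Output")
    btn = gr.Button("Run")
    btn.click(fn=stylize, inputs=inp, outputs=out)

demo.launch()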
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 🌖
 colorFrom: blue
 colorTo: gray
 sdk: gradio
-sdk_version:
+sdk_version: 3.0b2
 app_file: app.py
 pinned: false
 license: mit
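The sdk_version field in this front matter tells Spaces which Gradio release to install; 3.0b2 is a 3.0 beta, presumably pinned because the Blocks API used in app.py needs Gradio 3.0+. If you run the app outside Spaces, a small illustrative guard (not part of the Space) can catch an older local install:

import gradio as gr

# Illustrative check only: the Blocks-based app below expects Gradio 3.x.
assert gr.__version__.startswith("3."), f"Expected Gradio 3.x, found {gr.__version__}"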
app.py
CHANGED
@@ -35,7 +35,21 @@ model_repos = {"e4e": ("akhaliq/JoJoGAN_e4e_ffhq_encode", "e4e_ffhq_encode.pt"),
                "dlib": ("akhaliq/jojogan_dlib", "shape_predictor_68_face_landmarks.dat"),
                "base": ("akhaliq/jojogan-stylegan2-ffhq-config-f", "stylegan2-ffhq-config-f.pt"),
                "anime": ("rinong/stylegan-nada-models", "anime.pt"),
-               "joker": ("rinong/stylegan-nada-models", "joker.pt")
+               "joker": ("rinong/stylegan-nada-models", "joker.pt"),
+               "simpson": ("rinong/stylegan-nada-models", "simpson.pt"),
+               "ssj": ("rinong/stylegan-nada-models", "ssj.pt"),
+               "white_walker": ("rinong/stylegan-nada-models", "white_walker.pt"),
+               "zuckerberg": ("rinong/stylegan-nada-models", "zuckerberg.pt"),
+               "cubism": ("rinong/stylegan-nada-models", "cubism.pt"),
+               "disney_princess": ("rinong/stylegan-nada-models", "disney_princess.pt"),
+               "edvard_munch": ("rinong/stylegan-nada-models", "edvard_munch.pt"),
+               "van_gogh": ("rinong/stylegan-nada-models", "van_gogh.pt"),
+               "oil": ("rinong/stylegan-nada-models", "oil.pt"),
+               "rick_morty": ("rinong/stylegan-nada-models", "rick_morty.pt"),
+               "botero": ("rinong/stylegan-nada-models", "botero.pt"),
+               "crochet": ("rinong/stylegan-nada-models", "crochet.pt"),
+               "modigliani": ("rinong/stylegan-nada-models", "modigliani.pt"),
+               "shrek": ("rinong/stylegan-nada-models", "shrek.pt"),
                }
 
 def get_models():
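Each new entry maps a style name to a (Hub repo, checkpoint filename) pair, so adding a style is one more line in model_repos. A hedged sketch of how such an entry can be resolved to a local file with huggingface_hub (the fetch_checkpoint helper is illustrative, not the Space's own get_models):

from huggingface_hub import hf_hub_download

model_repos = {"anime": ("rinong/stylegan-nada-models", "anime.pt")}

def fetch_checkpoint(style):
    # Download the checkpoint for one style, or reuse the cached copy.
    repo_id, filename = model_repos[style]
    return hf_hub_download(repo_id=repo_id, filename=filename)

print(fetch_checkpoint("anime"))  # prints the local path to anime.pt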
@@ -113,22 +127,13 @@ class ImageEditor(object):
     def predict(
         self,
         input, # Input image path
-
-        style_list, # Comma seperated list of models to use. Only accepts models from the output_style list
+        output_styles, # Which output style do you want to use?
         generate_video, # Generate a video instead of an output image
         with_editing, # Apply latent space editing to the generated video
         video_format # Choose gif to display in browser, mp4 for higher-quality downloadable video
     ):
 
-
-            styles = self.model_list
-        elif output_style == 'list - enter below':
-            styles = style_list.split(",")
-            for style in styles:
-                if style not in self.model_list:
-                    raise ValueError(f"Encountered style '{style}' in the style_list which is not an available option.")
-        else:
-            styles = [output_style]
+        styles = output_styles
 
         # @title Align image
         input_image = self.run_alignment(str(input))
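The free-text style_list and its manual splitting and validation are replaced by a single output_styles argument; the Blocks UI below feeds it from a CheckboxGroup, which hands the handler the checked labels as a plain Python list, so styles = output_styles is all predict needs. A hedged, self-contained illustration of that data flow (component and handler names are made up):

import gradio as gr

def preview(output_styles):
    # output_styles arrives as a list of the checked labels, e.g. ["joker", "anime"].
    return f"predict() would run styles: {output_styles}"

with gr.Blocks() as demo:
    style_choice = gr.CheckboxGroup(choices=["joker", "anime", "modigliani"], label="Choose your styles!")
    status = gr.Textbox(label="Selection")
    style_choice.change(fn=preview, inputs=style_choice, outputs=status)

demo.launch()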
@@ -200,16 +205,46 @@ class ImageEditor(object):
 
 editor = ImageEditor()
 
-
-
+def change_component_visibility(visible, components, assign_flipped = False):
+    if assign_flipped:
+        return [component.update(visible=not visible) for component in components]
+    return [component.update(visible=visible) for component in components]
+
+blocks = gr.Blocks()
+
+with blocks:
+    gr.Markdown("<h1><center>StyleGAN-NADA</center></h1>")
+    gr.Markdown(
+        "Demo for StyleGAN-NADA: CLIP-Guided Domain Adaptation of Image Generators (SIGGRAPH 2022)."
+    )
+    gr.Markdown(
+        "For more information about the paper and code for training your own models (with examples OR text), see below."
+    )
+
+    with gr.Column():
+        input_img = gr.inputs.Image(type="filepath", label="Input image")
+        style_choice = gr.inputs.CheckboxGroup(choices=editor.get_style_list(), type="value", label="Choose your styles!")
+
+        with gr.Row():
+            video_choice = gr.inputs.Checkbox(default=False, label="Generate Video?", optional=False)
+
+            edit_choice = gr.inputs.Checkbox(default=False, label="With Editing?", optional=False, visible=False)
+            vid_format_choice = gr.inputs.Radio(choices=["gif", "mp4"], type="value", default='mp4', label="Video Format", visible=False)
+        with gr.Row():
+            img_button = gr.Button("Edit Image")
+            vid_button = gr.Button("Edit Image")
+
+    with gr.Column():
+        img_output = gr.outputs.Image(type="file")
+        vid_output = gr.outputs.Video(visible=False)
+
+    video_choice.change(fn=change_component_visibility, inputs=[video_choice, [edit_choice, vid_format_choice, vid_output, vid_button]], outputs=[edit_choice, vid_format_choice, vid_output, vid_button])
+    video_choice.change(fn=change_component_visibility, inputs=[video_choice, [img_output, img_button], True], outputs=[img_output, img_button])
 
-
+    img_button.click(fn=editor.predict, inputs=[input_img, style_choice, video_choice, edit_choice, vid_format_choice], outputs=img_output)
+    vid_button.click(fn=editor.predict, inputs=[input_img, style_choice, video_choice, edit_choice, vid_format_choice], outputs=vid_output)
 
-
-
-    gr.inputs.Textbox(lines=1, placeholder=None, default="joker,anime,modigliani", label="Style List", optional=True),
-    gr.inputs.Checkbox(default=False, label="Generate Video?", optional=False),
-    gr.inputs.Checkbox(default=False, label="With Editing?", optional=False),
-    gr.inputs.Radio(choices=["gif", "mp4"], type="value", default='mp4', label="Video Format")],
-    gr.outputs.Image(type="file"), title=title, description=description, article=article, allow_flagging=False, allow_screenshot=False).launch(enable_queue=True)
+    article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2108.00946' target='_blank'>StyleGAN-NADA: CLIP-Guided Domain Adaptation of Image Generators</a> | <a href='https://stylegan-nada.github.io/' target='_blank'>Project Page</a> | <a href='https://github.com/rinongal/StyleGAN-nada' target='_blank'>Code</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=rinong_sgnada' alt='visitor badge'></center>"
+    gr.Markdown(article)
 
+blocks.launch(enable_queue=True)
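The new handlers use change_component_visibility to show or hide the video-related controls whenever the "Generate Video?" checkbox flips, returning one component update per output. A minimal, hedged sketch of that show/hide pattern in Gradio 3.x Blocks (names are illustrative; it uses gr.update and a flat inputs list rather than this Space's exact wiring):

import gradio as gr

def toggle_video_ui(generate_video):
    # One update per output component, in the same order as `outputs` below.
    return gr.update(visible=generate_video), gr.update(visible=not generate_video)

with gr.Blocks() as demo:
    video_choice = gr.Checkbox(label="Generate Video?")
    vid_output = gr.Video(visible=False)
    img_output = gr.Image(visible=True)
    video_choice.change(fn=toggle_video_ui, inputs=video_choice, outputs=[vid_output, img_output])

demo.launch()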