Spaces:
Sleeping
Sleeping
adaface-neurips
committed on
Commit
·
5948db2
1
Parent(s):
3736ac5
put model to cpu first, then gpu later
Browse files
app.py
CHANGED
@@ -54,7 +54,7 @@ adaface = AdaFaceWrapper(pipeline_name="text2img", base_model_path=base_model_pa
|
|
54 |
adaface_encoder_cfg_scales=args.adaface_encoder_cfg_scales,
|
55 |
enabled_encoders=args.enabled_encoders,
|
56 |
unet_types=None, extra_unet_dirpaths=args.extra_unet_dirpaths,
|
57 |
-
unet_weights=args.unet_weights, device=
|
58 |
|
59 |
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
|
60 |
if randomize_seed:
|
@@ -80,6 +80,8 @@ def generate_image(image_paths, guidance_scale, do_neg_id_prompt_weight, perturb
|
|
80 |
|
81 |
global adaface
|
82 |
|
|
|
|
|
83 |
if image_paths is None or len(image_paths) == 0:
|
84 |
raise gr.Error(f"Cannot find any input face image! Please upload a face image.")
|
85 |
|
@@ -127,7 +129,7 @@ def check_prompt_and_model_type(prompt, model_style_type):
|
|
127 |
if model_style_type != args.model_style_type:
|
128 |
adaface = AdaFaceWrapper(pipeline_name="text2img", base_model_path=base_model_path,
|
129 |
adaface_encoder_types=args.adaface_encoder_types,
|
130 |
-
adaface_ckpt_paths=args.adaface_ckpt_path, device=
|
131 |
# Update base model type.
|
132 |
args.model_style_type = model_style_type
|
133 |
|
@@ -142,6 +144,10 @@ title = r"""
|
|
142 |
description = r"""
|
143 |
<b>Official demo</b> for our working paper <b>AdaFace: A Versatile Face Encoder for Zero-Shot Diffusion Model Personalization</b>.<br>
|
144 |
|
|
|
|
|
|
|
|
|
145 |
❗️**Tips**❗️
|
146 |
1. Upload one or more images of a person. If multiple faces are detected, we use the largest one.
|
147 |
2. Check "Enhance Face" to highlight fine facial features.
|
|
|
54 |
adaface_encoder_cfg_scales=args.adaface_encoder_cfg_scales,
|
55 |
enabled_encoders=args.enabled_encoders,
|
56 |
unet_types=None, extra_unet_dirpaths=args.extra_unet_dirpaths,
|
57 |
+
unet_weights=args.unet_weights, device='cpu')
|
58 |
|
59 |
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
|
60 |
if randomize_seed:
|
|
|
80 |
|
81 |
global adaface
|
82 |
|
83 |
+
adaface.to(device)
|
84 |
+
|
85 |
if image_paths is None or len(image_paths) == 0:
|
86 |
raise gr.Error(f"Cannot find any input face image! Please upload a face image.")
|
87 |
|
|
|
129 |
if model_style_type != args.model_style_type:
|
130 |
adaface = AdaFaceWrapper(pipeline_name="text2img", base_model_path=base_model_path,
|
131 |
adaface_encoder_types=args.adaface_encoder_types,
|
132 |
+
adaface_ckpt_paths=args.adaface_ckpt_path, device='cpu')
|
133 |
# Update base model type.
|
134 |
args.model_style_type = model_style_type
|
135 |
|
|
|
144 |
description = r"""
|
145 |
<b>Official demo</b> for our working paper <b>AdaFace: A Versatile Face Encoder for Zero-Shot Diffusion Model Personalization</b>.<br>
|
146 |
|
147 |
+
❗️**What's New**❗️
|
148 |
+
- Support switching between two model styles: **Realistic** and **Anime**.
|
149 |
+
- If you just changed the model style, the first image/video generation will take extra 20~30 seconds for loading new model weight.
|
150 |
+
|
151 |
❗️**Tips**❗️
|
152 |
1. Upload one or more images of a person. If multiple faces are detected, we use the largest one.
|
153 |
2. Check "Enhance Face" to highlight fine facial features.
|