import os, json, re, sys, subprocess, gc, tqdm, math, time, random, threading, spaces, torch
import numpy as np
import gradio as gr
from PIL import Image, ImageOps
from moviepy import VideoFileClip
from datetime import datetime, timedelta
from huggingface_hub import hf_hub_download, snapshot_download, login

# Authenticate with the Hugging Face Hub when a token is provided via the environment.
HF_TOKEN = os.environ.get("HF_TOKEN")
if HF_TOKEN:
    login(token=HF_TOKEN)

import insightface
from insightface.app import FaceAnalysis
from facexlib.parsing import init_parsing_model
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
from diffusers import CogVideoXDPMScheduler
from diffusers.utils import load_image
from diffusers.image_processor import VaeImageProcessor
from diffusers.training_utils import free_memory

from util.utils import *
from util.rife_model import load_rife_model, rife_inference_with_latents
from models.utils import process_face_embeddings
from models.transformer_consisid import ConsisIDTransformer3DModel
from models.pipeline_consisid import ConsisIDPipeline
from models.eva_clip import create_model_and_transforms
from models.eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from models.eva_clip.utils_qformer import resize_numpy_image_long

os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["ZERO_GPU_PATCH_TORCH_DEVICE"] = "True"
device = "cuda" if torch.cuda.is_available() else "cpu"

from accelerate import Accelerator

accelerator = Accelerator()
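
# Fetch the checkpoints: Real-ESRGAN for optional super-resolution, RIFE for frame
# interpolation, and the ConsisID-preview weights themselves.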
hf_hub_download(repo_id="ai-forever/Real-ESRGAN", filename="RealESRGAN_x4.pth", local_dir="model_real_esran")
snapshot_download(repo_id="AlexWortega/RIFE", local_dir="model_rife")
snapshot_download(repo_id="BestWishYsh/ConsisID-preview", local_dir="BestWishYsh/ConsisID-preview")

model_path = "BestWishYsh/ConsisID-preview"
lora_path = None
lora_rank = 128
dtype = torch.bfloat16

# Prefer the EMA transformer weights when the checkpoint provides them.
if os.path.exists(os.path.join(model_path, "transformer_ema")):
    subfolder = "transformer_ema"
else:
    subfolder = "transformer"

transformer = ConsisIDTransformer3DModel.from_pretrained_cus(model_path, subfolder=subfolder)
scheduler = CogVideoXDPMScheduler.from_pretrained(model_path, subfolder="scheduler")

try:
    is_kps = transformer.config.is_kps
except AttributeError:
    is_kps = False
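
# Face detection / parsing helper (RetinaFace + BiSeNet) used to crop and align the identity face.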
face_helper = FaceRestoreHelper(
    upscale_factor=1,
    face_size=512,
    crop_ratio=(1, 1),
    det_model='retinaface_resnet50',
    save_ext='png',
    device=device,
    model_rootpath=os.path.join(model_path, "face_encoder"),
)
face_helper.face_parse = init_parsing_model(model_name='bisenet', device=device, model_rootpath=os.path.join(model_path, "face_encoder"))
face_helper.face_det.eval()
face_helper.face_parse.eval()
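
# EVA-CLIP vision tower that extracts fine-grained facial features for identity conditioning.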
model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', os.path.join(model_path, "face_encoder", "EVA02_CLIP_L_336_psz14_s6B.pt"), force_custom_clip=True)
face_clip_model = model.visual
face_clip_model.eval()

eva_transform_mean = getattr(face_clip_model, 'image_mean', OPENAI_DATASET_MEAN)
eva_transform_std = getattr(face_clip_model, 'image_std', OPENAI_DATASET_STD)
if not isinstance(eva_transform_mean, (list, tuple)):
    eva_transform_mean = (eva_transform_mean,) * 3
if not isinstance(eva_transform_std, (list, tuple)):
    eva_transform_std = (eva_transform_std,) * 3
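
# InsightFace antelopev2 face analysis model plus the glintr100 recognition model that produces the identity embedding.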
face_main_model = FaceAnalysis(name='antelopev2', root=os.path.join(model_path, "face_encoder"), providers=['CUDAExecutionProvider'])
handler_ante = insightface.model_zoo.get_model(f'{model_path}/face_encoder/models/antelopev2/glintr100.onnx', providers=['CUDAExecutionProvider'])
face_main_model.prepare(ctx_id=0, det_size=(640, 640))
handler_ante.prepare(ctx_id=0)
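
# Move the face models and the transformer onto the inference device.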
face_clip_model.to(device, dtype=dtype)
face_helper.face_det.to(device)
face_helper.face_parse.to(device)
transformer.to(device, dtype=dtype)
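
# Build the ConsisID pipeline; the LoRA branch below only runs when lora_path is set (it is None here).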
pipe = accelerator.prepare(ConsisIDPipeline.from_pretrained(model_path, transformer=transformer, scheduler=scheduler, torch_dtype=dtype))

if lora_path:
    pipe.load_lora_weights(lora_path, weight_name="pytorch_lora_weights.safetensors", adapter_name="test_1")
    pipe.fuse_lora(lora_scale=1 / lora_rank)
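
# If the saved scheduler config specifies a learned variance, fall back to "fixed_small" for sampling.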
scheduler_args = {}
if "variance_type" in pipe.scheduler.config:
    variance_type = pipe.scheduler.config.variance_type
    if variance_type in ["learned", "learned_range"]:
        variance_type = "fixed_small"
    scheduler_args["variance_type"] = variance_type

pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config, **scheduler_args)
pipe.to(device)

os.makedirs("./output", exist_ok=True)
os.makedirs("./gradio_tmp", exist_ok=True)

upscale_model = load_sd_upscale("model_real_esran/RealESRGAN_x4.pth", device)
frame_interpolation_model = load_rife_model("model_rife")


def convert_to_gif(video_path):
    clip = VideoFileClip(video_path)
    gif_path = video_path.replace(".mp4", ".gif")
    clip.write_gif(gif_path, fps=8)
    return gif_path
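

# Main generation entry point; @spaces.GPU requests ZeroGPU hardware for up to 180 seconds per call.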
@spaces.GPU(duration=180)
def plex(prompt, image_input, stips, gscale, seed_value, scale_status, rife_status, progress=gr.Progress(track_tqdm=True)):
    seed = seed_value
    if seed == -1:
        seed = random.randint(0, np.iinfo(np.int32).max)
    # Extract identity features: the face-recognition embedding, EVA-CLIP hidden states, the aligned face crop, and facial keypoints.
    id_image = np.array(ImageOps.exif_transpose(Image.fromarray(image_input)).convert("RGB"))
    id_image = resize_numpy_image_long(id_image, 1024)
    id_cond, id_vit_hidden, align_crop_face_image, face_kps = process_face_embeddings(face_helper, face_clip_model, handler_ante, eva_transform_mean, eva_transform_std, face_main_model, device, dtype, id_image, original_id_image=id_image, is_align_face=True, cal_uncond=False)
    kps_cond = face_kps if is_kps else None
    # Convert the aligned face crop back to a PIL image for the pipeline's image input.
    tensor = align_crop_face_image.cpu().detach()
    tensor = tensor.squeeze()
    tensor = tensor.permute(1, 2, 0)
    tensor = tensor.numpy() * 255
    tensor = tensor.astype(np.uint8)
    image = ImageOps.exif_transpose(Image.fromarray(tensor))
    prompt = prompt.strip('"')
    generator = torch.Generator(device).manual_seed(seed)
    video_pt = pipe(
        prompt=prompt,
        image=image,
        num_videos_per_prompt=1,
        num_inference_steps=stips,
        num_frames=49,
        use_dynamic_cfg=False,
        guidance_scale=gscale,
        generator=generator,
        id_vit_hidden=id_vit_hidden,
        id_cond=id_cond,
        kps_cond=kps_cond,
        output_type="pt",
    )
    latents = video_pt.frames

    # Optional post-processing: Real-ESRGAN super-resolution and RIFE frame interpolation.
    if scale_status:
        latents = upscale_batch_and_concatenate(upscale_model, latents, device)
    if rife_status:
        latents = rife_inference_with_latents(frame_interpolation_model, latents)

    batch_size = latents.shape[0]
    batch_video_frames = []
    for batch_idx in range(batch_size):
        pt_image = latents[batch_idx]
        pt_image = torch.stack([pt_image[i] for i in range(pt_image.shape[0])])

        image_np = VaeImageProcessor.pt_to_numpy(pt_image)
        image_pil = VaeImageProcessor.numpy_to_pil(image_np)
        batch_video_frames.append(image_pil)

    video_path = save_video(batch_video_frames[0], fps=math.ceil((len(batch_video_frames[0]) - 1) / 6))
    video_update = gr.update(visible=True, value=video_path)
    gif_path = convert_to_gif(video_path)
    gif_update = gr.update(visible=True, value=gif_path)
    seed_update = gr.update(visible=True, value=seed)
    gc.collect()
    return video_path, video_update, gif_update, seed_update
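

# Example image/prompt pairs shown in the UI's Examples accordion.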
examples_images = [
    ["asserts/example_images/1.png", "A woman adorned with a delicate flower crown, is standing amidst a field of gently swaying wildflowers. Her eyes sparkle with a serene gaze, and a faint smile graces her lips, suggesting a moment of peaceful contentment. The shot is framed from the waist up, highlighting the gentle breeze lightly tousling her hair. The background reveals an expansive meadow under a bright blue sky, capturing the tranquility of a sunny afternoon."],
    ["asserts/example_images/2.png", "The video captures a boy walking along a city street, filmed in black and white on a classic 35mm camera. His expression is thoughtful, his brow slightly furrowed as if he's lost in contemplation. The film grain adds a textured, timeless quality to the image, evoking a sense of nostalgia. Around him, the cityscape is filled with vintage buildings, cobblestone sidewalks, and softly blurred figures passing by, their outlines faint and indistinct. Streetlights cast a gentle glow, while shadows play across the boy's path, adding depth to the scene. The lighting highlights the boy's subtle smile, hinting at a fleeting moment of curiosity. The overall cinematic atmosphere, complete with classic film still aesthetics and dramatic contrasts, gives the scene an evocative and introspective feel."],
    ["asserts/example_images/3.png", "The video depicts a man sitting at an office desk, engaged in his work. He is dressed in a formal suit and appears to be focused on his computer screen. The office environment is well-organized, with shelves filled with binders and other office supplies neatly arranged. The man is holding a red cup, possibly containing a beverage, which he drinks from before setting it down on the desk. He then proceeds to type on the keyboard, indicating that he is working on something on his computer. The overall atmosphere of the video suggests a professional setting where the man is diligently working on his tasks."],
]
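

# Gradio UI: face image and prompt inputs, advanced sampling controls, and video/GIF outputs.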
with gr.Blocks() as demo:
    gr.Markdown("""
    <div style="text-align: center; font-size: 32px; font-weight: bold; margin-bottom: 20px;">
        🤗ConsisID Space🤗
    </div>
    <div style="text-align: center;">
        <a href="https://huggingface.co/BestWishYsh/ConsisID">🤗 Model Hub</a> |
        <a href="https://huggingface.co/datasets/BestWishYsh/ConsisID-preview-Data">📚 Dataset</a> |
        <a href="https://github.com/PKU-YuanGroup/ConsisID">🌐 GitHub</a> |
        <a href="https://pku-yuangroup.github.io/ConsisID">📝 Page</a> |
        <a href="https://arxiv.org/pdf/2408.06072">📜 arXiv</a>
    </div>
    <div style="text-align: center; display: flex; justify-content: center; align-items: center; margin-top: 1em; margin-bottom: .5em;">
        <span>This modified Space uses fewer inference steps, trading quality for speed; duplicate it to use it privately.</span>
        <a href="https://huggingface.co/spaces/BestWishYsh/ConsisID-Space?duplicate=true"><img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg.svg" width="160" style="margin-left: .75em;"></a>
    </div>
    <div style="text-align: center; font-size: 15px; font-weight: bold; color: red; margin-bottom: 20px;">
        ⚠️ This demo is for academic research and experiential use only. ⚠️
    </div>
    """)
    with gr.Row():
        with gr.Column():
            with gr.Accordion("IPT2V: Face Input", open=True):
                image_input = gr.Image(label="Input Image (should contain a clear face)")
                prompt = gr.Textbox(label="Prompt (less than 200 words)", placeholder="Enter your prompt here", lines=3)
            with gr.Accordion("Advanced", open=False):
                with gr.Group():
                    with gr.Column():
                        with gr.Row():
                            stips = gr.Slider(label="Steps", minimum=6, step=1, maximum=10, value=10)
                            gscale = gr.Slider(label="Guidance scale", minimum=1, step=0.1, maximum=20, value=7.0)
                            seed_param = gr.Slider(label="Inference Seed (Leave -1 for random)", minimum=-1, step=1, maximum=np.iinfo(np.int32).max, value=-1)
                        with gr.Row():
                            enable_scale = gr.Checkbox(label="Super-Resolution (720 × 480 -> 2880 × 1920) Real-ESRGAN", value=False, interactive=False)
                            enable_rife = gr.Checkbox(label="Frame Interpolation (8fps -> 16fps) RIFE", value=True)

            generate_button = gr.Button("🎬 Generate Video")

        with gr.Column():
            video_output = gr.Video(label="ConsisID Generated Video")
            with gr.Row():
                download_video_button = gr.File(label="📥 Download Video", visible=False)
                download_gif_button = gr.File(label="📥 Download GIF", visible=False)
                seed_text = gr.Number(label="Seed Used for Video Generation", visible=False)
            with gr.Accordion("Examples", open=False):
                examples_component_images = gr.Examples(
                    examples_images,
                    inputs=[image_input, prompt],
                    cache_examples=False,
                )
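
    # Wire the generate button to the inference function.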
    generate_button.click(
        fn=plex,
        inputs=[prompt, image_input, stips, gscale, seed_param, enable_scale, enable_rife],
        outputs=[video_output, download_video_button, download_gif_button, seed_text],
    )

demo.queue(max_size=15, api_open=False)
demo.launch(debug=True, inline=False, show_api=False, share=False)
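
# Hedged local-usage sketch (kept commented out; demo.launch above already serves the app).
# It assumes a hypothetical local "face.png" test image; the arguments mirror the plex() signature.
# test_face = np.array(Image.open("face.png").convert("RGB"))
# out_path, *_ = plex("A person smiling at the camera.", test_face, stips=10, gscale=7.0,
#                     seed_value=42, scale_status=False, rife_status=True)
# print("Video written to", out_path)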