import subprocess
import os
import gradio as gr
import spaces

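# Clone MuseV and install its dependencies (requirements plus the OpenMMLab stack) at start-up.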
subprocess.run(["git", "clone", "https://github.com/fat-ai/MuseV.git"])

os.chdir("./MuseV")
subprocess.run(["pip", "install", "-r", "requirements.txt"])
subprocess.run(["pip", "install", "--no-cache-dir", "-U", "openmim"])
subprocess.run(["mim", "install", "mmengine"])
subprocess.run(["mim", "install", "mmcv>=2.0.1"])
subprocess.run(["mim", "install", "mmdet>=3.1.0"])
subprocess.run(["mim", "install", "mmpose>=1.1.0"])
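# Nested recursive clone so the MMCM, diffusers and controlnet_aux packages referenced on
# PYTHONPATH below are checked out as submodules under ./MuseV/MuseV.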
subprocess.run(["git", "clone", "--recursive", "https://github.com/fat-ai/MuseV.git"])
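# Download the pretrained MuseV weights into ./MuseV/checkpoints, where the patched configs expect them.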
subprocess.run(["git", "clone", "https://huggingface.co/TMElyralab/MuseV", "./checkpoints"])

os.chdir("..")
# sys.path edits made in a throw-away child process don't persist; export the cloned
# packages on PYTHONPATH instead so the inference subprocesses below can import them.
musev_paths = ["./MuseV/MuseV", "./MuseV/MuseV/MMCM", "./MuseV/MuseV/diffusers/src", "./MuseV/MuseV/controlnet_aux/src"]
os.environ["PYTHONPATH"] = os.pathsep.join(musev_paths)

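# Move the inference entry points to the repo root so they can be invoked as
# ./MuseV/text2video.py and ./MuseV/video2video.py.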
subprocess.run(["mv", "./MuseV/scripts/inference/text2video.py", "./MuseV/text2video.py"])
subprocess.run(["mv", "./MuseV/scripts/inference/video2video.py", "./MuseV/video2video.py"])

# The upstream configs hard-code a Colab checkpoint path; rewrite them to point at this
# Space's checkpoint directory. Seek back to the start of the file before writing,
# otherwise the patched text would be appended after the original contents.
for config_path in [
    "./MuseV/configs/model/motion_model.py",
    "./MuseV/configs/model/ip_adapter.py",
    "./MuseV/configs/model/T2I_all_model.py",
]:
    with open(config_path, "r+") as scrip:
        s = scrip.read()
        s = s.replace("/content/MuseV/checkpoints", "/home/user/app/MuseV/checkpoints")
        scrip.seek(0)
        scrip.write(s)
        scrip.truncate()

from PIL import Image

def add_new_image(image):
    """Save the uploaded image and write a matching img2vid task entry to example.yaml."""
    image = Image.fromarray(image)
    height = image.height
    width = image.width
    lr = 0.5  # img_length_ratio for the task below
    ip_img = "${.condition_images}"
    image.save("/home/user/app/img.png")
    img_settings = f"""- condition_images: /home/user/app/img.png
  eye_blinks_factor: 1.8
  height: {height}
  img_length_ratio: {lr}
  ipadapter_image: {ip_img}
  name: image
  prompt: (masterpiece, best quality, highres:1),(1person, solo:1),(eye blinks:1.8),(head wave:1.3)
  refer_image: {ip_img}
  video_path: null
  width: {width}"""
    print(img_settings)
    # Overwrite the task file so inference runs only on this entry.
    with open("/home/user/app/MuseV/configs/tasks/example.yaml", "w") as configs:
        configs.write(img_settings)

def add_new_video(video):
    """Write a vid2vid task entry to example.yaml for the uploaded clip."""
    print(video)
    lr = 1.0  # img_length_ratio for the task below
    ip_img = "${.condition_images}"
    # Fall back to the bundled demo pose sequence when no clip is uploaded.
    video_path = video if video else "./MuseV/data/source_video/video1_girl_poseseq.mp4"
    # The task name must match the --target_datas value passed to video2video.py below.
    img_settings = f"""- name: "dance1"
  prompt: "(best quality), ((masterpiece)), (highres), illustration, original, extremely detailed wallpaper"
  video_path: {video_path}
  condition_images: ./MuseV/data/images/cyber_girl.png
  refer_image: {ip_img}
  ipadapter_image: {ip_img}
  height: 960
  width: 512
  img_length_ratio: {lr}
  video_is_middle: True"""
    print(img_settings)
    # Overwrite the task file so inference runs only on this entry.
    with open("/home/user/app/MuseV/configs/tasks/example.yaml", "w") as configs:
        configs.write(img_settings)
        
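# Img2Vid: run the MuseV referencenet text2video pipeline on the task written by add_new_image.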
@spaces.GPU(duration=240)
def run():
    #subprocess.run(["python", "./MuseV/text2video.py", "--sd_model_name", "majicmixRealv6Fp16", "--unet_model_name", "musev", "-test_data_path", "./MuseV/configs/tasks/example.yaml", "--n_batch", "1", "--target_datas", "image", "--vae_model_path", "./MuseV/checkpoints/vae/sd-vae-ft-mse", "--motion_speed", "5.0", "--time_size", "12", "--fps", "12"])
    subprocess.run(["python", "./MuseV/text2video.py", "--sd_model_name", "majicmixRealv6Fp16", "--unet_model_name", "musev_referencenet", "--referencenet_model_name", "musev_referencenet", "--ip_adapter_model_name", "musev_referencenet", "-test_data_path", "./MuseV/configs/tasks/example.yaml", "--output_dir", "./MuseV",  "--n_batch", "1",  "--target_datas", "image",  "--vision_clip_extractor_class_name", "ImageClipVisionFeatureExtractor", "--vision_clip_model_path", "./MuseV/checkpoints/IP-Adapter/models/image_encoder", "--motion_speed", "5.0", "--vae_model_path", "./MuseV/checkpoints/vae/sd-vae-ft-mse", "--time_size", "12", "--fps", "12"])
    return "./output.mp4"

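# Vid2Vid: run the MuseV video2video pipeline with the dwpose controlnet on the task written by add_new_video.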
@spaces.GPU(duration=240)
def run_video():
    subprocess.run(["python", "./MuseV/video2video.py", "--sd_model_name", "fantasticmix_v10",  "--unet_model_name", "musev", "-test_data_path", "./MuseV/configs/tasks/example.yaml", "--output_dir", "./output",  "--n_batch", "1", "--controlnet_name", "dwpose_body_hand",  "--which2video", "video_middle",  "--target_datas",  "dance1", "--fps", "12", "--time_size", "12"])
    return "./output.mp4"
    
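# Minimal Gradio UI: save an input, then trigger the corresponding generation run.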
with gr.Blocks() as demo:
    gr.Markdown("""# MuseV Image2Vid & Vid2Vid""")
    gr.Markdown("""Image2Vid""")
    image = gr.Image()
    button1 = gr.Button(value="Save Image")
    button1.click(fn=add_new_image, inputs=[image])
    button2 = gr.Button(value="Generate Img2Vid")
    video = gr.Video()
    button2.click(fn=run, outputs=video)
    gr.Markdown("""Vid2Vid""")
    video_in = gr.Video()
    button3 = gr.Button(value="Save Video")
    button3.click(fn=add_new_video, inputs=[video_in])
    button4 = gr.Button(value="Generate Vid2Vid")
    video_out = gr.Video()
    button4.click(fn=run_video, outputs=video_out)

demo.launch()