svjack committed on
Commit
4de18d2
1 Parent(s): 867e309

Upload 2 files

Files changed (2)
  1. produce_gif_script.py +221 -0
  2. xiangling_video_seed.csv +0 -0
produce_gif_script.py ADDED
@@ -0,0 +1,221 @@
'''
!huggingface-cli download \
--repo-type dataset svjack/video-dataset-Lily-Bikini-rm-background-organized \
--local-dir video-dataset-Lily-Bikini-rm-background-organized

import re

def insert_content_in_string(insert_content, character_name, gender=None):
    """
    Insert content at a specific position in the base prompt string.

    :param insert_content: content to insert
    :param character_name: character name
    :param gender: gender (optional, "1boy" or "1girl"; defaults to "1boy")
    :return: (original_string, modified_string)
    """
    # Build original_string from character_name and gender
    original_string = f"solo,{character_name}\\(genshin impact\\),{gender if gender else '1boy'},highres,"
    # Build target_pattern from character_name
    target_pattern = re.escape(character_name)
    # Insert the content right after the character name
    modified_string = re.sub(target_pattern, r'\g<0>' + insert_content, original_string)
    return original_string, modified_string
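# For illustration (hypothetical action text, not taken from the dataset):
# insert_content_in_string(", cooking a meal", "Xiangling", "1girl") returns the pair
#   ("solo,Xiangling\\(genshin impact\\),1girl,highres,",
#    "solo,Xiangling, cooking a meal\\(genshin impact\\),1girl,highres,")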

from datasets import load_dataset
character_name = "Xiangling"
gender = "1girl"  # optional
# Build one prompt per action by inserting the English action text after the character name
prompt_list = load_dataset("svjack/daily-actions-en-zh")["train"].to_pandas()["en"].map(
    lambda x: ", {}".format(x)
).map(
    lambda insert_content: insert_content_in_string(insert_content, character_name, gender)[-1]
).dropna().drop_duplicates().values.tolist()
print(len(prompt_list))

import pandas as pd
import pathlib
# Collect all reference MP4 clips from the downloaded dataset
reference_video_list = pd.Series(
    list(pathlib.Path("video-dataset-Lily-Bikini-rm-background-organized").rglob("*.mp4"))
).map(str).values.tolist()
print(len(reference_video_list))

from itertools import product
# Pair every reference video with every prompt and write the seed CSV
pd.DataFrame(list(product(*[reference_video_list, prompt_list])))[[1, 0]].rename(
    columns={
        1: "prompt",
        0: "input_video"
    }
).to_csv("xiangling_video_seed.csv", index=False)
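# Sanity check (optional): the seed CSV should contain len(reference_video_list) * len(prompt_list)
# rows with two columns, "prompt" and "input_video".
# pd.read_csv("xiangling_video_seed.csv").head()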

!python produce_gif_script.py xiangling_video_seed.csv "svjack/GenshinImpact_XL_Base" xiangling_gif_dir \
--num_frames 16 --temp_folder temp_frames --seed 0 --controlnet_conditioning_scale 0.3
'''

import sys
sys.path.insert(0, "diffusers-sdxl-controlnet/examples/community/")
# AnimateDiffSDXLControlnetPipeline is provided by this community example module
from animatediff_controlnet_sdxl import *

import argparse
from moviepy.editor import VideoFileClip, ImageSequenceClip
import os
import torch
from diffusers.models import MotionAdapter
from diffusers import DDIMScheduler, AutoPipelineForText2Image, ControlNetModel
from diffusers.utils import export_to_gif
from PIL import Image
from controlnet_aux.processor import Processor
import pandas as pd
import random
from tqdm import tqdm

# Initialize the MotionAdapter; the ControlNetModel is loaded in initialize_pipeline below
adapter = MotionAdapter.from_pretrained("a-r-r-o-w/animatediff-motion-adapter-sdxl-beta", torch_dtype=torch.float16)

def initialize_pipeline(model_id):
    scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1)
    controlnet = ControlNetModel.from_pretrained("thibaud/controlnet-openpose-sdxl-1.0", torch_dtype=torch.float16).to("cuda")

    # Initialize the AnimateDiffSDXLControlnetPipeline
    pipe = AnimateDiffSDXLControlnetPipeline.from_pretrained(
        model_id,
        controlnet=controlnet,
        motion_adapter=adapter,
        scheduler=scheduler,
        torch_dtype=torch.float16,
    ).to("cuda")
    # Slicing/tiling reduce peak VRAM during VAE decoding of the frame batch
    pipe.enable_vae_slicing()
    pipe.enable_vae_tiling()
    return pipe

# Global pipeline handle
pipe = None
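# The SDXL + ControlNet pipeline is built lazily inside generate_video_with_prompt
# (see the `global pipe` check there), so it is loaded once and reused for every CSV row.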

def split_video_into_frames(input_video_path, num_frames, temp_folder='temp_frames'):
    """
    Resample a video down to a fixed number of frames while keeping the original frame rate.

    :param input_video_path: path to the input video file
    :param num_frames: target number of frames
    :param temp_folder: path of the temporary folder for extracted frames
    """
    clip = VideoFileClip(input_video_path)
    original_duration = clip.duration
    segment_duration = original_duration / num_frames

    if not os.path.exists(temp_folder):
        os.makedirs(temp_folder)

    # Sample num_frames evenly spaced frames from the source clip
    for i in range(num_frames):
        frame_time = i * segment_duration
        frame_path = os.path.join(temp_folder, f'frame_{i:04d}.png')
        clip.save_frame(frame_path, t=frame_time)

    frame_paths = [os.path.join(temp_folder, f'frame_{i:04d}.png') for i in range(num_frames)]
    final_clip = ImageSequenceClip(frame_paths, fps=clip.fps)
    final_clip.write_videofile("resampled_video.mp4", codec='libx264')

    print(f"Resampled video saved to resampled_video.mp4 with {num_frames} frames at the original frame rate.")

def generate_video_with_prompt(input_video_path, prompt, model_id, gif_output_path, seed=0, num_frames=16, keep_imgs=False, temp_folder='temp_frames', num_inference_steps=50, guidance_scale=20, controlnet_conditioning_scale=0.5, width=512, height=768):
    """
    Generate a GIF from a text prompt, guided by the poses of a reference video.

    :param input_video_path: path to the input (reference) video file
    :param prompt: text prompt
    :param model_id: model ID
    :param gif_output_path: output GIF file path
    :param seed: random seed
    :param num_frames: target number of frames
    :param keep_imgs: whether to keep the temporary frame images
    :param temp_folder: path of the temporary folder
    :param num_inference_steps: number of inference steps
    :param guidance_scale: guidance scale
    :param controlnet_conditioning_scale: ControlNet conditioning scale
    :param width: output width
    :param height: output height
    """
    split_video_into_frames(input_video_path, num_frames, temp_folder)

    folder_path = temp_folder
    frames = os.listdir(folder_path)
    frames = list(filter(lambda x: x.endswith(".png"), frames))
    frames.sort()
    conditioning_frames = list(map(lambda x: Image.open(os.path.join(folder_path, x)).resize((1024, 1024)), frames))[:num_frames]

    p2 = Processor("openpose")
    cn2 = [p2(frame) for frame in conditioning_frames]
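    # Each resized reference frame is converted to an OpenPose skeleton image; these pose
    # maps condition the SDXL ControlNet so the generated frames follow the reference motion.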

    negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly"
    generator = torch.Generator(device="cuda").manual_seed(seed)

    global pipe
    if pipe is None:
        pipe = initialize_pipeline(model_id)

    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        width=width,
        height=height,
        num_frames=num_frames,
        conditioning_frames=cn2,
        generator=generator
    )

    frames = output.frames[0]
    export_to_gif(frames, gif_output_path)

    print(f"Generated GIF saved to {gif_output_path}")

    if not keep_imgs:
        # Remove the temporary frame folder
        import shutil
        shutil.rmtree(temp_folder)

def sanitize_prompt(prompt):
    """
    Replace any character that is not alphanumeric or a comma with an underscore.
    """
    return "".join([c if c.isalnum() or c in [",", "，"] else '_' for c in prompt])

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate GIFs guided by text prompts and reference videos")
    parser.add_argument("csv_file", help="path to the seed CSV file")
    parser.add_argument("model_id", help="model ID")
    parser.add_argument("output_dir", help="output directory for the GIFs")
    parser.add_argument("--seed", type=int, default=0, help="random seed")
    parser.add_argument("--num_frames", type=int, default=16, help="target number of frames")
    parser.add_argument("--keep_imgs", action="store_true", help="keep the temporary frame images")
    parser.add_argument("--temp_folder", default='temp_frames', help="temporary folder path")
    parser.add_argument("--num_inference_steps", type=int, default=50, help="number of inference steps")
    parser.add_argument("--guidance_scale", type=float, default=20.0, help="guidance scale")
    parser.add_argument("--controlnet_conditioning_scale", type=float, default=0.5, help="ControlNet conditioning scale")
    parser.add_argument("--width", type=int, default=512, help="output width")
    parser.add_argument("--height", type=int, default=768, help="output height")

    args = parser.parse_args()

    # Read the CSV of (prompt, input_video) pairs
    df = pd.read_csv(args.csv_file)

    for index, row in tqdm(df.iterrows(), total=df.shape[0]):
        input_video = row['input_video']
        prompt = row['prompt']

        # Draw a fresh random seed for each row (note: this overrides the --seed argument)
        seed = random.randint(0, 2**32 - 1)

        # Sanitize the prompt for use in the file name
        sanitized_prompt = sanitize_prompt(prompt)

        # Build the GIF output path, including the seed
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        gif_output_path = os.path.join(args.output_dir, f"{sanitized_prompt}_seed_{seed}.gif")

        generate_video_with_prompt(input_video, prompt, args.model_id, gif_output_path, seed, args.num_frames,
                                    args.keep_imgs, args.temp_folder, args.num_inference_steps, args.guidance_scale,
                                    args.controlnet_conditioning_scale, args.width, args.height)
xiangling_video_seed.csv ADDED
The diff for this file is too large to render. See raw diff