# EchoMimic/src/utils/util.py
import importlib
import os
import os.path as osp
import shutil
import sys
from pathlib import Path
import av
import numpy as np
import torch
import torchvision
from einops import rearrange
from PIL import Image


def seed_everything(seed):
    import random

    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed % (2**32))
    random.seed(seed)
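
# Illustrative usage (not part of the original module): seed every RNG once
# before sampling so runs are reproducible.
# >>> seed_everything(42)
# >>> torch.rand(1)  # same value on every run with the same seed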


def import_filename(filename):
    # load an arbitrary Python file as a module without requiring it on sys.path
    spec = importlib.util.spec_from_file_location("mymodule", filename)
    module = importlib.util.module_from_spec(spec)
    sys.modules[spec.name] = module
    spec.loader.exec_module(module)
    return module
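
# Illustrative usage (not part of the original module); the path and attribute
# below are hypothetical. Any top-level names defined in the file become
# attributes of the returned module object.
# >>> cfg = import_filename("./configs/train_config.py")
# >>> cfg.learning_rate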


def delete_additional_ckpt(base_path, num_keep):
    dirs = []
    for d in os.listdir(base_path):
        if d.startswith("checkpoint-"):
            dirs.append(d)
    num_tot = len(dirs)
    if num_tot <= num_keep:
        return
    # sort checkpoints by step number and delete the earliest ones
    del_dirs = sorted(dirs, key=lambda x: int(x.split("-")[-1]))[: num_tot - num_keep]
    for d in del_dirs:
        path_to_dir = osp.join(base_path, d)
        if osp.exists(path_to_dir):
            shutil.rmtree(path_to_dir)
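
# Illustrative usage (not part of the original module); the directory below is
# hypothetical. Keeps only the two newest "checkpoint-<step>" directories under
# ./exp_output and removes the rest.
# >>> delete_additional_ckpt("./exp_output", num_keep=2)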


def save_videos_from_pil(pil_images, path, fps=8, audio_path=None):
    save_fmt = Path(path).suffix
    # guard against paths with no directory component
    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    width, height = pil_images[0].size

    if save_fmt == ".mp4":
        codec = "libx264"
        container = av.open(path, "w")
        stream = container.add_stream(codec, rate=fps)
        stream.width = width
        stream.height = height
        for pil_image in pil_images:
            av_frame = av.VideoFrame.from_image(pil_image)
            container.mux(stream.encode(av_frame))
        # flush any frames still buffered in the encoder
        container.mux(stream.encode())
        container.close()
    elif save_fmt == ".gif":
        pil_images[0].save(
            fp=path,
            format="GIF",
            append_images=pil_images[1:],
            save_all=True,
            duration=(1 / fps * 1000),
            loop=0,
        )
    else:
        raise ValueError("Unsupported file type. Use .mp4 or .gif.")
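
# Illustrative usage (not part of the original module): write a short test
# clip from solid-color PIL frames.
# >>> frames = [Image.new("RGB", (64, 64), (i * 8 % 256, 0, 0)) for i in range(24)]
# >>> save_videos_from_pil(frames, "./demo/solid.mp4", fps=8)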


def save_videos_grid(videos: torch.Tensor, path: str, audio_path=None, rescale=False, n_rows=6, fps=8):
    # videos: (b, c, t, h, w); lay out each time step as a grid over the batch
    videos = rearrange(videos, "b c t h w -> t b c h w")
    height, width = videos.shape[-2:]
    outputs = []
    for x in videos:
        x = torchvision.utils.make_grid(x, nrow=n_rows)  # (c, h, w)
        x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)  # (h, w, c)
        if rescale:
            x = (x + 1.0) / 2.0  # [-1, 1] -> [0, 1]
        x = (x * 255).numpy().astype(np.uint8)
        outputs.append(Image.fromarray(x))

    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    save_videos_from_pil(outputs, path, fps, audio_path=audio_path)
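
# Illustrative usage (not part of the original module): save a random
# (batch, channel, time, height, width) tensor as an .mp4 grid.
# >>> vid = torch.rand(2, 3, 16, 64, 64)  # values already in [0, 1], so rescale=False
# >>> save_videos_grid(vid, "./demo/grid.mp4", n_rows=2, fps=8)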


def read_frames(video_path):
    container = av.open(video_path)
    video_stream = next(s for s in container.streams if s.type == "video")

    frames = []
    for packet in container.demux(video_stream):
        for frame in packet.decode():
            image = Image.frombytes(
                "RGB",
                (frame.width, frame.height),
                frame.to_rgb().to_ndarray(),
            )
            frames.append(image)
    container.close()
    return frames


def get_fps(video_path):
    container = av.open(video_path)
    video_stream = next(s for s in container.streams if s.type == "video")
    fps = video_stream.average_rate
    container.close()
    return fps
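
# Illustrative usage (not part of the original module); the file name is
# hypothetical. Decode a reference clip and re-save it at its original rate.
# >>> frames = read_frames("./assets/ref_video.mp4")
# >>> save_videos_from_pil(frames, "./demo/copy.mp4", fps=int(get_fps("./assets/ref_video.mp4")))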


def crop_and_pad(image, rect):
    x0, y0, x1, y1 = rect
    h, w = image.shape[:2]

    # clamp the coordinates to the image bounds
    x0, y0 = max(0, x0), max(0, y0)
    x1, y1 = min(w, x1), min(h, y1)

    # width and height of the original box
    width = x1 - x0
    height = y1 - y0

    # use the shorter side as the side length of the square crop
    side_length = min(width, height)

    # center of the square box
    center_x = (x0 + x1) // 2
    center_y = (y0 + y1) // 2

    # recompute the square box coordinates around the center
    new_x0 = max(0, center_x - side_length // 2)
    new_y0 = max(0, center_y - side_length // 2)
    new_x1 = min(w, new_x0 + side_length)
    new_y1 = min(h, new_y0 + side_length)

    # final size correction to make sure the crop stays square
    if (new_x1 - new_x0) != (new_y1 - new_y0):
        side_length = min(new_x1 - new_x0, new_y1 - new_y0)
        new_x1 = new_x0 + side_length
        new_y1 = new_y0 + side_length

    # crop the image
    cropped_image = image[new_y0:new_y1, new_x0:new_x1]
    return cropped_image
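
# Illustrative usage (not part of the original module); the image file is
# hypothetical. Crop a square region from a NumPy image given an
# (x0, y0, x1, y1) detection box; the side length is min(box_w, box_h).
# >>> img = np.asarray(Image.open("./assets/ref_image.png").convert("RGB"))
# >>> face = crop_and_pad(img, (100, 80, 260, 300))
# >>> Image.fromarray(face).size  # square crop, e.g. (160, 160) for this box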