# CREMA_DATA / crop_portrait.py
# Uploaded via huggingface_hub (ameerazam08); header lines converted to
# comments so the file parses as Python.
# Legacy per-image variant (commented out, kept for reference):
# """
# Crop upper body in every video frame; a square bounding box is computed around the detected face.
# """
# import os
# import cv2
# import argparse
# from tqdm import tqdm
# import face_recognition
# import torch
# import util
# import numpy as np
# import face_detection
# def crop_per_image(data_dir, dest_size, crop_level):
# fa = face_detection.FaceAlignment(face_detection.LandmarksType._2D, flip_input=False, device='cuda')
# image_list = util.get_file_list(os.path.join(data_dir, 'full'))
# batch_size = 5
# frames = []
# for i in tqdm(range(len(image_list))):
# frame = face_recognition.load_image_file(image_list[i])
# frames.append(frame)
# H, W, _ = frames[0].shape
# batches = [frames[i:i + batch_size] for i in range(0, len(frames), batch_size)]
# for idx in tqdm(range(len(batches))):
# fb = batches[idx]
# preds = fa.get_detections_for_batch(np.asarray(fb))
# for j, f in enumerate(preds):
# if f is None:
# print('no face in image {}'.format(idx * batch_size + j))
# else:
# left, top, right, bottom = f
# height = bottom - top
# width = right - left
# crop_size = int(height * crop_level)
# horizontal_delta = (crop_size - width) // 2
# vertical_delta = (crop_size - height) // 2
# left = max(left - horizontal_delta, 0)
# right = min(right + horizontal_delta, W)
# top = max(top - int(vertical_delta * 0.5), 0)
# bottom = min(bottom + int(vertical_delta * 1.5), H)
# crop_f = cv2.imread(image_list[idx * batch_size + j])
# crop_f = crop_f[top:bottom, left:right]
# crop_f = cv2.resize(crop_f, (dest_size, dest_size), interpolation=cv2.INTER_AREA)
# cv2.imwrite(os.path.join(data_dir, 'crop', os.path.basename(image_list[idx * batch_size + j])), crop_f)
# if __name__ == '__main__':
# parser = argparse.ArgumentParser(description='Process some integers.')
# parser.add_argument('--data_dir', type=str, default=None)
# parser.add_argument('--dest_size', type=int, default=256)
# parser.add_argument('--crop_level', type=float, default=1.0, help='Adjust crop image size.')
# parser.add_argument('--vertical_adjust', type=float, default=0.3, help='Adjust vertical location of portrait in image.')
# args = parser.parse_args()
# util.create_dir(os.path.join(args.data_dir,'crop'))
# util.create_dir(os.path.join(args.data_dir, 'crop_region'))
# crop_per_image(args.data_dir, dest_size=args.dest_size, crop_level=args.crop_level)
import os
import cv2
import argparse
from tqdm import tqdm
import face_recognition
import numpy as np
import face_detection
import util
def crop_per_frame_and_make_video(data_dir, dest_size, crop_level, video_out_path, fps=30):
    """Detect a face in each frame under ``<data_dir>/full``, crop a square
    region around it, and compile the crops into a video file.

    Args:
        data_dir: Directory containing a ``full`` subdirectory of frame images.
        dest_size: Side length in pixels of the square output frames.
        crop_level: Crop size relative to the detected face height
            (1.0 = exactly the face height; larger values widen the crop).
        video_out_path: Path of the output video (mp4v codec).
        fps: Frame rate of the output video.

    Raises:
        RuntimeError: If no readable frames are found, or no face is detected
            in any frame (previously these cases crashed with IndexError or
            silently produced an empty video).
    """
    # Face detector runs on GPU; frames are fed in small batches.
    fa = face_detection.FaceAlignment(face_detection.LandmarksType._2D, flip_input=False, device='cuda')
    image_list = util.get_file_list(os.path.join(data_dir, 'full'))
    if not image_list:
        raise RuntimeError(f'No frames found under {os.path.join(data_dir, "full")}')
    batch_size = 5
    frames = []
    for image_path in tqdm(image_list, desc='Loading images'):
        frame = cv2.imread(image_path)
        if frame is None:
            # Unreadable/corrupt file: cv2.imread returns None, which would
            # break np.asarray batching later — skip it instead.
            print(f'Could not read image {image_path}, skipping')
            continue
        frames.append(frame)
    if not frames:
        raise RuntimeError('No readable frames to process')
    # All frames are assumed to share this shape — TODO confirm source frames
    # come from a single video and are uniformly sized.
    H, W, _ = frames[0].shape
    batches = [frames[i:i + batch_size] for i in range(0, len(frames), batch_size)]
    cropped_frames = []
    for idx, fb in enumerate(tqdm(batches, desc='Processing batches')):
        preds = fa.get_detections_for_batch(np.asarray(fb))
        for j, f in enumerate(preds):
            if f is None:
                print(f'No face in image {idx * batch_size + j}')
                continue  # skip frames with no detected face
            left, top, right, bottom = f
            height = bottom - top
            width = right - left
            # Square crop sized by face height; expand symmetrically sideways,
            # and asymmetrically vertically (0.5 up, 1.5 down) so the crop
            # leans toward the upper body below the face.
            crop_size = int(height * crop_level)
            horizontal_delta = (crop_size - width) // 2
            vertical_delta = (crop_size - height) // 2
            left = max(left - horizontal_delta, 0)
            right = min(right + horizontal_delta, W)
            top = max(top - int(vertical_delta * 0.5), 0)
            bottom = min(bottom + int(vertical_delta * 1.5), H)
            crop_f = fb[j][top:bottom, left:right]
            if crop_f.size == 0:
                # Degenerate box after clamping — cv2.resize would raise.
                print(f'Empty crop for image {idx * batch_size + j}, skipping')
                continue
            crop_f = cv2.resize(crop_f, (dest_size, dest_size), interpolation=cv2.INTER_AREA)
            cropped_frames.append(crop_f)
    if not cropped_frames:
        raise RuntimeError('No faces detected in any frame; nothing to write')
    # Compile the cropped frames into a video.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(video_out_path, fourcc, fps, (dest_size, dest_size))
    try:
        for frame in tqdm(cropped_frames, desc='Compiling video'):
            out.write(frame)
    finally:
        # Always close the writer so the container is finalized, even if a
        # write fails part-way through.
        out.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # CLI entry point: parse options, ensure the crop directory exists,
    # then run the crop-and-compile pipeline.
    arg_parser = argparse.ArgumentParser(description='Crop video frames and compile into a video.')
    arg_parser.add_argument('--data_dir', type=str, required=True, help='Directory with video frames to process.')
    arg_parser.add_argument('--dest_size', type=int, default=256, help='Destination size of cropped images.')
    arg_parser.add_argument('--crop_level', type=float, default=1.0, help='Adjust crop size relative to face detection.')
    arg_parser.add_argument('--video_out_path', type=str, required=True, help='Output path for the resulting video.')
    arg_parser.add_argument('--fps', type=int, default=30, help='Frames per second for the output video.')
    opts = arg_parser.parse_args()
    util.create_dir(os.path.join(opts.data_dir, 'crop'))
    crop_per_frame_and_make_video(
        opts.data_dir,
        dest_size=opts.dest_size,
        crop_level=opts.crop_level,
        video_out_path=opts.video_out_path,
        fps=opts.fps,
    )