import argparse
import os
from collections import defaultdict

import llava
import numpy as np
from mmengine import dump, load
from tqdm import tqdm

PROMPT_TEMPLATES = {
    "instruction": "Evaluate if this video follows the instruction: '{instruction}'. Use the following scoring criteria:\n\n- 0: The video does not follow the instruction at all.\n- 1: The video includes the correct object but performs the wrong action, or vice versa.\n- 2: The video follows the instruction and shows a tendency toward the intended action but does not fully achieve the goal.\n- 3: The video follows the instruction precisely and successfully achieves the intended goal.\n\nLet's analyze step-by-step and conclude with 'Score: [score]'.",
    "physical_laws": 'Watch the video and determine if it shows any \'{physical_laws}\' Let\'s think step-by-step and conclude with "Yes" or "No".',
    "common_sense": 'Does the video exhibit \'{common_sense}\'? Let\'s think step-by-step and conclude with "Yes" or "No".',
}

QUESTION_POOL = {
    "instruction": None,
    "physical_laws": [
        "Violation of Newton's Law: Objects move without any external force.",
        "Violation of the Law of Conservation of Mass or Solid Constitutive Law: Objects deform or distort irregularly.",
        "Violation of Fluid Constitutive Law: Liquids flow in an unnatural or irregular manner.",
        "Violation of Non-physical Penetration: Objects unnaturally pass through each other.",
        "Violation of Gravity: Objects behave inconsistently with gravity, such as floating in the air.",
    ],
    "common_sense": [
        "Poor Aesthetics: Visually unappealing or low-quality content.",
        "Temporal Inconsistency: Noticeable flickering, choppy motion, or abrupt appearance/disappearance of irrelevant objects.",
    ],
}

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Script for evaluating generated videos on WorldModelBench."
    )
    parser.add_argument(
        "--judge",
        type=str,
        help="Path to the judge model checkpoint.",
    )
    parser.add_argument(
        "--video_dir",
        type=str,
        help="Path to the generated video directory.",
    )
    parser.add_argument(
        "--save_name",
        type=str,
        help="Path to save evaluation results.",
    )
    parser.add_argument(
        "--cot",
        action="store_true",
        help="Enable Chain-of-Thought prompting for the judge.",
    )
    args = parser.parse_args()

    validation_set = load("./worldmodelbench.json")

    if args.cot:
        args.save_name += "_cot"

    results = None
    if os.path.exists(args.save_name):
        ## Reuse previously saved predictions instead of re-running the judge.
        results = load(args.save_name)
        try:
            preds = results["preds"]
            accs = results["accs"]
        except KeyError as e:
            raise KeyError(
                "Expected keys 'preds' and 'accs' were not found in the results."
            ) from e
    else:
        model = llava.load(args.judge)
        preds = dict()
        accs = defaultdict(list)
        for vid, v_i in tqdm(enumerate(validation_set), total=len(validation_set)):
            ## Load video; skip instances whose video was not generated.
            video_name = v_i["first_frame"].split("/")[-1].split(".")[0]
            video = os.path.join(args.video_dir, video_name + ".mp4")
            if not os.path.exists(video):
                continue
            video = llava.Video(video)
            ## Traverse the evaluation criteria.
            for k in ["instruction", "physical_laws", "common_sense"]:
                preds_i = []
                prompt_template = PROMPT_TEMPLATES[k]
                qs = QUESTION_POOL[k]
                if qs is not None:
                    ## Yes/No questions: one per violation or defect type.
                    accs_i = []
                    for q in qs:
                        if k == "physical_laws":
                            text_prompt = prompt_template.format(
                                physical_laws=q.lower()
                            )
                        else:
                            text_prompt = prompt_template.format(
                                common_sense=q.lower()
                            )
                        if not args.cot:
                            text_prompt = text_prompt.replace(
                                "Let's think step-by-step and conclude with",
                                "Answer with",
                            ).replace(
                                "Let's analyze step-by-step and conclude with",
                                "Answer with",
                            )
                        pred = model.generate_content([video, text_prompt])
                        preds_i.append(pred)
                        ## The questions always ask for violations, so a "No" is preferred!
                        accs_i.append("no" in pred.lower())
                    accs[k].extend(accs_i)
                else:
                    ## Instruction following: a single question scored 0-3.
                    text_prompt = prompt_template.format(
                        instruction=v_i["text_instruction"]
                    )
                    if not args.cot:
                        text_prompt = text_prompt.replace(
                            "Let's think step-by-step and conclude with",
                            "Answer with",
                        ).replace(
                            "Let's analyze step-by-step and conclude with",
                            "Answer with",
                        )
                    pred = model.generate_content([video, text_prompt])
                    preds_i.append(pred)
                    try:
                        ## Parse the trailing "Score: [score]" from the response.
                        score = float(pred.split(":")[-1].strip(" ."))
                    except ValueError:
                        score = 0
                    accs[k].append(score)
                if video_name not in preds:
                    preds[video_name] = dict()
                preds[video_name][k] = preds_i

    ## Save results
    # if results is None:
    #     results = {"preds": preds, "accs": accs}
    # dump(results, f"./{args.save_name}.json", indent=4)

    ## Print results
    num_insts = len(preds)
    total_score = 0
    for k, v in accs.items():
        print(k + " details:")
        num_sub = len(v) // num_insts
        if num_sub == 1:
            print(f"-- overall score: {np.mean(v):.2f}.")
            total_score += np.mean(v)
        elif num_sub == 2:
            sub_scores = []
            for i, sub in enumerate(["framewise", "temporal"]):
                ## Entries of each subcategory are interleaved with stride num_sub.
                sub_score = np.mean(v[i::num_sub])
                print(f"-- {sub} score: {sub_score:.2f}.")
                sub_scores.append(sub_score)
            print(f"-- overall score: {np.mean(sub_scores):.2f}.")
            total_score += np.mean(sub_scores)
        elif num_sub == 5:
            sub_scores = []
            for i, sub in enumerate(
                ["newton", "mass", "fluid", "penetration", "gravity"]
            ):
                ## Entries of each subcategory are interleaved with stride num_sub.
                sub_score = np.mean(v[i::num_sub])
                print(f"-- {sub} score: {sub_score:.2f}.")
                sub_scores.append(sub_score)
            print(f"-- overall score: {np.mean(sub_scores):.2f}.")
            total_score += np.mean(sub_scores)
        else:
            raise ValueError("Unexpected number of subcategories!")
    print(f"\ntotal score: {total_score:.2f}.")
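# Example invocation, assuming the script is saved as evaluate_worldmodelbench.py
# (the filename and paths below are placeholders; only the flags are defined above):
#   python evaluate_worldmodelbench.py \
#       --judge /path/to/judge_checkpoint \
#       --video_dir /path/to/generated_videos \
#       --save_name results \
#       --cot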