koukyo1994 committed
Commit c6cbb41
1 Parent(s): 1ed4199

Upload inference.py with huggingface_hub

Files changed (1)
inference.py +8 -2
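The commit message above says the file was pushed with the huggingface_hub library. As a rough illustration only, a single-file upload of this kind is typically done through HfApi.upload_file; the repo_id and local path below are assumptions, not the author's actual command.

```python
# Rough sketch of a single-file upload with huggingface_hub.
# repo_id and paths are assumptions for illustration, not the author's exact call.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default
api.upload_file(
    path_or_fileobj="inference.py",   # local file to push
    path_in_repo="inference.py",      # destination path inside the repo
    repo_id="turing-motors/Terra",    # assumed target repository
    commit_message="Upload inference.py with huggingface_hub",
)
```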
inference.py CHANGED
@@ -20,6 +20,10 @@ TRAJ_TEMPLATE_PATH = Path("./assets/template_trajectory.json")
 PATH_START_ID = 9
 PATH_POINT_INTERVAL = 10
 N_ACTION_TOKENS = 6
+WM_TOKENIZER_COMBINATION = {
+    "world_model": "lfq_tokenizer_B_256",
+    "world_model_v2": "lfq_tokenizer_B_256_ema",
+}
 
 # change here if you want to use your own images
 CONDITIONING_FRAMES_DIR = Path("./assets/conditioning_frames")
@@ -109,6 +113,7 @@ if __name__ == "__main__":
     parser.add_argument("--cmd", type=str, default="curving_to_left/curving_to_left_moderate")
     parser.add_argument("--num_frames", type=int, default=25)
     parser.add_argument("--num_overlapping_frames", type=int, default=3)
+    parser.add_argument("--model_name", type=str, default="world_model_v2")
     args = parser.parse_args()
 
     assert args.num_frames <= MAX_NUM_FRAMES, f"`num_frames` should be less than or equal to {MAX_NUM_FRAMES}"
@@ -122,8 +127,9 @@ if __name__ == "__main__":
     output_dir.mkdir(parents=True, exist_ok=True)
 
     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-    tokenizer = AutoModel.from_pretrained("turing-motors/Terra", subfolder="lfq_tokenizer_B_256", trust_remote_code=True).to(device).eval()
-    model = AutoModel.from_pretrained("turing-motors/Terra", subfolder="world_model", trust_remote_code=True).to(device).eval()
+    tokenizer_name = WM_TOKENIZER_COMBINATION[args.model_name]
+    tokenizer = AutoModel.from_pretrained("turing-motors/Terra", subfolder=tokenizer_name, trust_remote_code=True).to(device).eval()
+    model = AutoModel.from_pretrained("turing-motors/Terra", subfolder=args.model_name, trust_remote_code=True).to(device).eval()
 
     conditioning_frames = load_images(CONDITIONING_FRAMES_PATH_LIST, IMAGE_SIZE).to(device)
     with torch.inference_mode(), torch.autocast(device_type="cuda"):
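Taken together, the diff adds a `--model_name` flag (defaulting to `world_model_v2`) and a `WM_TOKENIZER_COMBINATION` table so that each world model is loaded together with its matching tokenizer (the v2 model with the EMA tokenizer). A minimal, self-contained sketch of that pairing logic follows; the `load_terra` helper and its defaults are hypothetical, added only for illustration, while the mapping and AutoModel calls mirror the diff above.

```python
# Minimal sketch of the model/tokenizer pairing introduced by this commit.
# The mapping and AutoModel calls mirror the diff above; the load_terra helper
# and its defaults are hypothetical, added here only for illustration.
import torch
from transformers import AutoModel

WM_TOKENIZER_COMBINATION = {
    "world_model": "lfq_tokenizer_B_256",
    "world_model_v2": "lfq_tokenizer_B_256_ema",
}


def load_terra(model_name: str = "world_model_v2"):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Look up the tokenizer that belongs to the requested world model so the
    # two checkpoints never get mixed up.
    tokenizer_name = WM_TOKENIZER_COMBINATION[model_name]
    tokenizer = AutoModel.from_pretrained(
        "turing-motors/Terra", subfolder=tokenizer_name, trust_remote_code=True
    ).to(device).eval()
    model = AutoModel.from_pretrained(
        "turing-motors/Terra", subfolder=model_name, trust_remote_code=True
    ).to(device).eval()
    return tokenizer, model


if __name__ == "__main__":
    tokenizer, model = load_terra("world_model_v2")
```

With the updated script, switching checkpoints is then just a matter of the flag, e.g. `python inference.py --model_name world_model_v2` (the default) or `--model_name world_model` for the original weights.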