ALE-Pacman-v5/agents/record_video.py
import gymnasium as gym
from stable_baselines3 import DQN
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, VecVideoRecorder

env_id = "ALE/Pacman-v5"
video_folder = "videos/"
video_length = 1000  # steps

vec_env = DummyVecEnv([lambda: gym.make(env_id, render_mode="rgb_array")])
model = DQN.load("ALE-Pacman-v5")
# Debugging notes on what each environment construction prints:
# DummyVecEnv (used above):
#   <stable_baselines3.common.vec_env.dummy_vec_env.DummyVecEnv object at 0x0000029974DC6550>
# vec_env = gym.make(env_id, render_mode="rgb_array")
#   -> <OrderEnforcing<PassiveEnvChecker<AtariEnv<ALE/Pacman-v5>>>>
# vec_env = Monitor(gym.make(env_id, render_mode="rgb_array"))
print("\n\n\n")
print(vec_env)
print("\n\n\n")
obs = vec_env.reset()
# Record the video starting at the first step
vec_env = VecVideoRecorder(
    vec_env,
    video_folder,
    record_video_trigger=lambda step: step == 0,
    video_length=video_length,
    # name_prefix=f"video-{env_id}",
)
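# Note: with name_prefix left at its default, stable-baselines3 names the file
# something like "rl-video-step-0-to-step-1000.mp4" inside video_folder; the
# exact pattern depends on the SB3 version. Uncomment name_prefix above to
# control it.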
# Roll out the trained policy to generate the frames for the recording.
# Reset again after wrapping so the recorder starts capturing from step 0.
obs = vec_env.reset()
print(vec_env)
for _ in range(video_length + 1):
    # deterministic=True acts greedily on the learned Q-values rather than
    # keeping DQN's leftover epsilon-greedy exploration.
    action, _states = model.predict(obs, deterministic=True)
    obs, _, _, _ = vec_env.step(action)
# Save the video: closing the env stops the recorder and writes the file.
vec_env.close()
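# Optional check (not part of the original script): list what the recorder
# actually wrote, so an empty or missing video is caught right away.
from pathlib import Path

for video in sorted(Path(video_folder).glob("*.mp4")):
    print(video, video.stat().st_size, "bytes")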