import gymnasium as gym
from stable_baselines3 import DQN
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import VecVideoRecorder, DummyVecEnv
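# NOTE: depending on the installed gymnasium / ale-py versions, the ALE envs may
# need to be registered explicitly before gym.make() can resolve "ALE/Pacman-v5":
#   import ale_py
#   gym.register_envs(ale_py)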
env_id = "ALE/Pacman-v5"
video_folder = "videos/"
video_length = 1000  # steps
vec_env = DummyVecEnv([lambda: gym.make(env_id, render_mode="rgb_array")])
# output: <stable_baselines3.common.vec_env.dummy_vec_env.DummyVecEnv object at 0x0000029974DC6550>
# Other environment constructions tried:
# vec_env = gym.make(env_id, render_mode="rgb_array")
# output: <OrderEnforcing<PassiveEnvChecker<AtariEnv<ALE/Pacman-v5>>>>
# vec_env = Monitor(gym.make(env_id, render_mode="rgb_array"))

# Load the trained agent; DQN.load() resolves "ALE-Pacman-v5" to the saved "ALE-Pacman-v5.zip"
model = DQN.load("ALE-Pacman-v5")
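# DummyVecEnv wraps the single Gymnasium env in Stable-Baselines3's vectorized
# (VecEnv) interface, which is what VecVideoRecorder below expects.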
print("\n\n\n")
print(vec_env)
print("\n\n\n")
obs = vec_env.reset()
# Record the video starting at the first step
vec_env = VecVideoRecorder(
    vec_env,
    video_folder,
    record_video_trigger=lambda x: x == 0,
    video_length=video_length,
    # name_prefix=f"video-{env_id}"
)
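# The trigger fires when the recorder's step counter is 0, so capture starts as
# soon as the wrapped env is reset; the recorder stops (and saves) after
# video_length recorded steps or when the env is closed.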
# Walk through the environment, acting according to the trained policy,
# so the recorder captures the agent's behaviour frame by frame
obs = vec_env.reset()
print(vec_env)  # now wrapped in VecVideoRecorder
for _ in range(video_length + 1):
    # predict() returns (action, state); the state is only used by recurrent policies
    action, _states = model.predict(obs)
    obs, _, _, _ = vec_env.step(action)
# Save the video: closing the recorder flushes any remaining frames to disk
vec_env.close()
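# The result is an .mp4 in video_folder; with name_prefix left at its default,
# the file name starts with "rl-video".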