lawls committed
Commit 61c27cc
1 Parent(s): e59b7a8

Initial commit

qrdqn-SpaceInvadersNoFrameskip-v4.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2f82192c5468c745cfd2cb049ebcf77fb6e068370096f3f62a90c3fbf31b9e46
+oid sha256:066b17cacdebdd6b89c9a9a533eed1561eea3d7b5c475df3f27856b909c4a2ce
 size 38187921
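Only the Git LFS pointer changed here: the sha256 oid differs while the size is identical, so the checkpoint was re-uploaded with different bytes of the same length. As a minimal sketch (assuming the zip has been pulled locally under its repo filename), a download can be checked against the new pointer's oid:

import hashlib

# Hypothetical local path; adjust to wherever the checkpoint was downloaded.
path = "qrdqn-SpaceInvadersNoFrameskip-v4.zip"

# Compute the SHA-256 digest in chunks to avoid loading the ~38 MB file at once.
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

# The oid recorded in the new LFS pointer from this commit.
expected = "066b17cacdebdd6b89c9a9a533eed1561eea3d7b5c475df3f27856b909c4a2ce"
assert digest.hexdigest() == expected, "checkpoint does not match the LFS pointer"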
qrdqn-SpaceInvadersNoFrameskip-v4/data CHANGED
@@ -4,9 +4,9 @@
 ":serialized:": "gAWVLAAAAAAAAACMGnNiM19jb250cmliLnFyZHFuLnBvbGljaWVzlIwJQ25uUG9saWN5lJOULg==",
 "__module__": "sb3_contrib.qrdqn.policies",
 "__doc__": "\n Policy class for QR-DQN when using images as input.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param n_quantiles: Number of quantiles\n :param net_arch: The specification of the network architecture.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
-"__init__": "<function CnnPolicy.__init__ at 0x177f2e7a0>",
+"__init__": "<function CnnPolicy.__init__ at 0x28a72a840>",
 "__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc._abc_data object at 0x177f38d80>"
+"_abc_impl": "<_abc._abc_data object at 0x28a738b40>"
 },
 "verbose": 1,
 "policy_kwargs": {
@@ -91,13 +91,13 @@
 "__module__": "stable_baselines3.common.buffers",
 "__annotations__": "{'observations': <class 'numpy.ndarray'>, 'next_observations': <class 'numpy.ndarray'>, 'actions': <class 'numpy.ndarray'>, 'rewards': <class 'numpy.ndarray'>, 'dones': <class 'numpy.ndarray'>, 'timeouts': <class 'numpy.ndarray'>}",
 "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
-"__init__": "<function ReplayBuffer.__init__ at 0x176d1fec0>",
-"add": "<function ReplayBuffer.add at 0x176d20040>",
-"sample": "<function ReplayBuffer.sample at 0x176d200e0>",
-"_get_samples": "<function ReplayBuffer._get_samples at 0x176d20180>",
-"_maybe_cast_dtype": "<staticmethod(<function ReplayBuffer._maybe_cast_dtype at 0x176d20220>)>",
+"__init__": "<function ReplayBuffer.__init__ at 0x289b23f60>",
+"add": "<function ReplayBuffer.add at 0x289b240e0>",
+"sample": "<function ReplayBuffer.sample at 0x289b24180>",
+"_get_samples": "<function ReplayBuffer._get_samples at 0x289b24220>",
+"_maybe_cast_dtype": "<staticmethod(<function ReplayBuffer._maybe_cast_dtype at 0x289b242c0>)>",
 "__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc._abc_data object at 0x176cc0700>"
+"_abc_impl": "<_abc._abc_data object at 0x289ac4640>"
 },
 "replay_buffer_kwargs": {},
 "train_freq": {
results.json CHANGED
@@ -1 +1 @@
-{"mean_reward": 578.0, "std_reward": 134.3726162579266, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2023-12-24T01:37:03.608465"}
+{"mean_reward": 578.0, "std_reward": 134.3726162579266, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2023-12-24T10:43:27.236379"}