Experiment2
README.md (CHANGED)

@@ -1,5 +1,5 @@
 ---
-library_name:
+library_name: allennlp
 tags:
 - deep-reinforcement-learning
 - reinforcement-learning
@@ -17,6 +17,14 @@ model-index:
     dataset:
       name: OmniIsaacGymEnvs-ShadowHand
       type: OmniIsaacGymEnvs-ShadowHand
+license: openrail
+datasets:
+- togethercomputer/RedPajama-Data-1T
+language:
+- ak
+metrics:
+- accuracy
+pipeline_tag: text-classification
 ---
 
 # OmniIsaacGymEnvs-ShadowHand-PPO
@@ -41,12 +49,12 @@ agent.load(path)
 ```python
 # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
 cfg_ppo = PPO_DEFAULT_CONFIG.copy()
-cfg_ppo["rollouts"] =
-cfg_ppo["learning_epochs"] =
-cfg_ppo["mini_batches"] = 4
+cfg_ppo["rollouts"] = 20 # memory_size
+cfg_ppo["learning_epochs"] = 10
+cfg_ppo["mini_batches"] = 4 # 16 * 8192 / 32768
 cfg_ppo["discount_factor"] = 0.99
-cfg_ppo["lambda"] =
-cfg_ppo["learning_rate"] =
+cfg_ppo["lambda"] = 1
+cfg_ppo["learning_rate"] = 6e-4
 cfg_ppo["learning_rate_scheduler"] = KLAdaptiveRL
 cfg_ppo["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.016}
 cfg_ppo["random_timesteps"] = 0
@@ -56,7 +64,7 @@ cfg_ppo["ratio_clip"] = 0.2
 cfg_ppo["value_clip"] = 0.2
 cfg_ppo["clip_predicted_values"] = True
 cfg_ppo["entropy_loss_scale"] = 0.0
-cfg_ppo["value_loss_scale"] = 2.
+cfg_ppo["value_loss_scale"] = 2.5
 cfg_ppo["kl_threshold"] = 0
 cfg_ppo["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01
 cfg_ppo["state_preprocessor"] = RunningStandardScaler
@@ -66,4 +74,5 @@ cfg_ppo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
 # logging to TensorBoard and writing checkpoints
 cfg_ppo["experiment"]["write_interval"] = 800
 cfg_ppo["experiment"]["checkpoint_interval"] = 8000
-
+
+```
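For context on how a `cfg_ppo` dictionary like the one in this diff is consumed, the sketch below wires it into skrl's PPO agent and sequential trainer. It is a minimal, illustrative example, not the training script behind this checkpoint: the `Pendulum-v1` environment stands in for the OmniIsaacGymEnvs ShadowHand task, the `Policy`/`Value` network sizes, the `state_preprocessor_kwargs` and the timestep count are assumptions, and the import paths follow the older skrl torch API that the card's documentation link points to (newer releases move `wrap_env` to `skrl.envs.wrappers.torch`).

```python
import gymnasium as gym
import torch
import torch.nn as nn

from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.trainers.torch import SequentialTrainer


class Policy(GaussianMixin, Model):
    """Illustrative Gaussian policy (layer sizes are assumptions, not the card's)."""
    def __init__(self, observation_space, action_space, device):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions=False)
        self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ELU(),
                                 nn.Linear(64, self.num_actions))
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), self.log_std_parameter, {}


class Value(DeterministicMixin, Model):
    """Illustrative value function (layer sizes are assumptions, not the card's)."""
    def __init__(self, observation_space, action_space, device):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self)
        self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ELU(),
                                 nn.Linear(64, 1))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}


# stand-in environment; the card's agent was trained on OmniIsaacGymEnvs-ShadowHand
env = wrap_env(gym.make("Pendulum-v1"))
device = env.device

# rollout memory sized to match cfg_ppo["rollouts"]
memory = RandomMemory(memory_size=20, num_envs=env.num_envs, device=device)

models = {"policy": Policy(env.observation_space, env.action_space, device),
          "value": Value(env.observation_space, env.action_space, device)}

# hyperparameters taken from the diff above
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["rollouts"] = 20
cfg_ppo["learning_epochs"] = 10
cfg_ppo["mini_batches"] = 4
cfg_ppo["discount_factor"] = 0.99
cfg_ppo["lambda"] = 1
cfg_ppo["learning_rate"] = 6e-4
cfg_ppo["learning_rate_scheduler"] = KLAdaptiveRL
cfg_ppo["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.016}
cfg_ppo["random_timesteps"] = 0
cfg_ppo["ratio_clip"] = 0.2
cfg_ppo["value_clip"] = 0.2
cfg_ppo["clip_predicted_values"] = True
cfg_ppo["entropy_loss_scale"] = 0.0
cfg_ppo["value_loss_scale"] = 2.5
cfg_ppo["kl_threshold"] = 0
cfg_ppo["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}  # assumed standard kwargs
cfg_ppo["value_preprocessor"] = RunningStandardScaler  # implied by the value_preprocessor_kwargs in the card
cfg_ppo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
cfg_ppo["experiment"]["write_interval"] = 800
cfg_ppo["experiment"]["checkpoint_interval"] = 8000

agent = PPO(models=models,
            memory=memory,
            cfg=cfg_ppo,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)

# train; the timestep count here is an arbitrary example
trainer = SequentialTrainer(env=env, agents=agent, cfg={"timesteps": 100000, "headless": True})
trainer.train()
```

With the experiment settings above, TensorBoard summaries are written every 800 timesteps and checkpoints every 8000 timesteps, matching the intervals in the card.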