{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8941066265106201,
"min": 0.8941066265106201,
"max": 2.880601644515991,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8566.435546875,
"min": 8566.435546875,
"max": 29690.361328125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.65585708618164,
"min": 0.3789220154285431,
"max": 12.65585708618164,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2467.89208984375,
"min": 73.51087188720703,
"max": 2574.19873046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0684747336058268,
"min": 0.0640885880636465,
"max": 0.07402983223494397,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2738989344233072,
"min": 0.256354352254586,
"max": 0.36345636334576115,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20677873514154382,
"min": 0.10435694752959535,
"max": 0.27434642881739374,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8271149405661753,
"min": 0.4174277901183814,
"max": 1.3717321440869688,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.00029190600269799995,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.197302,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048653698,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.045454545454547,
"min": 3.0,
"max": 25.113636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1102.0,
"min": 132.0,
"max": 1355.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.045454545454547,
"min": 3.0,
"max": 25.113636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1102.0,
"min": 132.0,
"max": 1355.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1727816095",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1727816684"
},
"total": 589.8568485750001,
"count": 1,
"self": 0.5288183550001122,
"children": {
"run_training.setup": {
"total": 0.06540276799998423,
"count": 1,
"self": 0.06540276799998423
},
"TrainerController.start_learning": {
"total": 589.262627452,
"count": 1,
"self": 0.815750718998288,
"children": {
"TrainerController._reset_env": {
"total": 3.0840856979999955,
"count": 1,
"self": 3.0840856979999955
},
"TrainerController.advance": {
"total": 585.2791893230017,
"count": 18205,
"self": 0.4121119370074666,
"children": {
"env_step": {
"total": 584.8670773859942,
"count": 18205,
"self": 450.6717398619878,
"children": {
"SubprocessEnvManager._take_step": {
"total": 133.7866393080014,
"count": 18205,
"self": 2.303125979004278,
"children": {
"TorchPolicy.evaluate": {
"total": 131.48351332899713,
"count": 18205,
"self": 131.48351332899713
}
}
},
"workers": {
"total": 0.408698216005007,
"count": 18205,
"self": 0.0,
"children": {
"worker_root": {
"total": 587.3604775950013,
"count": 18205,
"is_parallel": true,
"self": 277.50858411799464,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0030624940000052447,
"count": 1,
"is_parallel": true,
"self": 0.0012730970000234265,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017893969999818182,
"count": 10,
"is_parallel": true,
"self": 0.0017893969999818182
}
}
},
"UnityEnvironment.step": {
"total": 0.07644652999999835,
"count": 1,
"is_parallel": true,
"self": 0.0008643139999833238,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044516700000940546,
"count": 1,
"is_parallel": true,
"self": 0.00044516700000940546
},
"communicator.exchange": {
"total": 0.07282467999999653,
"count": 1,
"is_parallel": true,
"self": 0.07282467999999653
},
"steps_from_proto": {
"total": 0.0023123690000090846,
"count": 1,
"is_parallel": true,
"self": 0.00046401500003412366,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001848353999974961,
"count": 10,
"is_parallel": true,
"self": 0.001848353999974961
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 309.8518934770066,
"count": 18204,
"is_parallel": true,
"self": 14.758842264003476,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.592925014994279,
"count": 18204,
"is_parallel": true,
"self": 7.592925014994279
},
"communicator.exchange": {
"total": 244.0235462120086,
"count": 18204,
"is_parallel": true,
"self": 244.0235462120086
},
"steps_from_proto": {
"total": 43.4765799860003,
"count": 18204,
"is_parallel": true,
"self": 8.451454347993405,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.0251256380069,
"count": 182040,
"is_parallel": true,
"self": 35.0251256380069
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0006261990000666628,
"count": 1,
"self": 0.0006261990000666628,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 578.6501780520026,
"count": 752068,
"is_parallel": true,
"self": 17.034043859041276,
"children": {
"process_trajectory": {
"total": 312.45311123596105,
"count": 752068,
"is_parallel": true,
"self": 311.7214263799611,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7316848559999585,
"count": 4,
"is_parallel": true,
"self": 0.7316848559999585
}
}
},
"_update_policy": {
"total": 249.1630229570003,
"count": 90,
"is_parallel": true,
"self": 69.8412178360054,
"children": {
"TorchPPOOptimizer.update": {
"total": 179.3218051209949,
"count": 4584,
"is_parallel": true,
"self": 179.3218051209949
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.0829755129999512,
"count": 1,
"self": 0.001318317999903229,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08165719500004798,
"count": 1,
"self": 0.08165719500004798
}
}
}
}
}
}
}
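
The JSON above is the timer/gauge log that `mlagents-learn` writes at the end of a run (here, the PPO `SnowballTarget1` run described in the `metadata` block). As a minimal sketch of how this data can be inspected, the snippet below loads the file with Python's standard `json` module and prints a few of the gauges shown above. The path `run_logs/timers.json` is an assumption about where the file sits in this repository; adjust it to the actual location.

```python
import json

# Assumed path: adjust to wherever this timers.json is stored in the repo.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

gauges = timers["gauges"]
reward = gauges["SnowballTarget.Environment.CumulativeReward.mean"]
steps = gauges["SnowballTarget.Step.mean"]

# Each gauge holds the latest value plus min/max over the logged summaries.
print(f"Final mean cumulative reward: {reward['value']:.2f} "
      f"(min {reward['min']:.2f}, max {reward['max']:.2f})")
print(f"Training steps recorded: {int(steps['value'])}")
print(f"Total wall-clock training time: {timers['total']:.1f} s")
```

Each gauge entry stores the most recent value along with the min, max, and count of summary intervals (20 here), while the nested `children` tree breaks the total wall-clock time down by ML-Agents subsystem (environment stepping, policy evaluation, PPO updates, checkpointing).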