{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6973260641098022,
"min": 1.6560695171356201,
"max": 3.149529457092285,
"count": 765
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33457.69140625,
"min": 18979.73046875,
"max": 85853.65625,
"count": 765
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 51.729166666666664,
"min": 40.134453781512605,
"max": 433.72727272727275,
"count": 765
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19864.0,
"min": 1728.0,
"max": 22012.0,
"count": 765
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1503.4222153489015,
"min": 1198.6534959461387,
"max": 1530.9758294152673,
"count": 765
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 288657.0653469891,
"min": 7210.449777128347,
"max": 356989.4800138407,
"count": 765
},
"SoccerTwos.Step.mean": {
"value": 8639952.0,
"min": 999862.0,
"max": 8639952.0,
"count": 765
},
"SoccerTwos.Step.sum": {
"value": 8639952.0,
"min": 999862.0,
"max": 8639952.0,
"count": 765
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.08352767676115036,
"min": -0.12734852731227875,
"max": 0.18192946910858154,
"count": 765
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -15.95378589630127,
"min": -23.941524505615234,
"max": 26.015914916992188,
"count": 765
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07900454849004745,
"min": -0.1311393827199936,
"max": 0.18299028277397156,
"count": 765
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -15.089868545532227,
"min": -24.654205322265625,
"max": 26.41413688659668,
"count": 765
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 765
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 765
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.20733821953778492,
"min": -0.32395813977995586,
"max": 0.875900000333786,
"count": 765
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -39.60159993171692,
"min": -59.902000188827515,
"max": 50.295599937438965,
"count": 765
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.20733821953778492,
"min": -0.32395813977995586,
"max": 0.875900000333786,
"count": 765
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -39.60159993171692,
"min": -59.902000188827515,
"max": 50.295599937438965,
"count": 765
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 765
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 765
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017286343254575818,
"min": 0.011345698032528162,
"max": 0.023778949840925635,
"count": 370
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017286343254575818,
"min": 0.011345698032528162,
"max": 0.023778949840925635,
"count": 370
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10663634488979976,
"min": 0.013408512560029824,
"max": 0.12825867583354314,
"count": 370
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10663634488979976,
"min": 0.013408512560029824,
"max": 0.12825867583354314,
"count": 370
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10817247877518336,
"min": 0.01368649530534943,
"max": 0.13054989725351335,
"count": 370
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10817247877518336,
"min": 0.01368649530534943,
"max": 0.13054989725351335,
"count": 370
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 370
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 370
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 370
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 370
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 370
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 370
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714949730",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ml-agents/config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos.x86_64 --run-id=SoccerTwos-0505 --no-graphics --resume --results-dir ./drive/MyDrive/ml-agents/results/",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1714967350"
},
"total": 17620.125808391,
"count": 1,
"self": 0.48291332200096804,
"children": {
"run_training.setup": {
"total": 0.6506995189999998,
"count": 1,
"self": 0.6506995189999998
},
"TrainerController.start_learning": {
"total": 17618.99219555,
"count": 1,
"self": 11.972277904234943,
"children": {
"TrainerController._reset_env": {
"total": 8.113230823005267,
"count": 40,
"self": 8.113230823005267
},
"TrainerController.advance": {
"total": 17598.90119277576,
"count": 531961,
"self": 12.901594699058478,
"children": {
"env_step": {
"total": 13503.448067474645,
"count": 531961,
"self": 10433.27649414074,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3063.00353231766,
"count": 531961,
"self": 85.60644127860269,
"children": {
"TorchPolicy.evaluate": {
"total": 2977.397091039057,
"count": 959826,
"self": 2977.397091039057
}
}
},
"workers": {
"total": 7.168041016244274,
"count": 531960,
"self": 0.0,
"children": {
"worker_root": {
"total": 17589.435670211722,
"count": 531960,
"is_parallel": true,
"self": 8800.974009851296,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007636214999990898,
"count": 2,
"is_parallel": true,
"self": 0.005108238000104848,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00252797699988605,
"count": 8,
"is_parallel": true,
"self": 0.00252797699988605
}
}
},
"UnityEnvironment.step": {
"total": 0.039467563000016526,
"count": 1,
"is_parallel": true,
"self": 0.0011447839999618736,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009036370000217175,
"count": 1,
"is_parallel": true,
"self": 0.0009036370000217175
},
"communicator.exchange": {
"total": 0.03408657100004575,
"count": 1,
"is_parallel": true,
"self": 0.03408657100004575
},
"steps_from_proto": {
"total": 0.0033325709999871833,
"count": 2,
"is_parallel": true,
"self": 0.0006172000000219668,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0027153709999652165,
"count": 8,
"is_parallel": true,
"self": 0.0027153709999652165
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.0916574839878308,
"count": 78,
"is_parallel": true,
"self": 0.01781331799190866,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.07384416599592214,
"count": 312,
"is_parallel": true,
"self": 0.07384416599592214
}
}
},
"UnityEnvironment.step": {
"total": 8788.370002876438,
"count": 531959,
"is_parallel": true,
"self": 543.8803059925285,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 361.7545129028885,
"count": 531959,
"is_parallel": true,
"self": 361.7545129028885
},
"communicator.exchange": {
"total": 6157.555378543017,
"count": 531959,
"is_parallel": true,
"self": 6157.555378543017
},
"steps_from_proto": {
"total": 1725.1798054380033,
"count": 1063918,
"is_parallel": true,
"self": 289.157194034422,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1436.0226114035813,
"count": 4255672,
"is_parallel": true,
"self": 1436.0226114035813
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4082.5515306020584,
"count": 531960,
"self": 97.16858463125982,
"children": {
"process_trajectory": {
"total": 1890.426589565792,
"count": 531960,
"self": 1885.1257955897956,
"children": {
"RLTrainer._checkpoint": {
"total": 5.300793975996328,
"count": 16,
"self": 5.300793975996328
}
}
},
"_update_policy": {
"total": 2094.9563564050068,
"count": 371,
"self": 1231.874581490914,
"children": {
"TorchPOCAOptimizer.update": {
"total": 863.0817749140929,
"count": 11130,
"self": 863.0817749140929
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4930010365787894e-06,
"count": 1,
"self": 1.4930010365787894e-06
},
"TrainerController._save_models": {
"total": 0.005492553998919902,
"count": 1,
"self": 4.1317998693557456e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.005451236000226345,
"count": 1,
"self": 0.005451236000226345
}
}
}
}
}
}
}