poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.93532133102417,
"min": 1.9228694438934326,
"max": 3.2956340312957764,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 37901.33203125,
"min": 15953.810546875,
"max": 121993.4375,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 57.6551724137931,
"min": 45.728971962616825,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20064.0,
"min": 16388.0,
"max": 24572.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1570.3648351101099,
"min": 1183.4809839587417,
"max": 1579.7124319617544,
"count": 462
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 273243.48130915913,
"min": 2368.01158658732,
"max": 322791.4845244262,
"count": 462
},
"SoccerTwos.Step.mean": {
"value": 4999965.0,
"min": 9310.0,
"max": 4999965.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999965.0,
"min": 9310.0,
"max": 4999965.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.05254178121685982,
"min": -0.06817479431629181,
"max": 0.25315922498703003,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 9.1422700881958,
"min": -11.19851303100586,
"max": 27.594356536865234,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.05239120498299599,
"min": -0.06603290885686874,
"max": 0.24999599158763885,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 9.116069793701172,
"min": -12.472119331359863,
"max": 27.249563217163086,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.027245977144131715,
"min": -0.8125,
"max": 0.5217555546098285,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 4.7408000230789185,
"min": -43.884000182151794,
"max": 52.70480006933212,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.027245977144131715,
"min": -0.8125,
"max": 0.5217555546098285,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 4.7408000230789185,
"min": -43.884000182151794,
"max": 52.70480006933212,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019540009875587808,
"min": 0.011188588462149103,
"max": 0.023791296190271774,
"count": 239
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019540009875587808,
"min": 0.011188588462149103,
"max": 0.023791296190271774,
"count": 239
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10733787094553311,
"min": 2.080253307212843e-06,
"max": 0.11480278223752975,
"count": 239
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10733787094553311,
"min": 2.080253307212843e-06,
"max": 0.11480278223752975,
"count": 239
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10846167479952176,
"min": 1.9527220464018075e-06,
"max": 0.11766273006796837,
"count": 239
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10846167479952176,
"min": 1.9527220464018075e-06,
"max": 0.11766273006796837,
"count": 239
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 239
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 239
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 239
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 239
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 239
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 239
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681122148",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/home/sicong/Data/Projects/HuggingFace_DeepReinforcementLearning/venv_shared/bin/mlagents-learn ./SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681127823"
},
"total": 5674.665851216007,
"count": 1,
"self": 0.1807679940102389,
"children": {
"run_training.setup": {
"total": 0.12766640599875245,
"count": 1,
"self": 0.12766640599875245
},
"TrainerController.start_learning": {
"total": 5674.357416815998,
"count": 1,
"self": 4.425807208011975,
"children": {
"TrainerController._reset_env": {
"total": 3.309263809991535,
"count": 20,
"self": 3.309263809991535
},
"TrainerController.advance": {
"total": 5666.350771254991,
"count": 336237,
"self": 4.078188825093093,
"children": {
"env_step": {
"total": 4206.0092518964375,
"count": 336237,
"self": 3256.9958848187525,
"children": {
"SubprocessEnvManager._take_step": {
"total": 946.5169658295636,
"count": 336237,
"self": 26.451491816114867,
"children": {
"TorchPolicy.evaluate": {
"total": 920.0654740134487,
"count": 632474,
"self": 920.0654740134487
}
}
},
"workers": {
"total": 2.4964012481214013,
"count": 336237,
"self": 0.0,
"children": {
"worker_root": {
"total": 5667.749886682577,
"count": 336237,
"is_parallel": true,
"self": 2959.9660778657417,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001744405017234385,
"count": 2,
"is_parallel": true,
"self": 0.0003522140032146126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013921910140197724,
"count": 8,
"is_parallel": true,
"self": 0.0013921910140197724
}
}
},
"UnityEnvironment.step": {
"total": 0.01870985199639108,
"count": 1,
"is_parallel": true,
"self": 0.0006707470020046458,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004609339957823977,
"count": 1,
"is_parallel": true,
"self": 0.0004609339957823977
},
"communicator.exchange": {
"total": 0.016097498999442905,
"count": 1,
"is_parallel": true,
"self": 0.016097498999442905
},
"steps_from_proto": {
"total": 0.0014806719991611317,
"count": 2,
"is_parallel": true,
"self": 0.00024401101109106094,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012366609880700707,
"count": 8,
"is_parallel": true,
"self": 0.0012366609880700707
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2707.7565631848265,
"count": 336236,
"is_parallel": true,
"self": 200.43624722062668,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 124.41545320049045,
"count": 336236,
"is_parallel": true,
"self": 124.41545320049045
},
"communicator.exchange": {
"total": 1927.032509234923,
"count": 336236,
"is_parallel": true,
"self": 1927.032509234923
},
"steps_from_proto": {
"total": 455.87235352878633,
"count": 672472,
"is_parallel": true,
"self": 71.89859090356913,
"children": {
"_process_rank_one_or_two_observation": {
"total": 383.9737626252172,
"count": 2689888,
"is_parallel": true,
"self": 383.9737626252172
}
}
}
}
},
"steps_from_proto": {
"total": 0.027245632008998655,
"count": 38,
"is_parallel": true,
"self": 0.0043746480223489925,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.022870983986649662,
"count": 152,
"is_parallel": true,
"self": 0.022870983986649662
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1456.2633305334602,
"count": 336237,
"self": 39.944305689728935,
"children": {
"process_trajectory": {
"total": 479.6868951536162,
"count": 336237,
"self": 476.6264334236039,
"children": {
"RLTrainer._checkpoint": {
"total": 3.060461730012321,
"count": 10,
"self": 3.060461730012321
}
}
},
"_update_policy": {
"total": 936.6321296901151,
"count": 239,
"self": 592.2271633920609,
"children": {
"TorchPOCAOptimizer.update": {
"total": 344.4049662980542,
"count": 7170,
"self": 344.4049662980542
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.899928510189056e-07,
"count": 1,
"self": 5.899928510189056e-07
},
"TrainerController._save_models": {
"total": 0.2715739530103747,
"count": 1,
"self": 0.000999018011498265,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2705749349988764,
"count": 1,
"self": 0.2705749349988764
}
}
}
}
}
}
}
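
The sketch below (not part of the logged file) shows one way to inspect this timers.json offline with Python: it loads the file, prints each gauge's final value, min, max, and sample count, and then walks the hierarchical timer tree of totals and call counts. The relative file path and the output formatting are assumptions.

# Minimal inspection sketch for an ML-Agents timers.json run log.
# Assumes the file lives at run_logs/timers.json relative to the working directory.
import json

def print_gauges(gauges):
    """Print each gauge with its final value, min, max, and sample count."""
    for name, stats in sorted(gauges.items()):
        print(f"{name}: value={stats['value']:.6g} "
              f"min={stats['min']:.6g} max={stats['max']:.6g} "
              f"count={stats['count']}")

def print_timer_tree(node, name="root", depth=0):
    """Recursively print the timer hierarchy: total seconds and call count per block."""
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: total={total:.3f}s count={count}")
    for child_name, child in node.get("children", {}).items():
        print_timer_tree(child, child_name, depth + 1)

if __name__ == "__main__":
    with open("run_logs/timers.json") as f:  # assumed path
        timers = json.load(f)
    print_gauges(timers["gauges"])
    print_timer_tree(timers)

Run as a standalone script, this prints, for example, the SoccerTwos.Self-play.ELO.mean gauge alongside the TrainerController.advance / env_step timing breakdown recorded above.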