{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5533990859985352,
"min": 1.5243241786956787,
"max": 1.9740146398544312,
"count": 281
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32161.57421875,
"min": 15847.4189453125,
"max": 41691.37109375,
"count": 281
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 48.13725490196079,
"min": 41.33898305084746,
"max": 68.25,
"count": 281
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19640.0,
"min": 6968.0,
"max": 21036.0,
"count": 281
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1557.8932694610762,
"min": 1466.0225536182486,
"max": 1557.8932694610762,
"count": 281
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 317810.22697005956,
"min": 110089.1313619379,
"max": 356328.79757558525,
"count": 281
},
"SoccerTwos.Step.mean": {
"value": 7999996.0,
"min": 5199967.0,
"max": 7999996.0,
"count": 281
},
"SoccerTwos.Step.sum": {
"value": 7999996.0,
"min": 5199967.0,
"max": 7999996.0,
"count": 281
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.03293711319565773,
"min": -0.08797458559274673,
"max": 0.10747097432613373,
"count": 281
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 6.719171047210693,
"min": -15.219603538513184,
"max": 22.89131736755371,
"count": 281
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.032839883118867874,
"min": -0.08830176293849945,
"max": 0.11047522723674774,
"count": 281
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 6.699336051940918,
"min": -15.276205062866211,
"max": 23.53122329711914,
"count": 281
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 281
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 281
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.20517647149516086,
"min": -0.27683497256919987,
"max": 0.32227417504843414,
"count": 281
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 41.85600018501282,
"min": -52.12059968709946,
"max": 68.64439928531647,
"count": 281
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.20517647149516086,
"min": -0.27683497256919987,
"max": 0.32227417504843414,
"count": 281
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 41.85600018501282,
"min": -52.12059968709946,
"max": 68.64439928531647,
"count": 281
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 281
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 281
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016338667902649224,
"min": 0.0101395297019432,
"max": 0.02169702316944798,
"count": 136
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016338667902649224,
"min": 0.0101395297019432,
"max": 0.02169702316944798,
"count": 136
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1118941955268383,
"min": 0.10524865016341209,
"max": 0.13034348463018736,
"count": 136
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1118941955268383,
"min": 0.10524865016341209,
"max": 0.13034348463018736,
"count": 136
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11206316997607549,
"min": 0.10617038980126381,
"max": 0.13078679591417314,
"count": 136
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11206316997607549,
"min": 0.10617038980126381,
"max": 0.13078679591417314,
"count": 136
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 2.8052490652499513e-07,
"min": 2.8052490652499513e-07,
"max": 0.00010436504021167501,
"count": 136
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 2.8052490652499513e-07,
"min": 2.8052490652499513e-07,
"max": 0.00010436504021167501,
"count": 136
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.10014021249999999,
"min": 0.10014021249999999,
"max": 0.15218248750000005,
"count": 136
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.10014021249999999,
"min": 0.10014021249999999,
"max": 0.15218248750000005,
"count": 136
},
"SoccerTwos.Policy.Beta.mean": {
"value": 1.4664402499999922e-05,
"min": 1.4664402499999922e-05,
"max": 0.0017459374175000004,
"count": 136
},
"SoccerTwos.Policy.Beta.sum": {
"value": 1.4664402499999922e-05,
"min": 1.4664402499999922e-05,
"max": 0.0017459374175000004,
"count": 136
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675725772",
"python_version": "3.9.16 (main, Jan 11 2023, 16:16:36) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\hecto\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=Mespil_Rangers --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1675732433"
},
"total": 6661.3305878,
"count": 1,
"self": 0.5378562999994756,
"children": {
"run_training.setup": {
"total": 0.08360659999999998,
"count": 1,
"self": 0.08360659999999998
},
"TrainerController.start_learning": {
"total": 6660.7091249000005,
"count": 1,
"self": 4.1006596999759495,
"children": {
"TrainerController._reset_env": {
"total": 4.026199999999587,
"count": 16,
"self": 4.026199999999587
},
"TrainerController.advance": {
"total": 6652.483442200026,
"count": 197138,
"self": 3.7749283999291947,
"children": {
"env_step": {
"total": 2724.1751109000315,
"count": 197138,
"self": 2133.1191382002185,
"children": {
"SubprocessEnvManager._take_step": {
"total": 588.6807736998038,
"count": 197138,
"self": 20.955833299764322,
"children": {
"TorchPolicy.evaluate": {
"total": 567.7249404000395,
"count": 351458,
"self": 567.7249404000395
}
}
},
"workers": {
"total": 2.375199000009239,
"count": 197138,
"self": 0.0,
"children": {
"worker_root": {
"total": 6652.094254000057,
"count": 197138,
"is_parallel": true,
"self": 4926.447215599936,
"children": {
"steps_from_proto": {
"total": 0.021310700000425786,
"count": 32,
"is_parallel": true,
"self": 0.004511000001696797,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01679969999872899,
"count": 128,
"is_parallel": true,
"self": 0.01679969999872899
}
}
},
"UnityEnvironment.step": {
"total": 1725.6257277001214,
"count": 197138,
"is_parallel": true,
"self": 82.67076649983801,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.10730750014284,
"count": 197138,
"is_parallel": true,
"self": 78.10730750014284
},
"communicator.exchange": {
"total": 1330.510779799869,
"count": 197138,
"is_parallel": true,
"self": 1330.510779799869
},
"steps_from_proto": {
"total": 234.33687390027163,
"count": 394276,
"is_parallel": true,
"self": 48.47007940044537,
"children": {
"_process_rank_one_or_two_observation": {
"total": 185.86679449982626,
"count": 1577104,
"is_parallel": true,
"self": 185.86679449982626
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3924.5334029000646,
"count": 197138,
"self": 27.740514800213077,
"children": {
"process_trajectory": {
"total": 1140.2175799998477,
"count": 197138,
"self": 1139.6040892998476,
"children": {
"RLTrainer._checkpoint": {
"total": 0.613490699999943,
"count": 6,
"self": 0.613490699999943
}
}
},
"_update_policy": {
"total": 2756.575308100004,
"count": 136,
"self": 367.88587230002395,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2388.68943579998,
"count": 4080,
"self": 2388.68943579998
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.999998731771484e-07,
"count": 1,
"self": 6.999998731771484e-07
},
"TrainerController._save_models": {
"total": 0.09882229999948322,
"count": 1,
"self": 0.00831449999986944,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09050779999961378,
"count": 1,
"self": 0.09050779999961378
}
}
}
}
}
}
}