poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.4932351112365723,
"min": 2.4932351112365723,
"max": 3.295762777328491,
"count": 301
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 55449.546875,
"min": 15175.5185546875,
"max": 108702.859375,
"count": 301
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 54.955555555555556,
"min": 46.666666666666664,
"max": 999.0,
"count": 301
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19784.0,
"min": 15564.0,
"max": 25516.0,
"count": 301
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1424.2568962525577,
"min": 1198.381890849238,
"max": 1425.7131067735204,
"count": 255
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 256366.24132546037,
"min": 2400.034436562306,
"max": 293703.9598943941,
"count": 255
},
"SoccerTwos.Step.mean": {
"value": 3009990.0,
"min": 9774.0,
"max": 3009990.0,
"count": 301
},
"SoccerTwos.Step.sum": {
"value": 3009990.0,
"min": 9774.0,
"max": 3009990.0,
"count": 301
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.03920089825987816,
"min": -0.05091611668467522,
"max": 0.19523094594478607,
"count": 301
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 7.016960620880127,
"min": -10.030474662780762,
"max": 20.29961585998535,
"count": 301
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03831464424729347,
"min": -0.051254600286483765,
"max": 0.2010337859392166,
"count": 301
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 6.858321189880371,
"min": -10.097156524658203,
"max": 21.403934478759766,
"count": 301
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 301
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 301
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.08670391317186409,
"min": -0.6047799976542592,
"max": 0.5424400006021772,
"count": 301
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 15.520000457763672,
"min": -45.38840043544769,
"max": 41.24939972162247,
"count": 301
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.08670391317186409,
"min": -0.6047799976542592,
"max": 0.5424400006021772,
"count": 301
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 15.520000457763672,
"min": -45.38840043544769,
"max": 41.24939972162247,
"count": 301
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 301
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 301
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019933591484247396,
"min": 0.0102610221428525,
"max": 0.022171040313939253,
"count": 142
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019933591484247396,
"min": 0.0102610221428525,
"max": 0.022171040313939253,
"count": 142
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08567158604661623,
"min": 3.67103888265774e-07,
"max": 0.09261030455430348,
"count": 142
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08567158604661623,
"min": 3.67103888265774e-07,
"max": 0.09261030455430348,
"count": 142
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08662142530083657,
"min": 5.43677314605399e-07,
"max": 0.094602270424366,
"count": 142
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08662142530083657,
"min": 5.43677314605399e-07,
"max": 0.094602270424366,
"count": 142
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 142
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 142
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 142
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 142
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 142
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 142
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676200733",
"python_version": "3.9.16 (main, Jan 11 2023, 10:02:19) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/numanmohammad/opt/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos_4 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1676217623"
},
"total": 16889.386610081,
"count": 1,
"self": 0.5301684959995328,
"children": {
"run_training.setup": {
"total": 0.055927148999999954,
"count": 1,
"self": 0.055927148999999954
},
"TrainerController.start_learning": {
"total": 16888.800514436003,
"count": 1,
"self": 6.057592861558078,
"children": {
"TrainerController._reset_env": {
"total": 4.030374796997214,
"count": 16,
"self": 4.030374796997214
},
"TrainerController.advance": {
"total": 16878.280215613453,
"count": 198148,
"self": 6.3010103682026966,
"children": {
"env_step": {
"total": 4678.166740248991,
"count": 198148,
"self": 3832.033327977566,
"children": {
"SubprocessEnvManager._take_step": {
"total": 842.3676424640794,
"count": 198148,
"self": 42.37373407699647,
"children": {
"TorchPolicy.evaluate": {
"total": 799.9939083870829,
"count": 383684,
"self": 799.9939083870829
}
}
},
"workers": {
"total": 3.765769807345876,
"count": 198147,
"self": 0.0,
"children": {
"worker_root": {
"total": 16874.310074276,
"count": 198147,
"is_parallel": true,
"self": 13763.786149159852,
"children": {
"steps_from_proto": {
"total": 0.041061649997048644,
"count": 32,
"is_parallel": true,
"self": 0.00884137500270965,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03222027499433899,
"count": 128,
"is_parallel": true,
"self": 0.03222027499433899
}
}
},
"UnityEnvironment.step": {
"total": 3110.4828634661503,
"count": 198147,
"is_parallel": true,
"self": 173.0467596460535,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 117.55066479009602,
"count": 198147,
"is_parallel": true,
"self": 117.55066479009602
},
"communicator.exchange": {
"total": 2292.3341577065876,
"count": 198147,
"is_parallel": true,
"self": 2292.3341577065876
},
"steps_from_proto": {
"total": 527.5512813234134,
"count": 396294,
"is_parallel": true,
"self": 113.85399624045453,
"children": {
"_process_rank_one_or_two_observation": {
"total": 413.69728508295884,
"count": 1585176,
"is_parallel": true,
"self": 413.69728508295884
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 12193.81246499626,
"count": 198147,
"self": 40.42644453520188,
"children": {
"process_trajectory": {
"total": 1284.6019422140507,
"count": 198147,
"self": 1282.157113324051,
"children": {
"RLTrainer._checkpoint": {
"total": 2.444828889999826,
"count": 6,
"self": 2.444828889999826
}
}
},
"_update_policy": {
"total": 10868.784078247007,
"count": 142,
"self": 640.584827876004,
"children": {
"TorchPOCAOptimizer.update": {
"total": 10228.199250371003,
"count": 4260,
"self": 10228.199250371003
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.218997102929279e-06,
"count": 1,
"self": 5.218997102929279e-06
},
"TrainerController._save_models": {
"total": 0.4323259449993202,
"count": 1,
"self": 0.0021620840016112197,
"children": {
"RLTrainer._checkpoint": {
"total": 0.430163860997709,
"count": 1,
"self": 0.430163860997709
}
}
}
}
}
}
}
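
A minimal sketch (not part of the original file) of one way to inspect this log with the Python standard library: the "gauges" section holds summary statistics (value / min / max / count) per training metric, and the rest of the file is a nested timer tree whose nodes carry "total", "count", "self", and "children". The file path below assumes the repository layout shown in the header (run_logs/timers.json); adjust it to wherever the file lives locally.

import json

def walk(node, name="root", depth=0):
    # Recursively print each timer block's total seconds, call count, and self time.
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    self_time = node.get("self", 0.0)
    print(f"{'  ' * depth}{name}: total={total:.2f}s count={count} self={self_time:.2f}s")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Example gauge lookup: the self-play ELO statistics recorded above.
elo = timers["gauges"]["SoccerTwos.Self-play.ELO.mean"]
print("final ELO:", elo["value"], "min:", elo["min"], "max:", elo["max"])

# Walk the timer hierarchy rooted at the top-level object ("name": "root").
walk(timers)

Run against this file, the walk shows where wall-clock time went, e.g. that TorchPOCAOptimizer.update and communicator.exchange dominate the roughly 16,889 seconds of total runtime.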