{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.0268044471740723,
"min": 2.0112318992614746,
"max": 3.2957406044006348,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 39044.359375,
"min": 13800.83203125,
"max": 109630.703125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 52.61702127659574,
"min": 40.15,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19784.0,
"min": 12792.0,
"max": 29276.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1586.674592850713,
"min": 1191.405162268078,
"max": 1606.8956111825048,
"count": 493
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 298294.82345593406,
"min": 2387.633037183873,
"max": 367569.15790483716,
"count": 493
},
"SoccerTwos.Step.mean": {
"value": 4999950.0,
"min": 9654.0,
"max": 4999950.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999950.0,
"min": 9654.0,
"max": 4999950.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.07815653830766678,
"min": -0.10160785168409348,
"max": 0.19746340811252594,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -14.615272521972656,
"min": -20.829608917236328,
"max": 39.237892150878906,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0748273953795433,
"min": -0.10606652498245239,
"max": 0.19612206518650055,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -13.99272346496582,
"min": -21.743637084960938,
"max": 39.527435302734375,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.25244278130046827,
"min": -0.5040666659673055,
"max": 0.6695000037550927,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -47.20680010318756,
"min": -47.20680010318756,
"max": 79.10739988088608,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.25244278130046827,
"min": -0.5040666659673055,
"max": 0.6695000037550927,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -47.20680010318756,
"min": -47.20680010318756,
"max": 79.10739988088608,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01579215988361587,
"min": 0.011324209309532308,
"max": 0.02333148938875335,
"count": 239
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01579215988361587,
"min": 0.011324209309532308,
"max": 0.02333148938875335,
"count": 239
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10237656806906065,
"min": 0.0002916541343438439,
"max": 0.11561118289828301,
"count": 239
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10237656806906065,
"min": 0.0002916541343438439,
"max": 0.11561118289828301,
"count": 239
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10386196921269099,
"min": 0.000286358574036664,
"max": 0.11859728594621023,
"count": 239
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10386196921269099,
"min": 0.000286358574036664,
"max": 0.11859728594621023,
"count": 239
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 239
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 239
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 239
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 239
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 239
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 239
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1728389875",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Teresa\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1728402700"
},
"total": 12824.079886899912,
"count": 1,
"self": 2.9874007999897003,
"children": {
"run_training.setup": {
"total": 0.1338965999893844,
"count": 1,
"self": 0.1338965999893844
},
"TrainerController.start_learning": {
"total": 12820.958589499933,
"count": 1,
"self": 7.54340277321171,
"children": {
"TrainerController._reset_env": {
"total": 5.910594700137153,
"count": 25,
"self": 5.910594700137153
},
"TrainerController.advance": {
"total": 12807.361433726619,
"count": 340164,
"self": 7.6218028384028,
"children": {
"env_step": {
"total": 4993.326868413016,
"count": 340164,
"self": 3857.6697461552685,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1131.1918471063254,
"count": 340164,
"self": 42.37281252234243,
"children": {
"TorchPolicy.evaluate": {
"total": 1088.819034583983,
"count": 632118,
"self": 1088.819034583983
}
}
},
"workers": {
"total": 4.465275151422247,
"count": 340164,
"self": 0.0,
"children": {
"worker_root": {
"total": 12806.358717884985,
"count": 340164,
"is_parallel": true,
"self": 9731.84806127625,
"children": {
"steps_from_proto": {
"total": 0.03374139906372875,
"count": 50,
"is_parallel": true,
"self": 0.007215699297375977,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.026525699766352773,
"count": 200,
"is_parallel": true,
"self": 0.026525699766352773
}
}
},
"UnityEnvironment.step": {
"total": 3074.4769152096706,
"count": 340164,
"is_parallel": true,
"self": 134.9302772221854,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 104.70917891338468,
"count": 340164,
"is_parallel": true,
"self": 104.70917891338468
},
"communicator.exchange": {
"total": 2408.603457110119,
"count": 340164,
"is_parallel": true,
"self": 2408.603457110119
},
"steps_from_proto": {
"total": 426.2340019639814,
"count": 680328,
"is_parallel": true,
"self": 87.98160513897892,
"children": {
"_process_rank_one_or_two_observation": {
"total": 338.2523968250025,
"count": 2721312,
"is_parallel": true,
"self": 338.2523968250025
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 7806.4127624752,
"count": 340164,
"self": 54.97365895297844,
"children": {
"process_trajectory": {
"total": 1259.2771371220006,
"count": 340164,
"self": 1256.7193604218774,
"children": {
"RLTrainer._checkpoint": {
"total": 2.5577767001232132,
"count": 10,
"self": 2.5577767001232132
}
}
},
"_update_policy": {
"total": 6492.161966400221,
"count": 239,
"self": 559.923837391194,
"children": {
"TorchPOCAOptimizer.update": {
"total": 5932.238129009027,
"count": 7173,
"self": 5932.238129009027
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.00006853044033e-07,
"count": 1,
"self": 9.00006853044033e-07
},
"TrainerController._save_models": {
"total": 0.1431573999579996,
"count": 1,
"self": 0.04142310004681349,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1017342999111861,
"count": 1,
"self": 0.1017342999111861
}
}
}
}
}
}
}