{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.252450466156006,
"min": 3.239375352859497,
"max": 3.2957582473754883,
"count": 23
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 46523.05078125,
"min": 29084.0859375,
"max": 105464.265625,
"count": 23
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 836.1666666666666,
"min": 390.38461538461536,
"max": 999.0,
"count": 23
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20068.0,
"min": 15308.0,
"max": 26368.0,
"count": 23
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1194.1021612600794,
"min": 1194.1021612600794,
"max": 1199.4501758937838,
"count": 16
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4776.408645040317,
"min": 2392.5884358698113,
"max": 19142.079010214882,
"count": 16
},
"SoccerTwos.Step.mean": {
"value": 229464.0,
"min": 9202.0,
"max": 229464.0,
"count": 23
},
"SoccerTwos.Step.sum": {
"value": 229464.0,
"min": 9202.0,
"max": 229464.0,
"count": 23
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.010462536476552486,
"min": -0.05356094613671303,
"max": -0.010462536476552486,
"count": 23
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.12555043399333954,
"min": -0.7497747540473938,
"max": -0.11435933411121368,
"count": 23
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.007407273631542921,
"min": -0.059053339064121246,
"max": -0.007407273631542921,
"count": 23
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.0888872817158699,
"min": -0.762631893157959,
"max": -0.0888872817158699,
"count": 23
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 23
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 23
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.3333333333333333,
"min": -0.3333333333333333,
"max": 0.1102000003059705,
"count": 23
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -4.0,
"min": -4.286400005221367,
"max": 1.3224000036716461,
"count": 23
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.3333333333333333,
"min": -0.3333333333333333,
"max": 0.1102000003059705,
"count": 23
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -4.0,
"min": -4.286400005221367,
"max": 1.3224000036716461,
"count": 23
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 23
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01595255153176064,
"min": 0.014761542660805087,
"max": 0.018002025652094744,
"count": 10
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01595255153176064,
"min": 0.014761542660805087,
"max": 0.018002025652094744,
"count": 10
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.004805158156280716,
"min": 5.288229215996883e-05,
"max": 0.005204846151173115,
"count": 10
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.004805158156280716,
"min": 5.288229215996883e-05,
"max": 0.005204846151173115,
"count": 10
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.004873709209884207,
"min": 5.408759219183897e-05,
"max": 0.004873709209884207,
"count": 10
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.004873709209884207,
"min": 5.408759219183897e-05,
"max": 0.004873709209884207,
"count": 10
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 10
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 10
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 10
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 10
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 10
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679952980",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "E:\\Programme\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1679953944"
},
"total": 964.2253236,
"count": 1,
"self": 0.0079384000000573,
"children": {
"run_training.setup": {
"total": 0.10692509999999977,
"count": 1,
"self": 0.10692509999999977
},
"TrainerController.start_learning": {
"total": 964.1104601,
"count": 1,
"self": 0.4366970999997193,
"children": {
"TrainerController._reset_env": {
"total": 3.782034099999967,
"count": 2,
"self": 3.782034099999967
},
"TrainerController.advance": {
"total": 959.6502606000003,
"count": 15638,
"self": 0.4020585999950299,
"children": {
"env_step": {
"total": 347.34366680000284,
"count": 15638,
"self": 266.7614842999968,
"children": {
"SubprocessEnvManager._take_step": {
"total": 80.28700820000329,
"count": 15638,
"self": 2.525830800001472,
"children": {
"TorchPolicy.evaluate": {
"total": 77.76117740000181,
"count": 31056,
"self": 77.76117740000181
}
}
},
"workers": {
"total": 0.29517430000270206,
"count": 15638,
"self": 0.0,
"children": {
"worker_root": {
"total": 952.933439000007,
"count": 15638,
"is_parallel": true,
"self": 740.6339967000042,
"children": {
"steps_from_proto": {
"total": 0.0046097000000728805,
"count": 4,
"is_parallel": true,
"self": 0.0009336999999676188,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0036760000001052617,
"count": 16,
"is_parallel": true,
"self": 0.0036760000001052617
}
}
},
"UnityEnvironment.step": {
"total": 212.2948326000027,
"count": 15638,
"is_parallel": true,
"self": 10.84308829999847,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.761387600003554,
"count": 15638,
"is_parallel": true,
"self": 9.761387600003554
},
"communicator.exchange": {
"total": 156.46432419999547,
"count": 15638,
"is_parallel": true,
"self": 156.46432419999547
},
"steps_from_proto": {
"total": 35.22603250000521,
"count": 31276,
"is_parallel": true,
"self": 6.827983600008803,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.398048899996407,
"count": 125104,
"is_parallel": true,
"self": 28.398048899996407
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 611.9045352000024,
"count": 15638,
"self": 3.1723605999962956,
"children": {
"process_trajectory": {
"total": 83.39987310000596,
"count": 15638,
"self": 83.39987310000596
},
"_update_policy": {
"total": 525.3323015000002,
"count": 11,
"self": 45.60896249999962,
"children": {
"TorchPOCAOptimizer.update": {
"total": 479.7233390000006,
"count": 303,
"self": 479.7233390000006
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.999999974752427e-07,
"count": 1,
"self": 9.999999974752427e-07
},
"TrainerController._save_models": {
"total": 0.24146730000006755,
"count": 1,
"self": 0.0032110000000784567,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2382562999999891,
"count": 1,
"self": 0.2382562999999891
}
}
}
}
}
}
}