poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6256906986236572,
"min": 1.5151653289794922,
"max": 3.2957353591918945,
"count": 1154
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33138.078125,
"min": 13666.1015625,
"max": 118539.6875,
"count": 1154
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 57.37931034482759,
"min": 40.30578512396694,
"max": 999.0,
"count": 1154
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19968.0,
"min": 15708.0,
"max": 25472.0,
"count": 1154
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1645.624387676063,
"min": 1199.5057223791089,
"max": 1669.7630272282447,
"count": 1140
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 286338.64345563494,
"min": 2404.2031305023047,
"max": 375012.08373040194,
"count": 1140
},
"SoccerTwos.Step.mean": {
"value": 11539989.0,
"min": 9520.0,
"max": 11539989.0,
"count": 1154
},
"SoccerTwos.Step.sum": {
"value": 11539989.0,
"min": 9520.0,
"max": 11539989.0,
"count": 1154
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.019805457442998886,
"min": -0.11690514534711838,
"max": 0.19465824961662292,
"count": 1154
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.4659550189971924,
"min": -23.752120971679688,
"max": 31.145320892333984,
"count": 1154
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02072952315211296,
"min": -0.11703426390886307,
"max": 0.1865769624710083,
"count": 1154
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.627666473388672,
"min": -24.734535217285156,
"max": 29.852313995361328,
"count": 1154
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1154
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1154
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0650217124394008,
"min": -0.47911351496303406,
"max": 0.46066511855569,
"count": 1154
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 11.378799676895142,
"min": -63.05960011482239,
"max": 51.5588002204895,
"count": 1154
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0650217124394008,
"min": -0.47911351496303406,
"max": 0.46066511855569,
"count": 1154
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 11.378799676895142,
"min": -63.05960011482239,
"max": 51.5588002204895,
"count": 1154
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1154
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1154
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015679552406072616,
"min": 0.010357148100592895,
"max": 0.026769747784904516,
"count": 557
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015679552406072616,
"min": 0.010357148100592895,
"max": 0.026769747784904516,
"count": 557
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09584968760609627,
"min": 7.58380077968468e-05,
"max": 0.1247166780134042,
"count": 557
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09584968760609627,
"min": 7.58380077968468e-05,
"max": 0.1247166780134042,
"count": 557
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.096784957498312,
"min": 7.545345603527191e-05,
"max": 0.12721291308601698,
"count": 557
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.096784957498312,
"min": 7.545345603527191e-05,
"max": 0.12721291308601698,
"count": 557
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 557
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 557
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 557
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 557
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 557
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 557
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731096002",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/home/bruno/.local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1731108295"
},
"total": 12293.145396513999,
"count": 1,
"self": 0.10250656699827232,
"children": {
"run_training.setup": {
"total": 0.04124298799979442,
"count": 1,
"self": 0.04124298799979442
},
"TrainerController.start_learning": {
"total": 12293.001646959001,
"count": 1,
"self": 12.606341991941008,
"children": {
"TrainerController._reset_env": {
"total": 3.6790765220048343,
"count": 58,
"self": 3.6790765220048343
},
"TrainerController.advance": {
"total": 12276.601408473056,
"count": 796136,
"self": 14.037701958906837,
"children": {
"env_step": {
"total": 9077.855338799312,
"count": 796136,
"self": 6440.073907020722,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2629.75552052446,
"count": 796136,
"self": 75.34004229852508,
"children": {
"TorchPolicy.evaluate": {
"total": 2554.4154782259347,
"count": 1453228,
"self": 2554.4154782259347
}
}
},
"workers": {
"total": 8.025911254130278,
"count": 796135,
"self": 0.0,
"children": {
"worker_root": {
"total": 12271.393248586126,
"count": 796135,
"is_parallel": true,
"self": 7142.085683102685,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003926381000383117,
"count": 2,
"is_parallel": true,
"self": 0.0010260030003337306,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002900378000049386,
"count": 8,
"is_parallel": true,
"self": 0.002900378000049386
}
}
},
"UnityEnvironment.step": {
"total": 0.037870615999963775,
"count": 1,
"is_parallel": true,
"self": 0.0008947249998527695,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006014400000822206,
"count": 1,
"is_parallel": true,
"self": 0.0006014400000822206
},
"communicator.exchange": {
"total": 0.03402241999992839,
"count": 1,
"is_parallel": true,
"self": 0.03402241999992839
},
"steps_from_proto": {
"total": 0.0023520310001003963,
"count": 2,
"is_parallel": true,
"self": 0.00045028300019112066,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019017479999092757,
"count": 8,
"is_parallel": true,
"self": 0.0019017479999092757
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5129.247043940451,
"count": 796134,
"is_parallel": true,
"self": 302.1264598935504,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 172.57972834578413,
"count": 796134,
"is_parallel": true,
"self": 172.57972834578413
},
"communicator.exchange": {
"total": 3833.6327489276505,
"count": 796134,
"is_parallel": true,
"self": 3833.6327489276505
},
"steps_from_proto": {
"total": 820.9081067734655,
"count": 1592268,
"is_parallel": true,
"self": 164.48092725723473,
"children": {
"_process_rank_one_or_two_observation": {
"total": 656.4271795162308,
"count": 6369072,
"is_parallel": true,
"self": 656.4271795162308
}
}
}
}
},
"steps_from_proto": {
"total": 0.060521542990500166,
"count": 114,
"is_parallel": true,
"self": 0.011821879970739246,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04869966301976092,
"count": 456,
"is_parallel": true,
"self": 0.04869966301976092
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3184.7083677148376,
"count": 796135,
"self": 88.46047877437559,
"children": {
"process_trajectory": {
"total": 1355.7992229554516,
"count": 796135,
"self": 1352.8841532714537,
"children": {
"RLTrainer._checkpoint": {
"total": 2.915069683997899,
"count": 23,
"self": 2.915069683997899
}
}
},
"_update_policy": {
"total": 1740.4486659850104,
"count": 557,
"self": 766.7064941201447,
"children": {
"TorchPOCAOptimizer.update": {
"total": 973.7421718648657,
"count": 16710,
"self": 973.7421718648657
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.469992174766958e-07,
"count": 1,
"self": 5.469992174766958e-07
},
"TrainerController._save_models": {
"total": 0.11481942499995057,
"count": 1,
"self": 0.001115167999159894,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11370425700079068,
"count": 1,
"self": 0.11370425700079068
}
}
}
}
}
}
}