poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.3980586528778076,
"min": 1.3634949922561646,
"max": 3.2957286834716797,
"count": 1923
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 27916.435546875,
"min": 17922.47265625,
"max": 121982.59375,
"count": 1923
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 54.142857142857146,
"min": 42.06086956521739,
"max": 999.0,
"count": 1923
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19708.0,
"min": 16296.0,
"max": 25048.0,
"count": 1923
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1633.4859769363852,
"min": 1185.2219053093295,
"max": 1675.201217289502,
"count": 1915
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 297294.4478024221,
"min": 2371.7281019661887,
"max": 365594.8613591246,
"count": 1915
},
"SoccerTwos.Step.mean": {
"value": 19229985.0,
"min": 9540.0,
"max": 19229985.0,
"count": 1923
},
"SoccerTwos.Step.sum": {
"value": 19229985.0,
"min": 9540.0,
"max": 19229985.0,
"count": 1923
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.007346347905695438,
"min": -0.13940945267677307,
"max": 0.19194668531417847,
"count": 1923
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.3370352983474731,
"min": -21.46905517578125,
"max": 27.089494705200195,
"count": 1923
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.006368838716298342,
"min": -0.1394197791814804,
"max": 0.18767111003398895,
"count": 1923
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.1591286659240723,
"min": -21.470645904541016,
"max": 25.7528076171875,
"count": 1923
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1923
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1923
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.004892306995915843,
"min": -0.625,
"max": 0.5011166661977768,
"count": 1923
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.8903998732566833,
"min": -56.993600368499756,
"max": 58.75800025463104,
"count": 1923
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.004892306995915843,
"min": -0.625,
"max": 0.5011166661977768,
"count": 1923
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.8903998732566833,
"min": -56.993600368499756,
"max": 58.75800025463104,
"count": 1923
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1923
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1923
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01572740066718931,
"min": 0.009356382608045048,
"max": 0.024892098117076482,
"count": 929
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01572740066718931,
"min": 0.009356382608045048,
"max": 0.024892098117076482,
"count": 929
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09969830686847368,
"min": 0.00018726339743201,
"max": 0.12347314481933912,
"count": 929
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09969830686847368,
"min": 0.00018726339743201,
"max": 0.12347314481933912,
"count": 929
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10096026236812274,
"min": 0.00018778860685415567,
"max": 0.12593497683604557,
"count": 929
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10096026236812274,
"min": 0.00018778860685415567,
"max": 0.12593497683604557,
"count": 929
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 929
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 929
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 929
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 929
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 929
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 929
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1736445161",
"python_version": "3.10.12 (main, Jan 9 2025, 09:23:36) [Clang 16.0.0 (clang-1600.0.26.6)]",
"command_line_arguments": "/Users/hartman23/.pyenv/versions/hugging_face_unit7/bin/mlagents-learn config/poca/SoccerTwos.yaml --env=training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1",
"numpy_version": "1.23.5",
"end_time_seconds": "1736797963"
},
"total": 63518.89755920798,
"count": 1,
"self": 0.10883466608356684,
"children": {
"run_training.setup": {
"total": 0.058410916943103075,
"count": 1,
"self": 0.058410916943103075
},
"TrainerController.start_learning": {
"total": 63518.730313624954,
"count": 1,
"self": 11.427141123102047,
"children": {
"TrainerController._reset_env": {
"total": 6.660674122045748,
"count": 97,
"self": 6.660674122045748
},
"TrainerController.advance": {
"total": 63500.46627292177,
"count": 1314866,
"self": 10.511858905549161,
"children": {
"env_step": {
"total": 52705.449174563866,
"count": 1314866,
"self": 51124.18154920405,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1573.8389198563527,
"count": 1314866,
"self": 53.12486745265778,
"children": {
"TorchPolicy.evaluate": {
"total": 1520.714052403695,
"count": 2422794,
"self": 1520.714052403695
}
}
},
"workers": {
"total": 7.428705503465608,
"count": 1314865,
"self": 0.0,
"children": {
"worker_root": {
"total": 63498.35339734331,
"count": 1314865,
"is_parallel": true,
"self": 14027.942053620704,
"children": {
"steps_from_proto": {
"total": 0.12692924542352557,
"count": 194,
"is_parallel": true,
"self": 0.016379294567741454,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.11054995085578412,
"count": 776,
"is_parallel": true,
"self": 0.11054995085578412
}
}
},
"UnityEnvironment.step": {
"total": 49470.28441447718,
"count": 1314865,
"is_parallel": true,
"self": 131.70878516847733,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 910.4375813604565,
"count": 1314865,
"is_parallel": true,
"self": 910.4375813604565
},
"communicator.exchange": {
"total": 46831.39487738756,
"count": 1314865,
"is_parallel": true,
"self": 46831.39487738756
},
"steps_from_proto": {
"total": 1596.7431705606868,
"count": 2629730,
"is_parallel": true,
"self": 189.29334452899639,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1407.4498260316905,
"count": 10518920,
"is_parallel": true,
"self": 1407.4498260316905
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 10784.505239452352,
"count": 1314865,
"self": 89.22563077264931,
"children": {
"process_trajectory": {
"total": 2218.2159456931986,
"count": 1314865,
"self": 2211.1133135244017,
"children": {
"RLTrainer._checkpoint": {
"total": 7.102632168796845,
"count": 38,
"self": 7.102632168796845
}
}
},
"_update_policy": {
"total": 8477.063662986504,
"count": 929,
"self": 1186.083405618323,
"children": {
"TorchPOCAOptimizer.update": {
"total": 7290.980257368181,
"count": 27870,
"self": 7290.980257368181
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.5797787606716156e-07,
"count": 1,
"self": 4.5797787606716156e-07
},
"TrainerController._save_models": {
"total": 0.17622500006109476,
"count": 1,
"self": 0.0035456251353025436,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17267937492579222,
"count": 1,
"self": 0.17267937492579222
}
}
}
}
}
}
}
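
The file above follows the standard ML-Agents `timers.json` layout: a `gauges` map of per-stat summaries (`value`/`min`/`max`/`count`), a `metadata` block describing the run, and a nested timer tree whose nodes carry `total`/`count`/`self` and optional `children`. As a quick way to inspect it, here is a minimal Python sketch using only the standard library; the `timers.json` path is an assumption and should point at your local `run_logs` copy.

```python
import json


def walk_timers(name, node, depth=0):
    """Recursively print the hierarchical timer tree (total seconds, call count, self time)."""
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    self_time = node.get("self", 0.0)
    print(f"{'  ' * depth}{name}: total={total:.1f}s count={count} self={self_time:.1f}s")
    for child_name, child in node.get("children", {}).items():
        walk_timers(child_name, child, depth + 1)


# Path is an assumption; adjust it to wherever your run_logs directory lives.
with open("timers.json") as f:
    data = json.load(f)

# Gauge summaries: each entry records the latest value plus min/max/count over the run.
for gauge_name, stats in data["gauges"].items():
    print(
        f"{gauge_name}: value={stats['value']:.4f} "
        f"min={stats['min']:.4f} max={stats['max']:.4f} (n={stats['count']})"
    )

# Wall-clock duration from metadata vs. the root timer total.
meta = data["metadata"]
wall_clock = int(meta["end_time_seconds"]) - int(meta["start_time_seconds"])
print(f"wall clock: {wall_clock}s, root timer total: {data['total']:.1f}s")

# Timer tree rooted at the top-level node.
walk_timers(data.get("name", "root"), data)
```

Run against this file, the sketch prints each gauge's last value and its range over the run, compares the metadata wall clock (`end_time_seconds` minus `start_time_seconds`) with the root timer total, and indents the timer hierarchy so hotspots such as `communicator.exchange` are easy to spot.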