{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 2.6528384685516357, "min": 2.4322147369384766, "max": 2.65816330909729, "count": 117 }, "SoccerTwos.Policy.Entropy.sum": { "value": 537613.625, "min": 310775.375, "max": 549801.1875, "count": 117 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 54.08498896247241, "min": 54.08498896247241, "max": 62.84418901660281, "count": 117 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 196004.0, "min": 119788.0, "max": 197784.0, "count": 117 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1614.3367746305064, "min": 1578.8003440979692, "max": 1622.8726740188145, "count": 117 }, "SoccerTwos.Self-play.ELO.sum": { "value": 2925178.235630478, "min": 1660215.0605133465, "max": 2925178.235630478, "count": 117 }, "SoccerTwos.Step.mean": { "value": 199999990.0, "min": 188399962.0, "max": 199999990.0, "count": 117 }, "SoccerTwos.Step.sum": { "value": 199999990.0, "min": 188399962.0, "max": 199999990.0, "count": 117 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.019491881132125854, "min": -0.04701324179768562, "max": 0.01850668340921402, "count": 117 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -35.33877944946289, "min": -78.70016479492188, "max": 30.461999893188477, "count": 117 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.019520428031682968, "min": -0.04731886833906174, "max": 0.019112171605229378, "count": 117 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -35.39053726196289, "min": -79.21178436279297, "max": 31.458635330200195, "count": 117 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 117 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 117 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": -0.0214473244503461, "min": -0.10128233249576588, "max": 0.04053535807610717, "count": 117 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": -38.88399922847748, "min": -180.58639883995056, "max": 66.7211993932724, "count": 117 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": -0.0214473244503461, "min": -0.10128233249576588, "max": 0.04053535807610717, "count": 117 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": -38.88399922847748, "min": -180.58639883995056, "max": 66.7211993932724, "count": 117 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.01666612112821895, "min": 0.014369342826151599, "max": 0.020751501047464746, "count": 117 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.08333060564109473, "min": 0.0393741226347629, "max": 0.10375750523732373, "count": 117 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.07812577520807584, "min": 0.07085908461983005, "max": 0.08086237917343776, "count": 117 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.3906288760403792, "min": 0.15153703168034555, "max": 0.4043118958671888, "count": 117 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.07841956729690233, "min": 0.0710975918918848, "max": 0.0811544980108738, "count": 117 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.39209783648451163, "min": 0.152129290252924, "max": 0.405772490054369, "count": 117 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 4.699177655409736e-08, "min": 4.699177655409736e-08, "max": 1.1630134184980004e-05, "count": 117 }, "SoccerTwos.Policy.LearningRate.sum": { "value": 2.349588827704868e-07, "min": 2.349588827704868e-07, "max": 5.7790140105165514e-05, 
"count": 117 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.2000000000000001, "min": 0.20000000000000007, "max": 0.2000000000000001, "count": 117 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 1.0000000000000004, "min": 0.40000000000000013, "max": 1.0000000000000004, "count": 117 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.010000000000000002, "min": 0.010000000000000002, "max": 0.010000000000000002, "count": 117 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.05000000000000001, "min": 0.020000000000000004, "max": 0.05000000000000001, "count": 117 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 117 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 117 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1678222722", "python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]", "command_line_arguments": "/home/jonatan/PycharmProjects/HuggingFaceDLUnit7/venv3.9/bin/mlagents-learn ./config/poca/0045.yaml --env=ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=0045 --num-envs=4 --no-graphics --resume", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.13.0+cu117", "numpy_version": "1.21.2", "end_time_seconds": "1678230611" }, "total": 7889.465192068001, "count": 1, "self": 0.2696227580017876, "children": { "run_training.setup": { "total": 0.01664846600033343, "count": 1, "self": 0.01664846600033343 }, "TrainerController.start_learning": { "total": 7889.178920843999, "count": 1, "self": 10.752363069383136, "children": { "TrainerController._reset_env": { "total": 3.463714202025585, "count": 60, "self": 3.463714202025585 }, "TrainerController.advance": { "total": 7874.500197356592, "count": 559789, "self": 9.39329748975797, "children": { "env_step": { "total": 4144.548156110133, "count": 559789, "self": 1336.6384269962437, "children": { "SubprocessEnvManager._take_step": { "total": 2801.1750146099694, "count": 817310, "self": 68.20674589369446, "children": { "TorchPolicy.evaluate": { "total": 2732.968268716275, "count": 1478846, "self": 2732.968268716275 } } }, "workers": { "total": 6.734714503920259, "count": 559789, "self": 0.0, "children": { "worker_root": { "total": 31516.81156241736, "count": 817176, "is_parallel": true, "self": 22518.382912696936, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0035886910009139683, "count": 2, "is_parallel": true, "self": 0.0007174489983299281, "children": { "_process_rank_one_or_two_observation": { "total": 0.00287124200258404, "count": 8, "is_parallel": true, "self": 0.00287124200258404 } } }, "UnityEnvironment.step": { "total": 0.023859431999881053, "count": 1, "is_parallel": true, "self": 0.0005689329955202993, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0009388960024807602, "count": 1, "is_parallel": true, "self": 0.0009388960024807602 }, "communicator.exchange": { "total": 0.020894899000268197, "count": 1, "is_parallel": true, "self": 0.020894899000268197 }, "steps_from_proto": { "total": 0.0014567040016117971, "count": 2, "is_parallel": true, "self": 0.0002599760045995936, "children": { "_process_rank_one_or_two_observation": { "total": 0.0011967279970122036, "count": 8, "is_parallel": true, "self": 0.0011967279970122036 } } } } } } }, "steps_from_proto": { "total": 0.538976262039796, "count": 472, "is_parallel": true, 
"self": 0.08545314905495616, "children": { "_process_rank_one_or_two_observation": { "total": 0.4535231129848398, "count": 1888, "is_parallel": true, "self": 0.4535231129848398 } } }, "UnityEnvironment.step": { "total": 8997.889673458383, "count": 817175, "is_parallel": true, "self": 629.6915362030704, "children": { "UnityEnvironment._generate_step_input": { "total": 401.58648914375226, "count": 817175, "is_parallel": true, "self": 401.58648914375226 }, "communicator.exchange": { "total": 6419.922019215028, "count": 817175, "is_parallel": true, "self": 6419.922019215028 }, "steps_from_proto": { "total": 1546.6896288965327, "count": 1634350, "is_parallel": true, "self": 265.2149146822085, "children": { "_process_rank_one_or_two_observation": { "total": 1281.4747142143242, "count": 6537400, "is_parallel": true, "self": 1281.4747142143242 } } } } } } } } } } }, "trainer_advance": { "total": 3720.558743756701, "count": 559789, "self": 97.07802265732244, "children": { "process_trajectory": { "total": 1635.727193868417, "count": 559789, "self": 1632.8433832344126, "children": { "RLTrainer._checkpoint": { "total": 2.883810634004476, "count": 6, "self": 2.883810634004476 } } }, "_update_policy": { "total": 1987.7535272309615, "count": 566, "self": 1322.1454499153187, "children": { "TorchPOCAOptimizer.update": { "total": 665.6080773156427, "count": 16980, "self": 665.6080773156427 } } } } } } }, "trainer_threads": { "total": 5.509973561856896e-07, "count": 1, "self": 5.509973561856896e-07 }, "TrainerController._save_models": { "total": 0.46264566500030924, "count": 1, "self": 0.017148974002338946, "children": { "RLTrainer._checkpoint": { "total": 0.4454966909979703, "count": 1, "self": 0.4454966909979703 } } } } } } }