{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 1.2832632064819336, "min": 1.2684003114700317, "max": 3.2957401275634766, "count": 600 }, "SoccerTwos.Policy.Entropy.sum": { "value": 25213.5546875, "min": 23637.341796875, "max": 105463.6875, "count": 600 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 50.06122448979592, "min": 40.16393442622951, "max": 999.0, "count": 600 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19624.0, "min": 9148.0, "max": 30308.0, "count": 600 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1612.5714019639524, "min": 1194.2640186974977, "max": 1630.7290462503897, "count": 597 }, "SoccerTwos.Self-play.ELO.sum": { "value": 316063.9947849347, "min": 2392.1594197128456, "max": 395496.9839687504, "count": 597 }, "SoccerTwos.Step.mean": { "value": 5999978.0, "min": 9066.0, "max": 5999978.0, "count": 600 }, "SoccerTwos.Step.sum": { "value": 5999978.0, "min": 9066.0, "max": 5999978.0, "count": 600 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.019455071538686752, "min": -0.13337911665439606, "max": 0.17903470993041992, "count": 600 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -3.8131940364837646, "min": -21.740795135498047, "max": 29.003623962402344, "count": 600 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.019237015396356583, "min": -0.1282082349061966, "max": 0.18147912621498108, "count": 600 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -3.7704551219940186, "min": -20.89794158935547, "max": 28.931102752685547, "count": 600 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 600 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 600 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": -0.01065918255825432, "min": -0.5714285714285714, "max": 0.4607020406698694, "count": 600 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": -2.0891997814178467, "min": -56.986400067806244, "max": 51.64679992198944, "count": 600 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": -0.01065918255825432, "min": -0.5714285714285714, "max": 0.4607020406698694, "count": 600 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": -2.0891997814178467, "min": -56.986400067806244, "max": 51.64679992198944, "count": 600 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 600 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 600 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.017776346167859933, "min": 0.012005063751712442, "max": 0.023718134961867084, "count": 289 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.017776346167859933, "min": 0.012005063751712442, "max": 0.023718134961867084, "count": 289 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.1329140156507492, "min": 0.00024713672003902804, "max": 0.13567976752916971, "count": 289 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.1329140156507492, "min": 0.00024713672003902804, "max": 0.13567976752916971, "count": 289 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.13346647222836813, "min": 0.0002482580409074823, "max": 0.1363338639338811, "count": 289 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.13346647222836813, "min": 0.0002482580409074823, "max": 0.1363338639338811, "count": 289 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 4.1676658333334297e-07, "min": 4.1676658333334297e-07, "max": 
0.0004977303337872668, "count": 289 }, "SoccerTwos.Policy.LearningRate.sum": { "value": 4.1676658333334297e-07, "min": 4.1676658333334297e-07, "max": 0.0004977303337872668, "count": 289 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.10012499999999996, "min": 0.10012499999999996, "max": 0.24931909999999988, "count": 289 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.10012499999999996, "min": 0.10012499999999996, "max": 0.24931909999999988, "count": 289 }, "SoccerTwos.Policy.Beta.mean": { "value": 1.4575000000000109e-05, "min": 1.4575000000000109e-05, "max": 0.00547507906, "count": 289 }, "SoccerTwos.Policy.Beta.sum": { "value": 1.4575000000000109e-05, "min": 1.4575000000000109e-05, "max": 0.00547507906, "count": 289 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1695400149", "python_version": "3.9.18 (main, Sep 11 2023, 13:41:44) \n[GCC 11.2.0]", "command_line_arguments": "/home/bencla02/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1695424007" }, "total": 23857.254660554, "count": 1, "self": 1.8443036030039366, "children": { "run_training.setup": { "total": 0.06774241900029665, "count": 1, "self": 0.06774241900029665 }, "TrainerController.start_learning": { "total": 23855.342614531997, "count": 1, "self": 9.3236760759537, "children": { "TrainerController._reset_env": { "total": 22.473410881995733, "count": 30, "self": 22.473410881995733 }, "TrainerController.advance": { "total": 23821.58676374805, "count": 412951, "self": 9.753795432305196, "children": { "env_step": { "total": 20328.32970435258, "count": 412951, "self": 16378.638990021069, "children": { "SubprocessEnvManager._take_step": { "total": 3944.168110118123, "count": 412951, "self": 58.450217890976546, "children": { "TorchPolicy.evaluate": { "total": 3885.7178922271464, "count": 755220, "self": 3885.7178922271464 } } }, "workers": { "total": 5.522604213390423, "count": 412951, "self": 0.0, "children": { "worker_root": { "total": 23796.8697500605, "count": 412951, "is_parallel": true, "self": 8782.898021341722, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0073635650001051545, "count": 2, "is_parallel": true, "self": 0.001946675999533909, "children": { "_process_rank_one_or_two_observation": { "total": 0.0054168890005712456, "count": 8, "is_parallel": true, "self": 0.0054168890005712456 } } }, "UnityEnvironment.step": { "total": 0.07333069100013745, "count": 1, "is_parallel": true, "self": 0.0016978019993985072, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0012270420002096216, "count": 1, "is_parallel": true, "self": 0.0012270420002096216 }, "communicator.exchange": { "total": 0.06524673000012626, "count": 1, "is_parallel": true, "self": 0.06524673000012626 }, "steps_from_proto": { "total": 0.005159117000403057, "count": 2, "is_parallel": true, "self": 0.0012761110010615084, "children": { "_process_rank_one_or_two_observation": { "total": 0.003883005999341549, "count": 8, "is_parallel": true, "self": 0.003883005999341549 } } } } } } }, "UnityEnvironment.step": { "total": 15013.84787381278, "count": 412950, "is_parallel": true, "self": 603.0381175974526, 
"children": { "UnityEnvironment._generate_step_input": { "total": 417.71546232640776, "count": 412950, "is_parallel": true, "self": 417.71546232640776 }, "communicator.exchange": { "total": 12178.089629355487, "count": 412950, "is_parallel": true, "self": 12178.089629355487 }, "steps_from_proto": { "total": 1815.0046645334323, "count": 825900, "is_parallel": true, "self": 358.5539025688031, "children": { "_process_rank_one_or_two_observation": { "total": 1456.4507619646292, "count": 3303600, "is_parallel": true, "self": 1456.4507619646292 } } } } }, "steps_from_proto": { "total": 0.12385490599945115, "count": 58, "is_parallel": true, "self": 0.023857262021920178, "children": { "_process_rank_one_or_two_observation": { "total": 0.09999764397753097, "count": 232, "is_parallel": true, "self": 0.09999764397753097 } } } } } } } } }, "trainer_advance": { "total": 3483.503263963164, "count": 412951, "self": 64.74315570805948, "children": { "process_trajectory": { "total": 1856.202270903104, "count": 412951, "self": 1834.0740205301086, "children": { "RLTrainer._checkpoint": { "total": 22.128250372995353, "count": 12, "self": 22.128250372995353 } } }, "_update_policy": { "total": 1562.5578373520007, "count": 289, "self": 911.3214614978842, "children": { "TorchPOCAOptimizer.update": { "total": 651.2363758541164, "count": 8679, "self": 651.2363758541164 } } } } } } }, "trainer_threads": { "total": 8.90999217517674e-07, "count": 1, "self": 8.90999217517674e-07 }, "TrainerController._save_models": { "total": 1.9587629349989584, "count": 1, "self": 0.1290339059996768, "children": { "RLTrainer._checkpoint": { "total": 1.8297290289992816, "count": 1, "self": 1.8297290289992816 } } } } } } }