{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 3.164043664932251, "min": 3.1504082679748535, "max": 3.2957186698913574, "count": 100 }, "SoccerTwos.Policy.Entropy.sum": { "value": 55484.66796875, "min": 17966.31640625, "max": 118636.3671875, "count": 100 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 692.2857142857143, "min": 429.5, "max": 999.0, "count": 100 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19384.0, "min": 14860.0, "max": 28128.0, "count": 100 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1196.4868807812372, "min": 1195.714274014148, "max": 1202.1226653024685, "count": 79 }, "SoccerTwos.Self-play.ELO.sum": { "value": 9571.895046249898, "min": 2391.723822481543, "max": 19213.65732599318, "count": 79 }, "SoccerTwos.Step.mean": { "value": 999476.0, "min": 9082.0, "max": 999476.0, "count": 100 }, "SoccerTwos.Step.sum": { "value": 999476.0, "min": 9082.0, "max": 999476.0, "count": 100 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.0050732153467834, "min": -0.06521205604076385, "max": 0.004090228583663702, "count": 100 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -0.07102501392364502, "min": -0.8477566838264465, "max": 0.08180457353591919, "count": 100 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.004948639776557684, "min": -0.06522572785615921, "max": 0.004414226859807968, "count": 100 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -0.0692809596657753, "min": -0.8479344844818115, "max": 0.08828453719615936, "count": 100 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 100 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 100 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": 0.21462856978178024, "min": -0.625, "max": 0.21462856978178024, "count": 100 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": 3.0047999769449234, "min": -10.0, "max": 3.0983999967575073, "count": 100 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": 0.21462856978178024, "min": -0.625, "max": 0.21462856978178024, "count": 100 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": 3.0047999769449234, "min": -10.0, "max": 3.0983999967575073, "count": 100 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 100 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 100 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.019011949476165076, "min": 0.012549475398069868, "max": 0.023926622100407257, "count": 46 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.019011949476165076, "min": 0.012549475398069868, "max": 0.023926622100407257, "count": 46 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.001051949808606878, "min": 4.6546632878137945e-06, "max": 0.0048920990588764346, "count": 46 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.001051949808606878, "min": 4.6546632878137945e-06, "max": 0.0048920990588764346, "count": 46 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.001052915861752505, "min": 4.704622824647231e-06, "max": 0.005131117440760136, "count": 46 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.001052915861752505, "min": 4.704622824647231e-06, "max": 0.005131117440760136, "count": 46 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 46 }, "SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, 
"max": 0.0003, "count": 46 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 46 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 46 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 46 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 46 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1730118741", "python_version": "3.10.12 (main, Jul 5 2023, 15:34:07) [Clang 14.0.6 ]", "command_line_arguments": "/opt/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force", "mlagents_version": "1.2.0.dev0", "mlagents_envs_version": "1.2.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.2.2", "numpy_version": "1.23.5", "end_time_seconds": "1730123993" }, "total": 5251.929941541, "count": 1, "self": 0.8905895819998477, "children": { "run_training.setup": { "total": 0.0407955840000227, "count": 1, "self": 0.0407955840000227 }, "TrainerController.start_learning": { "total": 5250.998556375, "count": 1, "self": 0.8494405689270934, "children": { "TrainerController._reset_env": { "total": 5.40024495900002, "count": 5, "self": 5.40024495900002 }, "TrainerController.advance": { "total": 5244.591269847072, "count": 65147, "self": 0.783884355918417, "children": { "env_step": { "total": 4233.473686160043, "count": 65147, "self": 4060.969413991089, "children": { "SubprocessEnvManager._take_step": { "total": 171.87254303697318, "count": 65147, "self": 4.632058884001879, "children": { "TorchPolicy.evaluate": { "total": 167.2404841529713, "count": 129428, "self": 167.2404841529713 } } }, "workers": { "total": 0.6317291319809328, "count": 65147, "self": 0.0, "children": { "worker_root": { "total": 5244.4905273399, "count": 65147, "is_parallel": true, "self": 1295.7024681188686, "children": { "steps_from_proto": { "total": 0.011031582000441631, "count": 10, "is_parallel": true, "self": 0.00150404100003243, "children": { "_process_rank_one_or_two_observation": { "total": 0.009527541000409201, "count": 40, "is_parallel": true, "self": 0.009527541000409201 } } }, "UnityEnvironment.step": { "total": 3948.7770276390306, "count": 65147, "is_parallel": true, "self": 10.884332605109194, "children": { "UnityEnvironment._generate_step_input": { "total": 66.44501918499651, "count": 65147, "is_parallel": true, "self": 66.44501918499651 }, "communicator.exchange": { "total": 3734.903470397952, "count": 65147, "is_parallel": true, "self": 3734.903470397952 }, "steps_from_proto": { "total": 136.54420545097327, "count": 130294, "is_parallel": true, "self": 15.792033981142652, "children": { "_process_rank_one_or_two_observation": { "total": 120.75217146983061, "count": 521176, "is_parallel": true, "self": 120.75217146983061 } } } } } } } } } } }, "trainer_advance": { "total": 1010.3336993311102, "count": 65147, "self": 9.05399979518063, "children": { "process_trajectory": { "total": 139.06804578292883, "count": 65147, "self": 138.66621924192884, "children": { "RLTrainer._checkpoint": { "total": 0.40182654099999127, "count": 2, "self": 0.40182654099999127 } } }, "_update_policy": { "total": 862.2116537530007, "count": 46, "self": 94.00169543700827, 
"children": { "TorchPOCAOptimizer.update": { "total": 768.2099583159925, "count": 1380, "self": 768.2099583159925 } } } } } } }, "trainer_threads": { "total": 6.250002115848474e-07, "count": 1, "self": 6.250002115848474e-07 }, "TrainerController._save_models": { "total": 0.15760037500058388, "count": 1, "self": 0.002268500000354834, "children": { "RLTrainer._checkpoint": { "total": 0.15533187500022905, "count": 1, "self": 0.15533187500022905 } } } } } } }