{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.167383909225464,
"min": 2.7106640338897705,
"max": 3.2957725524902344,
"count": 544
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 41049.296875,
"min": 19608.78125,
"max": 128716.046875,
"count": 544
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 995.2,
"min": 381.7142857142857,
"max": 999.0,
"count": 544
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19904.0,
"min": 13680.0,
"max": 26868.0,
"count": 544
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1177.3107222571905,
"min": 1172.4777884557814,
"max": 1208.5267078256456,
"count": 278
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 7063.8643335431425,
"min": 2344.9555769115627,
"max": 21691.976140517247,
"count": 278
},
"SoccerTwos.Step.mean": {
"value": 5439112.0,
"min": 9450.0,
"max": 5439112.0,
"count": 544
},
"SoccerTwos.Step.sum": {
"value": 5439112.0,
"min": 9450.0,
"max": 5439112.0,
"count": 544
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.008692451752722263,
"min": -0.03634738177061081,
"max": 0.061751946806907654,
"count": 544
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.08692451566457748,
"min": -0.8172084093093872,
"max": 0.8027512431144714,
"count": 544
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.008504488505423069,
"min": -0.03568016365170479,
"max": 0.061723921447992325,
"count": 544
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.08504488319158554,
"min": -0.8263312578201294,
"max": 0.8114222288131714,
"count": 544
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 544
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 544
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.04722000062465668,
"min": -0.5714285714285714,
"max": 0.3640749976038933,
"count": 544
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.4722000062465668,
"min": -8.631199985742569,
"max": 5.8251999616622925,
"count": 544
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.04722000062465668,
"min": -0.5714285714285714,
"max": 0.3640749976038933,
"count": 544
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.4722000062465668,
"min": -8.631199985742569,
"max": 5.8251999616622925,
"count": 544
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 544
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 544
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014030667313879046,
"min": 0.011357098693648975,
"max": 0.025539314995209377,
"count": 251
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014030667313879046,
"min": 0.011357098693648975,
"max": 0.025539314995209377,
"count": 251
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.002205784496618435,
"min": 3.967816223730362e-09,
"max": 0.008369538219024737,
"count": 251
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.002205784496618435,
"min": 3.967816223730362e-09,
"max": 0.008369538219024737,
"count": 251
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0022077524607690673,
"min": 4.4284059530497185e-09,
"max": 0.008384909993037582,
"count": 251
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0022077524607690673,
"min": 4.4284059530497185e-09,
"max": 0.008384909993037582,
"count": 251
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 251
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 251
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 251
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 251
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 251
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 251
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718713187",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718722251"
},
"total": 9063.936211994,
"count": 1,
"self": 0.07311926500005939,
"children": {
"run_training.setup": {
"total": 0.0499135740001293,
"count": 1,
"self": 0.0499135740001293
},
"TrainerController.start_learning": {
"total": 9063.813179155,
"count": 1,
"self": 7.071073910870837,
"children": {
"TrainerController._reset_env": {
"total": 5.497794401001556,
"count": 28,
"self": 5.497794401001556
},
"TrainerController.advance": {
"total": 9051.045150815127,
"count": 354354,
"self": 7.3268535387524025,
"children": {
"env_step": {
"total": 7216.38152588307,
"count": 354354,
"self": 5336.483690287725,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1875.5880353470225,
"count": 354354,
"self": 49.139999579167124,
"children": {
"TorchPolicy.evaluate": {
"total": 1826.4480357678553,
"count": 703684,
"self": 1826.4480357678553
}
}
},
"workers": {
"total": 4.309800248322517,
"count": 354353,
"self": 0.0,
"children": {
"worker_root": {
"total": 9052.663712260855,
"count": 354353,
"is_parallel": true,
"self": 4686.861310212332,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002798342999540182,
"count": 2,
"is_parallel": true,
"self": 0.0006868559994472889,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002111487000092893,
"count": 8,
"is_parallel": true,
"self": 0.002111487000092893
}
}
},
"UnityEnvironment.step": {
"total": 0.027724608999960765,
"count": 1,
"is_parallel": true,
"self": 0.000773768999351887,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005563469999287918,
"count": 1,
"is_parallel": true,
"self": 0.0005563469999287918
},
"communicator.exchange": {
"total": 0.024384438000197406,
"count": 1,
"is_parallel": true,
"self": 0.024384438000197406
},
"steps_from_proto": {
"total": 0.00201005500048268,
"count": 2,
"is_parallel": true,
"self": 0.00045496500069930335,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015550899997833767,
"count": 8,
"is_parallel": true,
"self": 0.0015550899997833767
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4365.749160123526,
"count": 354352,
"is_parallel": true,
"self": 221.96528798972122,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 150.28138246569188,
"count": 354352,
"is_parallel": true,
"self": 150.28138246569188
},
"communicator.exchange": {
"total": 3324.386212575986,
"count": 354352,
"is_parallel": true,
"self": 3324.386212575986
},
"steps_from_proto": {
"total": 669.1162770921264,
"count": 708704,
"is_parallel": true,
"self": 123.28174811653116,
"children": {
"_process_rank_one_or_two_observation": {
"total": 545.8345289755953,
"count": 2834816,
"is_parallel": true,
"self": 545.8345289755953
}
}
}
}
},
"steps_from_proto": {
"total": 0.05324192499756464,
"count": 54,
"is_parallel": true,
"self": 0.010207459011780884,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.043034465985783754,
"count": 216,
"is_parallel": true,
"self": 0.043034465985783754
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1827.3367713933048,
"count": 354353,
"self": 63.86901875470949,
"children": {
"process_trajectory": {
"total": 497.3312759065825,
"count": 354353,
"self": 495.3270141715843,
"children": {
"RLTrainer._checkpoint": {
"total": 2.004261734998181,
"count": 10,
"self": 2.004261734998181
}
}
},
"_update_policy": {
"total": 1266.1364767320129,
"count": 252,
"self": 728.892340538051,
"children": {
"TorchPOCAOptimizer.update": {
"total": 537.2441361939618,
"count": 7560,
"self": 537.2441361939618
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2860000424552709e-06,
"count": 1,
"self": 1.2860000424552709e-06
},
"TrainerController._save_models": {
"total": 0.19915874199978134,
"count": 1,
"self": 0.0020229499987181043,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19713579200106324,
"count": 1,
"self": 0.19713579200106324
}
}
}
}
}
}
}