{ "name": "root", "gauges": { "SnowballTarget.Policy.Entropy.mean": { "value": 0.601077139377594, "min": 0.5837947726249695, "max": 0.9881709814071655, "count": 80 }, "SnowballTarget.Policy.Entropy.sum": { "value": 5712.63720703125, "min": 5643.8955078125, "max": 10030.509765625, "count": 80 }, "SnowballTarget.Step.mean": { "value": 999992.0, "min": 209936.0, "max": 999992.0, "count": 80 }, "SnowballTarget.Step.sum": { "value": 999992.0, "min": 209936.0, "max": 999992.0, "count": 80 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": { "value": 14.166631698608398, "min": 12.94360065460205, "max": 14.166631698608398, "count": 80 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": { "value": 2762.4931640625, "min": 2485.17138671875, "max": 2896.8720703125, "count": 80 }, "SnowballTarget.Losses.PolicyLoss.mean": { "value": 0.0720234285188638, "min": 0.060970312462635706, "max": 0.07712253872637566, "count": 80 }, "SnowballTarget.Losses.PolicyLoss.sum": { "value": 0.2880937140754552, "min": 0.24388124985054283, "max": 0.369829463961936, "count": 80 }, "SnowballTarget.Losses.ValueLoss.mean": { "value": 0.17329983738269292, "min": 0.14300536825960758, "max": 0.23871639186479882, "count": 80 }, "SnowballTarget.Losses.ValueLoss.sum": { "value": 0.6931993495307717, "min": 0.6001153049223563, "max": 1.0542938566091014, "count": 80 }, "SnowballTarget.Policy.LearningRate.mean": { "value": 1.402899532400004e-06, "min": 1.402899532400004e-06, "max": 0.0002383428205524, "count": 80 }, "SnowballTarget.Policy.LearningRate.sum": { "value": 5.611598129600016e-06, "min": 5.611598129600016e-06, "max": 0.001176864107712, "count": 80 }, "SnowballTarget.Policy.Epsilon.mean": { "value": 0.1004676, "min": 0.1004676, "max": 0.1794476, "count": 80 }, "SnowballTarget.Policy.Epsilon.sum": { "value": 0.4018704, "min": 0.4018704, "max": 0.892288, "count": 80 }, "SnowballTarget.Policy.Beta.mean": { "value": 3.3333240000000065e-05, "min": 3.3333240000000065e-05, "max": 0.00397443524, "count": 80 }, "SnowballTarget.Policy.Beta.sum": { "value": 0.00013333296000000026, "min": 0.00013333296000000026, "max": 0.0196251712, "count": 80 }, "SnowballTarget.Environment.EpisodeLength.mean": { "value": 199.0, "min": 199.0, "max": 199.0, "count": 80 }, "SnowballTarget.Environment.EpisodeLength.sum": { "value": 8756.0, "min": 8756.0, "max": 10945.0, "count": 80 }, "SnowballTarget.Environment.CumulativeReward.mean": { "value": 27.545454545454547, "min": 25.163636363636364, "max": 27.854545454545455, "count": 80 }, "SnowballTarget.Environment.CumulativeReward.sum": { "value": 1212.0, "min": 1116.0, "max": 1532.0, "count": 80 }, "SnowballTarget.Policy.ExtrinsicReward.mean": { "value": 27.545454545454547, "min": 25.163636363636364, "max": 27.854545454545455, "count": 80 }, "SnowballTarget.Policy.ExtrinsicReward.sum": { "value": 1212.0, "min": 1116.0, "max": 1532.0, "count": 80 }, "SnowballTarget.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 80 }, "SnowballTarget.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 80 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1730728012", "python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume", "mlagents_version": "1.2.0.dev0", "mlagents_envs_version": "1.2.0.dev0", "communication_protocol_version": "1.5.0", 
"pytorch_version": "2.5.0+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1730729744" }, "total": 1732.12271626, "count": 1, "self": 0.5099708959999134, "children": { "run_training.setup": { "total": 0.08511985199993433, "count": 1, "self": 0.08511985199993433 }, "TrainerController.start_learning": { "total": 1731.5276255120002, "count": 1, "self": 1.4218081270296352, "children": { "TrainerController._reset_env": { "total": 2.5309698369999296, "count": 1, "self": 2.5309698369999296 }, "TrainerController.advance": { "total": 1727.4860122999705, "count": 72728, "self": 1.4741836119442269, "children": { "env_step": { "total": 1224.1766177549985, "count": 72728, "self": 931.4992000770171, "children": { "SubprocessEnvManager._take_step": { "total": 291.7915417349475, "count": 72728, "self": 5.0668181610044485, "children": { "TorchPolicy.evaluate": { "total": 286.72472357394304, "count": 72728, "self": 286.72472357394304 } } }, "workers": { "total": 0.8858759430338523, "count": 72728, "self": 0.0, "children": { "worker_root": { "total": 1726.5713134429836, "count": 72728, "is_parallel": true, "self": 910.7163590749676, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.002908205000039743, "count": 1, "is_parallel": true, "self": 0.0007877580001149909, "children": { "_process_rank_one_or_two_observation": { "total": 0.002120446999924752, "count": 10, "is_parallel": true, "self": 0.002120446999924752 } } }, "UnityEnvironment.step": { "total": 0.04131846600012068, "count": 1, "is_parallel": true, "self": 0.0008152610000706773, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0003790219998336397, "count": 1, "is_parallel": true, "self": 0.0003790219998336397 }, "communicator.exchange": { "total": 0.03820512500010409, "count": 1, "is_parallel": true, "self": 0.03820512500010409 }, "steps_from_proto": { "total": 0.0019190580001122726, "count": 1, "is_parallel": true, "self": 0.00044222899987289566, "children": { "_process_rank_one_or_two_observation": { "total": 0.001476829000239377, "count": 10, "is_parallel": true, "self": 0.001476829000239377 } } } } } } }, "UnityEnvironment.step": { "total": 815.854954368016, "count": 72727, "is_parallel": true, "self": 40.134165063941964, "children": { "UnityEnvironment._generate_step_input": { "total": 21.711173289010503, "count": 72727, "is_parallel": true, "self": 21.711173289010503 }, "communicator.exchange": { "total": 624.4117628640613, "count": 72727, "is_parallel": true, "self": 624.4117628640613 }, "steps_from_proto": { "total": 129.59785315100225, "count": 72727, "is_parallel": true, "self": 23.52818316418302, "children": { "_process_rank_one_or_two_observation": { "total": 106.06966998681924, "count": 727270, "is_parallel": true, "self": 106.06966998681924 } } } } } } } } } } }, "trainer_advance": { "total": 501.83521093302784, "count": 72728, "self": 1.719571437992272, "children": { "process_trajectory": { "total": 112.45800108804315, "count": 72728, "self": 110.7157728100442, "children": { "RLTrainer._checkpoint": { "total": 1.7422282779989473, "count": 16, "self": 1.7422282779989473 } } }, "_update_policy": { "total": 387.6576384069924, "count": 363, "self": 159.84262632701825, "children": { "TorchPPOOptimizer.update": { "total": 227.81501207997417, "count": 18510, "self": 227.81501207997417 } } } } } } }, "trainer_threads": { "total": 9.680002222012263e-07, "count": 1, "self": 9.680002222012263e-07 }, 
"TrainerController._save_models": { "total": 0.0888342799999009, "count": 1, "self": 0.0011844049995488604, "children": { "RLTrainer._checkpoint": { "total": 0.08764987500035204, "count": 1, "self": 0.08764987500035204 } } } } } } }