{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.6152853965759277,
"min": 1.6152853965759277,
"max": 2.88734769821167,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 15404.9765625,
"min": 15404.9765625,
"max": 29557.68359375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 7.5939483642578125,
"min": 0.08465117961168289,
"max": 7.5939483642578125,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1480.8199462890625,
"min": 16.42232894897461,
"max": 1517.674072265625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 15.295454545454545,
"min": 2.6818181818181817,
"max": 15.818181818181818,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 673.0,
"min": 118.0,
"max": 870.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 15.295454545454545,
"min": 2.6818181818181817,
"max": 15.818181818181818,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 673.0,
"min": 118.0,
"max": 870.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.05884845642026003,
"min": 0.042890819961173166,
"max": 0.05884845642026003,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.11769691284052006,
"min": 0.08578163992234633,
"max": 0.14945020310366158,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.3042387500697491,
"min": 0.1109681136623098,
"max": 0.3042387500697491,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6084775001394982,
"min": 0.2219362273246196,
"max": 0.8250444426256067,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.432097856000005e-06,
"min": 6.432097856000005e-06,
"max": 0.00029023200325600004,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.286419571200001e-05,
"min": 1.286419571200001e-05,
"max": 0.0007419960526679999,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.102144,
"min": 0.102144,
"max": 0.196744,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.204288,
"min": 0.204288,
"max": 0.5473320000000002,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00011698560000000007,
"min": 0.00011698560000000007,
"max": 0.0048375256,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00023397120000000015,
"min": 0.00023397120000000015,
"max": 0.012371866800000002,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677710679",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677711146"
},
"total": 466.70228948199997,
"count": 1,
"self": 0.42417477200012854,
"children": {
"run_training.setup": {
"total": 0.10674178399995071,
"count": 1,
"self": 0.10674178399995071
},
"TrainerController.start_learning": {
"total": 466.1713729259999,
"count": 1,
"self": 0.5451415709945877,
"children": {
"TrainerController._reset_env": {
"total": 7.331290541000044,
"count": 1,
"self": 7.331290541000044
},
"TrainerController.advance": {
"total": 458.17238222100536,
"count": 18200,
"self": 0.28932266699848697,
"children": {
"env_step": {
"total": 457.8830595540069,
"count": 18200,
"self": 320.35797583299757,
"children": {
"SubprocessEnvManager._take_step": {
"total": 137.2406449350051,
"count": 18200,
"self": 1.5290543090019355,
"children": {
"TorchPolicy.evaluate": {
"total": 135.71159062600316,
"count": 18200,
"self": 30.2905803559928,
"children": {
"TorchPolicy.sample_actions": {
"total": 105.42101027001036,
"count": 18200,
"self": 105.42101027001036
}
}
}
}
},
"workers": {
"total": 0.2844387860042161,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 464.66502466899715,
"count": 18200,
"is_parallel": true,
"self": 221.16348440200466,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002174560000071324,
"count": 1,
"is_parallel": true,
"self": 0.0007738339999150412,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014007260001562827,
"count": 10,
"is_parallel": true,
"self": 0.0014007260001562827
}
}
},
"UnityEnvironment.step": {
"total": 0.08619363399998292,
"count": 1,
"is_parallel": true,
"self": 0.002991712000152802,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00025803599999107973,
"count": 1,
"is_parallel": true,
"self": 0.00025803599999107973
},
"communicator.exchange": {
"total": 0.08097321899992949,
"count": 1,
"is_parallel": true,
"self": 0.08097321899992949
},
"steps_from_proto": {
"total": 0.001970666999909554,
"count": 1,
"is_parallel": true,
"self": 0.00048233899974547967,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014883280001640742,
"count": 10,
"is_parallel": true,
"self": 0.0014883280001640742
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 243.5015402669925,
"count": 18199,
"is_parallel": true,
"self": 9.697661220998953,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.483377517013878,
"count": 18199,
"is_parallel": true,
"self": 5.483377517013878
},
"communicator.exchange": {
"total": 195.8069058279914,
"count": 18199,
"is_parallel": true,
"self": 195.8069058279914
},
"steps_from_proto": {
"total": 32.513595700988276,
"count": 18199,
"is_parallel": true,
"self": 7.0953470900116145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.41824861097666,
"count": 181990,
"is_parallel": true,
"self": 25.41824861097666
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013384299995777837,
"count": 1,
"self": 0.00013384299995777837,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 453.43585246907253,
"count": 528982,
"is_parallel": true,
"self": 13.739551421075703,
"children": {
"process_trajectory": {
"total": 322.0069829139975,
"count": 528982,
"is_parallel": true,
"self": 320.7396215989977,
"children": {
"RLTrainer._checkpoint": {
"total": 1.267361314999789,
"count": 4,
"is_parallel": true,
"self": 1.267361314999789
}
}
},
"_update_policy": {
"total": 117.68931813399934,
"count": 45,
"is_parallel": true,
"self": 60.278016426001386,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.41130170799795,
"count": 2292,
"is_parallel": true,
"self": 57.41130170799795
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12242474999993647,
"count": 1,
"self": 0.0006489879999662662,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1217757619999702,
"count": 1,
"self": 0.1217757619999702
}
}
}
}
}
}
}
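The JSON above is the statistics file that ML-Agents (mlagents-learn) writes for this SnowballTarget run: "gauges" holds per-metric value/min/max/count summaries, "metadata" records the run environment, and the remaining top-level keys ("total", "count", "self", "children") form a hierarchical profiling tree of where training time was spent. The script below is a minimal, illustrative sketch of how such a file could be inspected; the script itself and the run_logs/timers.json path are assumptions for illustration, not part of the original run artifacts.

# Illustrative sketch only; the file path is an assumed location for the JSON above.
import json

def walk_timers(node, name="root", depth=0):
    """Recursively print the profiling tree: total seconds, call count, self time."""
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    self_time = node.get("self", 0.0)
    print(f"{'  ' * depth}{name}: total={total:.3f}s count={count} self={self_time:.3f}s")
    for child_name, child in node.get("children", {}).items():
        walk_timers(child, child_name, depth + 1)

with open("run_logs/timers.json") as f:  # assumed path to the JSON shown above
    stats = json.load(f)

# Each gauge stores the last recorded value plus min/max/count over the run.
for metric, gauge in stats["gauges"].items():
    print(f"{metric}: last={gauge['value']:.4f} min={gauge['min']:.4f} max={gauge['max']:.4f}")

# The root object itself is the top timer node (total/count/self/children).
walk_timers(stats)

Run against this file, the gauge loop would list the training metrics (e.g. SnowballTarget.Environment.CumulativeReward.mean), and walk_timers would print the timing hierarchy from the root total of about 466.7 s down through TrainerController.start_learning and its children.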