ppo-pyramids_2 / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39405375719070435,
"min": 0.39405375719070435,
"max": 1.3711942434310913,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11884.6611328125,
"min": 11884.6611328125,
"max": 43878.21484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989957.0,
"min": 29904.0,
"max": 989957.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989957.0,
"min": 29904.0,
"max": 989957.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.35812458395957947,
"min": -0.1926780790090561,
"max": 0.35812458395957947,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 50.85369110107422,
"min": -22.928691864013672,
"max": 51.33595275878906,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.07746008783578873,
"min": 0.05612904950976372,
"max": 1.3501837253570557,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 10.999332427978516,
"min": 7.633550643920898,
"max": 160.67185974121094,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06896481717947166,
"min": 0.0654346836244225,
"max": 0.0747980715774195,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9655074405126032,
"min": 0.4736699358799365,
"max": 1.0363280189842186,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012755712386038679,
"min": 0.000564802245880741,
"max": 0.037981760066615404,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17857997340454151,
"min": 0.004518417967045928,
"max": 0.2658723204663078,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.2175097565000003e-05,
"min": 1.2175097565000003e-05,
"max": 0.0004919182873306286,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00017045136591000004,
"min": 0.00017045136591000004,
"max": 0.0042912138417573,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243500000000001,
"min": 0.10243500000000001,
"max": 0.19838365714285713,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.43409,
"min": 1.2632909000000003,
"max": 2.1906906,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002532565000000001,
"min": 0.0002532565000000001,
"max": 0.009838527348571427,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035455910000000016,
"min": 0.0035455910000000016,
"max": 0.08585844572999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01319117285311222,
"min": 0.01319117285311222,
"max": 0.3732606768608093,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1846764236688614,
"min": 0.1846764236688614,
"max": 2.6128246784210205,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 530.3703703703703,
"min": 456.69354838709677,
"max": 994.90625,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28640.0,
"min": 17480.0,
"max": 32097.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3807888544268079,
"min": -0.9331688061356544,
"max": 1.3807888544268079,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 74.56259813904762,
"min": -29.861401796340942,
"max": 82.67979875206947,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3807888544268079,
"min": -0.9331688061356544,
"max": 1.3807888544268079,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 74.56259813904762,
"min": -29.861401796340942,
"max": 82.67979875206947,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.3807370372340773,
"min": 0.3281575545332715,
"max": 27.761747141679127,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 20.559800010640174,
"min": 20.017610826529562,
"max": 666.2819314002991,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674227908",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_2 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674230025"
},
"total": 2117.571584035,
"count": 1,
"self": 0.4425003139999717,
"children": {
"run_training.setup": {
"total": 0.10409670800004278,
"count": 1,
"self": 0.10409670800004278
},
"TrainerController.start_learning": {
"total": 2117.0249870129996,
"count": 1,
"self": 1.441794357046092,
"children": {
"TrainerController._reset_env": {
"total": 10.176455394999948,
"count": 1,
"self": 10.176455394999948
},
"TrainerController.advance": {
"total": 2105.321217838954,
"count": 63434,
"self": 1.493019461975564,
"children": {
"env_step": {
"total": 1451.0062666140234,
"count": 63434,
"self": 1333.1178923740417,
"children": {
"SubprocessEnvManager._take_step": {
"total": 116.990481499015,
"count": 63434,
"self": 4.799046566034463,
"children": {
"TorchPolicy.evaluate": {
"total": 112.19143493298054,
"count": 62603,
"self": 37.36012621596342,
"children": {
"TorchPolicy.sample_actions": {
"total": 74.83130871701712,
"count": 62603,
"self": 74.83130871701712
}
}
}
}
},
"workers": {
"total": 0.8978927409665403,
"count": 63434,
"self": 0.0,
"children": {
"worker_root": {
"total": 2111.9759014870206,
"count": 63434,
"is_parallel": true,
"self": 890.6667694830642,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0071472990000529535,
"count": 1,
"is_parallel": true,
"self": 0.004693347000170434,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00245395199988252,
"count": 8,
"is_parallel": true,
"self": 0.00245395199988252
}
}
},
"UnityEnvironment.step": {
"total": 0.049319598000010956,
"count": 1,
"is_parallel": true,
"self": 0.0004950559998633253,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004947160000483564,
"count": 1,
"is_parallel": true,
"self": 0.0004947160000483564
},
"communicator.exchange": {
"total": 0.04664073100002497,
"count": 1,
"is_parallel": true,
"self": 0.04664073100002497
},
"steps_from_proto": {
"total": 0.0016890950000743032,
"count": 1,
"is_parallel": true,
"self": 0.0004574220000677087,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012316730000065945,
"count": 8,
"is_parallel": true,
"self": 0.0012316730000065945
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1221.3091320039564,
"count": 63433,
"is_parallel": true,
"self": 29.196980398969345,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.362062910977556,
"count": 63433,
"is_parallel": true,
"self": 24.362062910977556
},
"communicator.exchange": {
"total": 1067.9131527820336,
"count": 63433,
"is_parallel": true,
"self": 1067.9131527820336
},
"steps_from_proto": {
"total": 99.83693591197584,
"count": 63433,
"is_parallel": true,
"self": 24.33309144214502,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.50384446983082,
"count": 507464,
"is_parallel": true,
"self": 75.50384446983082
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 652.821931762955,
"count": 63434,
"self": 2.8668696418804984,
"children": {
"process_trajectory": {
"total": 134.48664189906856,
"count": 63434,
"self": 134.29472890606814,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19191299300041464,
"count": 2,
"self": 0.19191299300041464
}
}
},
"_update_policy": {
"total": 515.468420222006,
"count": 399,
"self": 197.3544712229808,
"children": {
"TorchPPOOptimizer.update": {
"total": 318.1139489990252,
"count": 22812,
"self": 318.1139489990252
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.410002010350581e-07,
"count": 1,
"self": 8.410002010350581e-07
},
"TrainerController._save_models": {
"total": 0.08551858099963283,
"count": 1,
"self": 0.0013613749993055535,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08415720600032728,
"count": 1,
"self": 0.08415720600032728
}
}
}
}
}
}
}
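
The file above is a standard ML-Agents run log: the "gauges" block stores per-metric summaries (the last written value plus min/max over "count" summary writes), and the remainder is a hierarchical wall-clock profile in which each node reports its total time, call count, time spent in the node itself ("self"), and nested "children". Below is a minimal sketch, assuming a local copy saved as timers.json (that path is an assumption for illustration, not part of the log), of how the file can be loaded and summarized with the Python standard library.

import json

# Load the run log; "timers.json" is an assumed local path for this sketch.
with open("timers.json") as f:
    root = json.load(f)

# Each gauge records the last written value plus min/max over "count" writes.
for name, gauge in sorted(root["gauges"].items()):
    print(f"{name}: last={gauge['value']:.4g} "
          f"min={gauge['min']:.4g} max={gauge['max']:.4g} "
          f"(writes={gauge['count']})")

# The timer tree nests "children" blocks; "self" is the time spent in a
# node excluding its children. Walk it depth-first and print each node.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.2f}s "
          f"self={node.get('self', 0.0):.2f}s count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(root)

Run against this log, the tree walk makes the cost breakdown easy to read: for example, communicator.exchange (the Unity-to-Python round trip) accounts for most of the parallel UnityEnvironment.step time (about 1068 s of roughly 1221 s), with TorchPPOOptimizer.update (about 318 s) the largest trainer-side cost.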