{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.41847214102745056,
"min": 0.4111345708370209,
"max": 1.4206733703613281,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12594.337890625,
"min": 12353.771484375,
"max": 43097.546875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989917.0,
"min": 29952.0,
"max": 989917.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989917.0,
"min": 29952.0,
"max": 989917.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.48761865496635437,
"min": -0.19656066596508026,
"max": 0.553560733795166,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 132.1446533203125,
"min": -46.584877014160156,
"max": 151.67564392089844,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.011708006262779236,
"min": -0.015151040628552437,
"max": 0.5884678959846497,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.1728696823120117,
"min": -3.9544215202331543,
"max": 139.46688842773438,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07129789938751076,
"min": 0.06438892503350659,
"max": 0.07416815378725924,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9981705914251506,
"min": 0.5191770765108147,
"max": 1.0528202614708184,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014649653129232493,
"min": 0.0006460525672714354,
"max": 0.018060780182704083,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2050951438092549,
"min": 0.005814473105442919,
"max": 0.24424495907242444,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.604261750992855e-06,
"min": 7.604261750992855e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010645966451389998,
"min": 0.00010645966451389998,
"max": 0.0036323470892177002,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253472142857144,
"min": 0.10253472142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4354861,
"min": 1.3886848,
"max": 2.6176328000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002632186707142856,
"min": 0.0002632186707142856,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036850613899999988,
"min": 0.0036850613899999988,
"max": 0.12109715177000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01422194205224514,
"min": 0.013834279961884022,
"max": 0.7068777084350586,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19910718500614166,
"min": 0.19367991387844086,
"max": 4.94814395904541,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 376.4246575342466,
"min": 345.58620689655174,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27479.0,
"min": 15984.0,
"max": 32653.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4591369755464056,
"min": -1.0000000521540642,
"max": 1.5951820279543216,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 106.51699921488762,
"min": -30.68500165641308,
"max": 137.93139803409576,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4591369755464056,
"min": -1.0000000521540642,
"max": 1.5951820279543216,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 106.51699921488762,
"min": -30.68500165641308,
"max": 137.93139803409576,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05580410962902035,
"min": 0.0539580090457715,
"max": 14.959889590740204,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.0737000029184856,
"min": 4.0737000029184856,
"max": 239.35823345184326,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674323622",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674325508"
},
"total": 1885.6171599600002,
"count": 1,
"self": 0.4233702240007915,
"children": {
"run_training.setup": {
"total": 0.0994880819998798,
"count": 1,
"self": 0.0994880819998798
},
"TrainerController.start_learning": {
"total": 1885.0943016539995,
"count": 1,
"self": 1.0985744838158098,
"children": {
"TrainerController._reset_env": {
"total": 5.8504338069997175,
"count": 1,
"self": 5.8504338069997175
},
"TrainerController.advance": {
"total": 1878.0591257391839,
"count": 63746,
"self": 1.1235332349501732,
"children": {
"env_step": {
"total": 1255.4810552161798,
"count": 63746,
"self": 1158.5313003803672,
"children": {
"SubprocessEnvManager._take_step": {
"total": 96.27021256398893,
"count": 63746,
"self": 3.950098298986177,
"children": {
"TorchPolicy.evaluate": {
"total": 92.32011426500276,
"count": 62568,
"self": 30.924605056981363,
"children": {
"TorchPolicy.sample_actions": {
"total": 61.395509208021394,
"count": 62568,
"self": 61.395509208021394
}
}
}
}
},
"workers": {
"total": 0.6795422718237205,
"count": 63746,
"self": 0.0,
"children": {
"worker_root": {
"total": 1881.736724373006,
"count": 63746,
"is_parallel": true,
"self": 811.0037077651409,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016148560002875456,
"count": 1,
"is_parallel": true,
"self": 0.0005901800000174262,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010246760002701194,
"count": 8,
"is_parallel": true,
"self": 0.0010246760002701194
}
}
},
"UnityEnvironment.step": {
"total": 0.04305098399981944,
"count": 1,
"is_parallel": true,
"self": 0.0004613060004885483,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040460699983668746,
"count": 1,
"is_parallel": true,
"self": 0.00040460699983668746
},
"communicator.exchange": {
"total": 0.04073958999970273,
"count": 1,
"is_parallel": true,
"self": 0.04073958999970273
},
"steps_from_proto": {
"total": 0.0014454809997914708,
"count": 1,
"is_parallel": true,
"self": 0.00036111700001129066,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010843639997801802,
"count": 8,
"is_parallel": true,
"self": 0.0010843639997801802
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1070.733016607865,
"count": 63745,
"is_parallel": true,
"self": 26.212361153108304,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.340600204971906,
"count": 63745,
"is_parallel": true,
"self": 20.340600204971906
},
"communicator.exchange": {
"total": 931.9952261419485,
"count": 63745,
"is_parallel": true,
"self": 931.9952261419485
},
"steps_from_proto": {
"total": 92.18482910783632,
"count": 63745,
"is_parallel": true,
"self": 19.79130723033677,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.39352187749955,
"count": 509960,
"is_parallel": true,
"self": 72.39352187749955
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 621.4545372880539,
"count": 63746,
"self": 2.226961416076392,
"children": {
"process_trajectory": {
"total": 137.32023143597735,
"count": 63746,
"self": 137.13215306697748,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18807836899986796,
"count": 2,
"self": 0.18807836899986796
}
}
},
"_update_policy": {
"total": 481.90734443600013,
"count": 452,
"self": 179.34432625195177,
"children": {
"TorchPPOOptimizer.update": {
"total": 302.56301818404836,
"count": 22824,
"self": 302.56301818404836
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.759999789413996e-07,
"count": 1,
"self": 7.759999789413996e-07
},
"TrainerController._save_models": {
"total": 0.08616684800017538,
"count": 1,
"self": 0.0014345160006996593,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08473233199947572,
"count": 1,
"self": 0.08473233199947572
}
}
}
}
}
}
}