{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5260794758796692,
"min": 0.5162389278411865,
"max": 1.5067613124847412,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15908.6435546875,
"min": 15445.869140625,
"max": 45709.109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989983.0,
"min": 29952.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989983.0,
"min": 29952.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.49786219000816345,
"min": -0.18923769891262054,
"max": 0.586122989654541,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 134.920654296875,
"min": -44.849334716796875,
"max": 162.35606384277344,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0040044416673481464,
"min": -0.019228234887123108,
"max": 0.35606899857521057,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.0852036476135254,
"min": -5.364677429199219,
"max": 84.38835144042969,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07029249128540126,
"min": 0.06539952745218246,
"max": 0.07345419479910668,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0543873692810188,
"min": 0.5078823952995851,
"max": 1.1018129219866002,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014884760766936879,
"min": 0.000670427912630423,
"max": 0.015437128184544556,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2232714115040532,
"min": 0.009385990776825922,
"max": 0.2232714115040532,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.585697471466664e-06,
"min": 7.585697471466664e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011378546207199996,
"min": 0.00011378546207199996,
"max": 0.0035078945307018996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252853333333335,
"min": 0.10252853333333335,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5379280000000002,
"min": 1.3691136000000002,
"max": 2.5692981,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002626004799999999,
"min": 0.0002626004799999999,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003939007199999999,
"min": 0.003939007199999999,
"max": 0.11695288019000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009213524870574474,
"min": 0.009213524870574474,
"max": 0.4031728208065033,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13820287585258484,
"min": 0.13548897206783295,
"max": 2.8222098350524902,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 369.38961038961037,
"min": 339.3058823529412,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28443.0,
"min": 15984.0,
"max": 32963.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5220545266162266,
"min": -1.0000000521540642,
"max": 1.6369464880851812,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 117.19819854944944,
"min": -32.000001668930054,
"max": 145.12999799102545,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5220545266162266,
"min": -1.0000000521540642,
"max": 1.6369464880851812,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 117.19819854944944,
"min": -32.000001668930054,
"max": 145.12999799102545,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03607453322041834,
"min": 0.035992192937960174,
"max": 8.11090998351574,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7777390579722123,
"min": 2.7777390579722123,
"max": 129.77455973625183,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676727815",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676730767"
},
"total": 2952.234315915,
"count": 1,
"self": 0.7902261170002021,
"children": {
"run_training.setup": {
"total": 0.13270480999972278,
"count": 1,
"self": 0.13270480999972278
},
"TrainerController.start_learning": {
"total": 2951.311384988,
"count": 1,
"self": 2.1671906180413316,
"children": {
"TrainerController._reset_env": {
"total": 8.267766016999758,
"count": 1,
"self": 8.267766016999758
},
"TrainerController.advance": {
"total": 2940.764433336959,
"count": 63710,
"self": 2.3791104378760792,
"children": {
"env_step": {
"total": 2035.716256969968,
"count": 63710,
"self": 1865.1640017069153,
"children": {
"SubprocessEnvManager._take_step": {
"total": 169.19386491804016,
"count": 63710,
"self": 6.4894162400914865,
"children": {
"TorchPolicy.evaluate": {
"total": 162.70444867794868,
"count": 62558,
"self": 55.77297327477754,
"children": {
"TorchPolicy.sample_actions": {
"total": 106.93147540317113,
"count": 62558,
"self": 106.93147540317113
}
}
}
}
},
"workers": {
"total": 1.3583903450125945,
"count": 63710,
"self": 0.0,
"children": {
"worker_root": {
"total": 2943.764384024003,
"count": 63710,
"is_parallel": true,
"self": 1244.4632353709367,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00226704399983646,
"count": 1,
"is_parallel": true,
"self": 0.0008741179990465753,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013929260007898847,
"count": 8,
"is_parallel": true,
"self": 0.0013929260007898847
}
}
},
"UnityEnvironment.step": {
"total": 0.05712765499993111,
"count": 1,
"is_parallel": true,
"self": 0.0006361919995470089,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005697920000784507,
"count": 1,
"is_parallel": true,
"self": 0.0005697920000784507
},
"communicator.exchange": {
"total": 0.05384455900002649,
"count": 1,
"is_parallel": true,
"self": 0.05384455900002649
},
"steps_from_proto": {
"total": 0.0020771120002791577,
"count": 1,
"is_parallel": true,
"self": 0.0005370620006033278,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00154004999967583,
"count": 8,
"is_parallel": true,
"self": 0.00154004999967583
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1699.3011486530663,
"count": 63709,
"is_parallel": true,
"self": 41.2980893110971,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.485431850878285,
"count": 63709,
"is_parallel": true,
"self": 30.485431850878285
},
"communicator.exchange": {
"total": 1497.8720901230508,
"count": 63709,
"is_parallel": true,
"self": 1497.8720901230508
},
"steps_from_proto": {
"total": 129.6455373680401,
"count": 63709,
"is_parallel": true,
"self": 32.52427127121655,
"children": {
"_process_rank_one_or_two_observation": {
"total": 97.12126609682355,
"count": 509672,
"is_parallel": true,
"self": 97.12126609682355
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 902.6690659291148,
"count": 63710,
"self": 4.080403488161664,
"children": {
"process_trajectory": {
"total": 206.16297196195592,
"count": 63710,
"self": 205.93708502395657,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22588693799934845,
"count": 2,
"self": 0.22588693799934845
}
}
},
"_update_policy": {
"total": 692.4256904789972,
"count": 445,
"self": 265.9654881970373,
"children": {
"TorchPPOOptimizer.update": {
"total": 426.46020228195994,
"count": 22788,
"self": 426.46020228195994
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.720000000204891e-06,
"count": 1,
"self": 1.720000000204891e-06
},
"TrainerController._save_models": {
"total": 0.11199329600003693,
"count": 1,
"self": 0.00180769600046915,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11018559999956778,
"count": 1,
"self": 0.11018559999956778
}
}
}
}
}
}
}