{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39007964730262756,
"min": 0.39007964730262756,
"max": 1.4313101768493652,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11546.357421875,
"min": 11546.357421875,
"max": 43420.2265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989924.0,
"min": 29952.0,
"max": 989924.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989924.0,
"min": 29952.0,
"max": 989924.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.569145143032074,
"min": -0.09493271261453629,
"max": 0.6658176779747009,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 157.6531982421875,
"min": -22.8787841796875,
"max": 194.24618530273438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.005202829837799072,
"min": -0.00948827713727951,
"max": 0.48604050278663635,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.4411839246749878,
"min": -2.5713231563568115,
"max": 115.19159698486328,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06903162507279333,
"min": 0.06419442618769236,
"max": 0.07291301597262431,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9664427510191066,
"min": 0.4891955765581759,
"max": 1.0359565366331178,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014301904888443892,
"min": 0.0013636240519873835,
"max": 0.017613208401598964,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2002266684382145,
"min": 0.017727112675835984,
"max": 0.2554347384527015,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.44798323165714e-06,
"min": 7.44798323165714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010427176524319996,
"min": 0.00010427176524319996,
"max": 0.0036325324891558993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248262857142856,
"min": 0.10248262857142856,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347568,
"min": 1.3886848,
"max": 2.6108441,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002580145942857142,
"min": 0.0002580145942857142,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036122043199999987,
"min": 0.0036122043199999987,
"max": 0.12110332559,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010755806230008602,
"min": 0.010438631288707256,
"max": 0.647458553314209,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15058128535747528,
"min": 0.15058128535747528,
"max": 4.532209873199463,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 331.2926829268293,
"min": 277.83620689655174,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27166.0,
"min": 15984.0,
"max": 33386.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5955170639222716,
"min": -1.0000000521540642,
"max": 1.6886961388473327,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 130.83239924162626,
"min": -29.417601741850376,
"max": 195.77039861679077,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5955170639222716,
"min": -1.0000000521540642,
"max": 1.6886961388473327,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 130.83239924162626,
"min": -29.417601741850376,
"max": 195.77039861679077,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03729154423224206,
"min": 0.03181802893558252,
"max": 14.680876759812236,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.057906627043849,
"min": 3.057906627043849,
"max": 234.89402815699577,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679132117",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679134355"
},
"total": 2237.2870271419997,
"count": 1,
"self": 0.49314952999975503,
"children": {
"run_training.setup": {
"total": 0.1045499080000809,
"count": 1,
"self": 0.1045499080000809
},
"TrainerController.start_learning": {
"total": 2236.689327704,
"count": 1,
"self": 1.4638034699437412,
"children": {
"TrainerController._reset_env": {
"total": 5.781389474999742,
"count": 1,
"self": 5.781389474999742
},
"TrainerController.advance": {
"total": 2229.3454272930558,
"count": 64052,
"self": 1.5002008950650634,
"children": {
"env_step": {
"total": 1589.7788561920452,
"count": 64052,
"self": 1475.531661475873,
"children": {
"SubprocessEnvManager._take_step": {
"total": 113.37213210905202,
"count": 64052,
"self": 4.999922251127828,
"children": {
"TorchPolicy.evaluate": {
"total": 108.37220985792419,
"count": 62552,
"self": 108.37220985792419
}
}
},
"workers": {
"total": 0.8750626071200713,
"count": 64052,
"self": 0.0,
"children": {
"worker_root": {
"total": 2231.5150218540043,
"count": 64052,
"is_parallel": true,
"self": 877.9102417029994,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018768750001072476,
"count": 1,
"is_parallel": true,
"self": 0.0006603550009458559,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012165199991613918,
"count": 8,
"is_parallel": true,
"self": 0.0012165199991613918
}
}
},
"UnityEnvironment.step": {
"total": 0.046280665999802295,
"count": 1,
"is_parallel": true,
"self": 0.0005164009994587104,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000504741999975522,
"count": 1,
"is_parallel": true,
"self": 0.000504741999975522
},
"communicator.exchange": {
"total": 0.043580800000199815,
"count": 1,
"is_parallel": true,
"self": 0.043580800000199815
},
"steps_from_proto": {
"total": 0.0016787230001682474,
"count": 1,
"is_parallel": true,
"self": 0.0003681710009004746,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013105519992677728,
"count": 8,
"is_parallel": true,
"self": 0.0013105519992677728
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1353.604780151005,
"count": 64051,
"is_parallel": true,
"self": 31.376816915067593,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.440755797995735,
"count": 64051,
"is_parallel": true,
"self": 24.440755797995735
},
"communicator.exchange": {
"total": 1200.3159996269055,
"count": 64051,
"is_parallel": true,
"self": 1200.3159996269055
},
"steps_from_proto": {
"total": 97.47120781103604,
"count": 64051,
"is_parallel": true,
"self": 21.102433500889674,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.36877431014636,
"count": 512408,
"is_parallel": true,
"self": 76.36877431014636
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 638.0663702059455,
"count": 64052,
"self": 2.6996300689775126,
"children": {
"process_trajectory": {
"total": 123.44357178397149,
"count": 64052,
"self": 123.11635056497153,
"children": {
"RLTrainer._checkpoint": {
"total": 0.32722121899996637,
"count": 2,
"self": 0.32722121899996637
}
}
},
"_update_policy": {
"total": 511.9231683529965,
"count": 455,
"self": 326.32215405903435,
"children": {
"TorchPPOOptimizer.update": {
"total": 185.60101429396218,
"count": 22827,
"self": 185.60101429396218
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0510002539376728e-06,
"count": 1,
"self": 1.0510002539376728e-06
},
"TrainerController._save_models": {
"total": 0.09870641500037891,
"count": 1,
"self": 0.0018483070007278002,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09685810799965111,
"count": 1,
"self": 0.09685810799965111
}
}
}
}
}
}
}