{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3773263692855835,
"min": 1.3773263692855835,
"max": 1.4231269359588623,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68070.2265625,
"min": 68070.2265625,
"max": 76697.90625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.10648148148148,
"min": 73.13224368499257,
"max": 397.57936507936506,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49317.0,
"min": 48834.0,
"max": 50095.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999936.0,
"min": 49713.0,
"max": 1999936.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999936.0,
"min": 49713.0,
"max": 1999936.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.514389753341675,
"min": 0.04993633180856705,
"max": 2.5256264209747314,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1629.3245849609375,
"min": 6.24204158782959,
"max": 1639.748046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.926974509877187,
"min": 1.8267483329772949,
"max": 4.0519389970914315,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2544.6794824004173,
"min": 228.34354162216187,
"max": 2585.1397720575333,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.926974509877187,
"min": 1.8267483329772949,
"max": 4.0519389970914315,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2544.6794824004173,
"min": 228.34354162216187,
"max": 2585.1397720575333,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015472133636164169,
"min": 0.013561197902163258,
"max": 0.019518072229645137,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04641640090849251,
"min": 0.027122395804326516,
"max": 0.058554216688935415,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05165168102830648,
"min": 0.020431418798398226,
"max": 0.05536146673063438,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15495504308491945,
"min": 0.04086283759679645,
"max": 0.16608440019190313,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.620648793150012e-06,
"min": 3.620648793150012e-06,
"max": 0.00029530110156629995,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0861946379450036e-05,
"min": 1.0861946379450036e-05,
"max": 0.0008438320687226498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120685,
"min": 0.10120685,
"max": 0.1984337,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30362055,
"min": 0.20755740000000003,
"max": 0.58127735,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.02218150000002e-05,
"min": 7.02218150000002e-05,
"max": 0.00492184163,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002106654450000006,
"min": 0.0002106654450000006,
"max": 0.014065739765,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670381209",
"python_version": "3.8.15 (default, Oct 12 2022, 19:14:39) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=zl-Huggy-01 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670383689"
},
"total": 2479.4554283260004,
"count": 1,
"self": 0.4410988940007883,
"children": {
"run_training.setup": {
"total": 0.1112992599998961,
"count": 1,
"self": 0.1112992599998961
},
"TrainerController.start_learning": {
"total": 2478.903030172,
"count": 1,
"self": 4.4074491239302915,
"children": {
"TrainerController._reset_env": {
"total": 9.851468468999883,
"count": 1,
"self": 9.851468468999883
},
"TrainerController.advance": {
"total": 2464.48457436807,
"count": 233192,
"self": 4.553547679026451,
"children": {
"env_step": {
"total": 1857.0126117689854,
"count": 233192,
"self": 1540.7918200229421,
"children": {
"SubprocessEnvManager._take_step": {
"total": 313.39049901499743,
"count": 233192,
"self": 15.574790799073071,
"children": {
"TorchPolicy.evaluate": {
"total": 297.81570821592436,
"count": 222920,
"self": 71.07128135381777,
"children": {
"TorchPolicy.sample_actions": {
"total": 226.7444268621066,
"count": 222920,
"self": 226.7444268621066
}
}
}
}
},
"workers": {
"total": 2.8302927310458017,
"count": 233192,
"self": 0.0,
"children": {
"worker_root": {
"total": 2470.34284792792,
"count": 233192,
"is_parallel": true,
"self": 1209.000966229926,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001907788999915283,
"count": 1,
"is_parallel": true,
"self": 0.00034060400002999813,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001567184999885285,
"count": 2,
"is_parallel": true,
"self": 0.001567184999885285
}
}
},
"UnityEnvironment.step": {
"total": 0.028911758000049304,
"count": 1,
"is_parallel": true,
"self": 0.0003004950001468387,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019699499989656033,
"count": 1,
"is_parallel": true,
"self": 0.00019699499989656033
},
"communicator.exchange": {
"total": 0.027691273000073124,
"count": 1,
"is_parallel": true,
"self": 0.027691273000073124
},
"steps_from_proto": {
"total": 0.00072299499993278,
"count": 1,
"is_parallel": true,
"self": 0.0002562779998243059,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046671700010847417,
"count": 2,
"is_parallel": true,
"self": 0.00046671700010847417
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1261.341881697994,
"count": 233191,
"is_parallel": true,
"self": 35.35063704581762,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.23386435201633,
"count": 233191,
"is_parallel": true,
"self": 79.23386435201633
},
"communicator.exchange": {
"total": 1048.9433284920326,
"count": 233191,
"is_parallel": true,
"self": 1048.9433284920326
},
"steps_from_proto": {
"total": 97.81405180812749,
"count": 233191,
"is_parallel": true,
"self": 42.275648078221366,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.53840372990612,
"count": 466382,
"is_parallel": true,
"self": 55.53840372990612
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 602.9184149200578,
"count": 233192,
"self": 6.57727992100331,
"children": {
"process_trajectory": {
"total": 166.4084211680506,
"count": 233192,
"self": 165.77136076605075,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6370604019998609,
"count": 4,
"self": 0.6370604019998609
}
}
},
"_update_policy": {
"total": 429.93271383100387,
"count": 97,
"self": 346.5880613409988,
"children": {
"TorchPPOOptimizer.update": {
"total": 83.34465249000505,
"count": 3880,
"self": 83.34465249000505
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.229999366449192e-07,
"count": 1,
"self": 8.229999366449192e-07
},
"TrainerController._save_models": {
"total": 0.15953738799998973,
"count": 1,
"self": 0.0027015379996555566,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15683585000033418,
"count": 1,
"self": 0.15683585000033418
}
}
}
}
}
}
}