ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3996837139129639,
"min": 1.3996837139129639,
"max": 1.4261325597763062,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71466.453125,
"min": 68773.2421875,
"max": 76291.640625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 91.93738489871086,
"min": 83.65128205128205,
"max": 412.74380165289256,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49922.0,
"min": 48748.0,
"max": 50165.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999794.0,
"min": 49654.0,
"max": 1999794.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999794.0,
"min": 49654.0,
"max": 1999794.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3833022117614746,
"min": -0.0439065620303154,
"max": 2.4596683979034424,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1294.133056640625,
"min": -5.268787384033203,
"max": 1404.263427734375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.627664059674981,
"min": 1.994104227423668,
"max": 3.9991121053244982,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1969.8215844035149,
"min": 239.29250729084015,
"max": 2207.866935133934,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.627664059674981,
"min": 1.994104227423668,
"max": 3.9991121053244982,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1969.8215844035149,
"min": 239.29250729084015,
"max": 2207.866935133934,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015362618274795953,
"min": 0.013398732870822036,
"max": 0.02000887486162052,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04608785482438786,
"min": 0.026797465741644072,
"max": 0.05639019217778696,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.051266343105170455,
"min": 0.02335156494130691,
"max": 0.05672086816695002,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15379902931551137,
"min": 0.04670312988261382,
"max": 0.17016260450085005,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.642698785800006e-06,
"min": 3.642698785800006e-06,
"max": 0.00029532067655977496,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0928096357400018e-05,
"min": 1.0928096357400018e-05,
"max": 0.0008442253685915497,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10121420000000003,
"min": 0.10121420000000003,
"max": 0.19844022500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3036426000000001,
"min": 0.20760515000000002,
"max": 0.5814084500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.05885800000001e-05,
"min": 7.05885800000001e-05,
"max": 0.0049221672275,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002117657400000003,
"min": 0.0002117657400000003,
"max": 0.014072281654999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670844348",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670846461"
},
"total": 2112.6818090059996,
"count": 1,
"self": 0.38998422999975446,
"children": {
"run_training.setup": {
"total": 0.10245523300000059,
"count": 1,
"self": 0.10245523300000059
},
"TrainerController.start_learning": {
"total": 2112.189369543,
"count": 1,
"self": 3.566082455081414,
"children": {
"TrainerController._reset_env": {
"total": 10.024610192999944,
"count": 1,
"self": 10.024610192999944
},
"TrainerController.advance": {
"total": 2098.489552431919,
"count": 232327,
"self": 3.769028545911169,
"children": {
"env_step": {
"total": 1643.0964413170261,
"count": 232327,
"self": 1378.177156894927,
"children": {
"SubprocessEnvManager._take_step": {
"total": 262.4424918439904,
"count": 232327,
"self": 14.09532520505718,
"children": {
"TorchPolicy.evaluate": {
"total": 248.34716663893323,
"count": 223064,
"self": 62.83893556781163,
"children": {
"TorchPolicy.sample_actions": {
"total": 185.5082310711216,
"count": 223064,
"self": 185.5082310711216
}
}
}
}
},
"workers": {
"total": 2.4767925781086433,
"count": 232327,
"self": 0.0,
"children": {
"worker_root": {
"total": 2104.6747525999153,
"count": 232327,
"is_parallel": true,
"self": 974.6749462739601,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004402758000196627,
"count": 1,
"is_parallel": true,
"self": 0.0003502260003642732,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004052531999832354,
"count": 2,
"is_parallel": true,
"self": 0.004052531999832354
}
}
},
"UnityEnvironment.step": {
"total": 0.03252375900001425,
"count": 1,
"is_parallel": true,
"self": 0.00029468100001395214,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019648700003926933,
"count": 1,
"is_parallel": true,
"self": 0.00019648700003926933
},
"communicator.exchange": {
"total": 0.03130719199998566,
"count": 1,
"is_parallel": true,
"self": 0.03130719199998566
},
"steps_from_proto": {
"total": 0.0007253989999753685,
"count": 1,
"is_parallel": true,
"self": 0.00023542899998574285,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004899699999896256,
"count": 2,
"is_parallel": true,
"self": 0.0004899699999896256
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1129.9998063259552,
"count": 232326,
"is_parallel": true,
"self": 33.572759733019666,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 72.82675118494876,
"count": 232326,
"is_parallel": true,
"self": 72.82675118494876
},
"communicator.exchange": {
"total": 934.2671171749776,
"count": 232326,
"is_parallel": true,
"self": 934.2671171749776
},
"steps_from_proto": {
"total": 89.33317823300922,
"count": 232326,
"is_parallel": true,
"self": 36.73128612204778,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.601892110961444,
"count": 464652,
"is_parallel": true,
"self": 52.601892110961444
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 451.624082568982,
"count": 232327,
"self": 5.6224972379286555,
"children": {
"process_trajectory": {
"total": 139.60057856705157,
"count": 232327,
"self": 139.14061379505142,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4599647720001485,
"count": 4,
"self": 0.4599647720001485
}
}
},
"_update_policy": {
"total": 306.40100676400175,
"count": 97,
"self": 253.8647439840097,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.53626277999206,
"count": 2910,
"self": 52.53626277999206
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.530000741302501e-07,
"count": 1,
"self": 8.530000741302501e-07
},
"TrainerController._save_models": {
"total": 0.10912360999964221,
"count": 1,
"self": 0.0018834139996215526,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10724019600002066,
"count": 1,
"self": 0.10724019600002066
}
}
}
}
}
}
}
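
A minimal sketch (not part of the original log) of how the gauges and timer blocks above can be inspected offline with Python's standard library. The file path is an assumption based on this repository's layout; adjust it to wherever the file is stored locally.

import json

# Load the raw timer report that mlagents-learn writes to run_logs/ at the end of training.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus the min/max/count observed during training.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# Wall-clock totals (seconds) for the root timer and its top-level children.
print("total runtime:", timers["total"])
for child, block in timers["children"].items():
    print(f"  {child}: {block['total']:.1f}s")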