{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3991444110870361,
"min": 1.3991444110870361,
"max": 1.4263895750045776,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68773.546875,
"min": 68269.2109375,
"max": 77274.6328125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.73555166374781,
"min": 84.58390410958904,
"max": 414.1570247933884,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48955.0,
"min": 48853.0,
"max": 50151.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999981.0,
"min": 49919.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999981.0,
"min": 49919.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.462353229522705,
"min": -0.03736243396997452,
"max": 2.463142156600952,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1406.003662109375,
"min": -4.483491897583008,
"max": 1421.2330322265625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8586119187901193,
"min": 1.7505057123800118,
"max": 4.064562246698295,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2203.267405629158,
"min": 210.06068548560143,
"max": 2277.041397511959,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8586119187901193,
"min": 1.7505057123800118,
"max": 4.064562246698295,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2203.267405629158,
"min": 210.06068548560143,
"max": 2277.041397511959,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016501361546235986,
"min": 0.01389935751176381,
"max": 0.02148483637283789,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04950408463870796,
"min": 0.02779871502352762,
"max": 0.06226362749924495,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05535527546372678,
"min": 0.024675470869988204,
"max": 0.057911147053043045,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16606582639118034,
"min": 0.04935094173997641,
"max": 0.17371333216627438,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4805988398333317e-06,
"min": 3.4805988398333317e-06,
"max": 0.00029527875157374996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0441796519499995e-05,
"min": 1.0441796519499995e-05,
"max": 0.0008437395187534998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10116016666666668,
"min": 0.10116016666666668,
"max": 0.19842624999999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30348050000000004,
"min": 0.20744984999999994,
"max": 0.5812465,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.789231666666664e-05,
"min": 6.789231666666664e-05,
"max": 0.004921469874999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002036769499999999,
"min": 0.0002036769499999999,
"max": 0.01406420035,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678251055",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678253462"
},
"total": 2406.799089698,
"count": 1,
"self": 0.44559081100032927,
"children": {
"run_training.setup": {
"total": 0.12248147899998685,
"count": 1,
"self": 0.12248147899998685
},
"TrainerController.start_learning": {
"total": 2406.231017408,
"count": 1,
"self": 4.1908615900133555,
"children": {
"TrainerController._reset_env": {
"total": 12.120638493000001,
"count": 1,
"self": 12.120638493000001
},
"TrainerController.advance": {
"total": 2389.8045960289865,
"count": 232494,
"self": 4.541882222979893,
"children": {
"env_step": {
"total": 1847.2193272760044,
"count": 232494,
"self": 1547.3691417169825,
"children": {
"SubprocessEnvManager._take_step": {
"total": 297.00860949293474,
"count": 232494,
"self": 15.775838159960927,
"children": {
"TorchPolicy.evaluate": {
"total": 281.2327713329738,
"count": 222912,
"self": 70.1954754479253,
"children": {
"TorchPolicy.sample_actions": {
"total": 211.0372958850485,
"count": 222912,
"self": 211.0372958850485
}
}
}
}
},
"workers": {
"total": 2.841576066087015,
"count": 232494,
"self": 0.0,
"children": {
"worker_root": {
"total": 2397.8160271970396,
"count": 232494,
"is_parallel": true,
"self": 1144.2471637690398,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012596690000066246,
"count": 1,
"is_parallel": true,
"self": 0.0004843870000286188,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007752819999780058,
"count": 2,
"is_parallel": true,
"self": 0.0007752819999780058
}
}
},
"UnityEnvironment.step": {
"total": 0.028745052999994414,
"count": 1,
"is_parallel": true,
"self": 0.0002727159999835749,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002117420000047332,
"count": 1,
"is_parallel": true,
"self": 0.0002117420000047332
},
"communicator.exchange": {
"total": 0.02755730600000561,
"count": 1,
"is_parallel": true,
"self": 0.02755730600000561
},
"steps_from_proto": {
"total": 0.0007032890000004954,
"count": 1,
"is_parallel": true,
"self": 0.00023511899999562047,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046817000000487496,
"count": 2,
"is_parallel": true,
"self": 0.00046817000000487496
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1253.5688634279998,
"count": 232493,
"is_parallel": true,
"self": 38.63276067603874,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.52463174703149,
"count": 232493,
"is_parallel": true,
"self": 78.52463174703149
},
"communicator.exchange": {
"total": 1045.3679785209847,
"count": 232493,
"is_parallel": true,
"self": 1045.3679785209847
},
"steps_from_proto": {
"total": 91.04349248394496,
"count": 232493,
"is_parallel": true,
"self": 36.76287938782394,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.280613096121016,
"count": 464986,
"is_parallel": true,
"self": 54.280613096121016
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 538.0433865300021,
"count": 232494,
"self": 6.194618699049329,
"children": {
"process_trajectory": {
"total": 168.42721278395203,
"count": 232494,
"self": 166.97223212495186,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4549806590001708,
"count": 10,
"self": 1.4549806590001708
}
}
},
"_update_policy": {
"total": 363.42155504700077,
"count": 97,
"self": 304.8726848529928,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.548870194007975,
"count": 2910,
"self": 58.548870194007975
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.629999683762435e-07,
"count": 1,
"self": 8.629999683762435e-07
},
"TrainerController._save_models": {
"total": 0.11492043299995203,
"count": 1,
"self": 0.0020213469997543143,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11289908600019771,
"count": 1,
"self": 0.11289908600019771
}
}
}
}
}
}
}