{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4059253931045532,
"min": 1.4059253931045532,
"max": 1.4275312423706055,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69338.8359375,
"min": 67480.203125,
"max": 78967.1640625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 81.67603305785124,
"min": 79.36977491961414,
"max": 422.0504201680672,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49414.0,
"min": 49287.0,
"max": 50224.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999975.0,
"min": 49928.0,
"max": 1999975.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999975.0,
"min": 49928.0,
"max": 1999975.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.499358892440796,
"min": -0.07402683794498444,
"max": 2.5001392364501953,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1512.1121826171875,
"min": -8.735166549682617,
"max": 1545.660888671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.965329560662104,
"min": 1.767062255386579,
"max": 3.9977468198963573,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2399.024384200573,
"min": 208.5133461356163,
"max": 2456.6529887914658,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.965329560662104,
"min": 1.767062255386579,
"max": 3.9977468198963573,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2399.024384200573,
"min": 208.5133461356163,
"max": 2456.6529887914658,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018598945477020203,
"min": 0.012923321737131725,
"max": 0.02103996479159428,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.055796836431060606,
"min": 0.02584664347426345,
"max": 0.06311989437478284,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05995257053938177,
"min": 0.023072836672266325,
"max": 0.06097219015161196,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17985771161814532,
"min": 0.04614567334453265,
"max": 0.1829165704548359,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3463988845666795e-06,
"min": 3.3463988845666795e-06,
"max": 0.00029535667654777503,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0039196653700038e-05,
"min": 1.0039196653700038e-05,
"max": 0.0008439922686692499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111543333333335,
"min": 0.10111543333333335,
"max": 0.19845222500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30334630000000007,
"min": 0.20743315,
"max": 0.5813307500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.566012333333354e-05,
"min": 6.566012333333354e-05,
"max": 0.004922766027500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019698037000000064,
"min": 0.00019698037000000064,
"max": 0.014068404425,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671588779",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671591213"
},
"total": 2434.155237786,
"count": 1,
"self": 0.442129819999991,
"children": {
"run_training.setup": {
"total": 0.10757017399998858,
"count": 1,
"self": 0.10757017399998858
},
"TrainerController.start_learning": {
"total": 2433.6055377919997,
"count": 1,
"self": 4.5625349870474565,
"children": {
"TrainerController._reset_env": {
"total": 8.171639657000014,
"count": 1,
"self": 8.171639657000014
},
"TrainerController.advance": {
"total": 2420.7439709039522,
"count": 232317,
"self": 4.705772519945185,
"children": {
"env_step": {
"total": 1925.7646820150132,
"count": 232317,
"self": 1620.4214420510518,
"children": {
"SubprocessEnvManager._take_step": {
"total": 302.2858712670014,
"count": 232317,
"self": 15.875445235026461,
"children": {
"TorchPolicy.evaluate": {
"total": 286.41042603197496,
"count": 222862,
"self": 72.40134131186818,
"children": {
"TorchPolicy.sample_actions": {
"total": 214.00908472010678,
"count": 222862,
"self": 214.00908472010678
}
}
}
}
},
"workers": {
"total": 3.0573686969599976,
"count": 232317,
"self": 0.0,
"children": {
"worker_root": {
"total": 2424.591529738945,
"count": 232317,
"is_parallel": true,
"self": 1094.7910408250302,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022819929999968736,
"count": 1,
"is_parallel": true,
"self": 0.00031930300002613876,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001962689999970735,
"count": 2,
"is_parallel": true,
"self": 0.001962689999970735
}
}
},
"UnityEnvironment.step": {
"total": 0.0352473259999897,
"count": 1,
"is_parallel": true,
"self": 0.00030229099996859077,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019956300002377247,
"count": 1,
"is_parallel": true,
"self": 0.00019956300002377247
},
"communicator.exchange": {
"total": 0.03400807700000996,
"count": 1,
"is_parallel": true,
"self": 0.03400807700000996
},
"steps_from_proto": {
"total": 0.000737394999987373,
"count": 1,
"is_parallel": true,
"self": 0.00026107000002184577,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00047632499996552724,
"count": 2,
"is_parallel": true,
"self": 0.00047632499996552724
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1329.800488913915,
"count": 232316,
"is_parallel": true,
"self": 38.22829252399811,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.67093673900138,
"count": 232316,
"is_parallel": true,
"self": 78.67093673900138
},
"communicator.exchange": {
"total": 1111.3996779868996,
"count": 232316,
"is_parallel": true,
"self": 1111.3996779868996
},
"steps_from_proto": {
"total": 101.50158166401582,
"count": 232316,
"is_parallel": true,
"self": 41.080452915856824,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.42112874815899,
"count": 464632,
"is_parallel": true,
"self": 60.42112874815899
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 490.273516368994,
"count": 232317,
"self": 7.2316051079600925,
"children": {
"process_trajectory": {
"total": 161.11654374403378,
"count": 232317,
"self": 159.8704258480338,
"children": {
"RLTrainer._checkpoint": {
"total": 1.246117895999987,
"count": 10,
"self": 1.246117895999987
}
}
},
"_update_policy": {
"total": 321.92536751700015,
"count": 97,
"self": 266.90224942399976,
"children": {
"TorchPPOOptimizer.update": {
"total": 55.02311809300039,
"count": 2910,
"self": 55.02311809300039
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.097000222216593e-06,
"count": 1,
"self": 1.097000222216593e-06
},
"TrainerController._save_models": {
"total": 0.12739114699979837,
"count": 1,
"self": 0.002033143999597087,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12535800300020128,
"count": 1,
"self": 0.12535800300020128
}
}
}
}
}
}
}