ppo-Huggy / run_logs / timers.json
qumingcheng's picture
Huggy
e8f2a9d
raw
history blame
17.5 kB
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4026694297790527,
"min": 1.4026694297790527,
"max": 1.425116777420044,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70369.1171875,
"min": 68344.328125,
"max": 75793.890625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 74.63388804841149,
"min": 72.8126843657817,
"max": 410.780487804878,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49333.0,
"min": 48866.0,
"max": 50526.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999963.0,
"min": 49912.0,
"max": 1999963.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999963.0,
"min": 49912.0,
"max": 1999963.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.473856210708618,
"min": 0.14796042442321777,
"max": 2.520597219467163,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1635.218994140625,
"min": 18.051172256469727,
"max": 1688.6378173828125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8746476789285484,
"min": 1.8574494901494902,
"max": 4.01082981518797,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2561.1421157717705,
"min": 226.6088377982378,
"max": 2671.212656915188,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8746476789285484,
"min": 1.8574494901494902,
"max": 4.01082981518797,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2561.1421157717705,
"min": 226.6088377982378,
"max": 2671.212656915188,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016671118202955564,
"min": 0.012149330651421528,
"max": 0.021227688041593258,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.050013354608866696,
"min": 0.024298661302843055,
"max": 0.05599136124510551,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.058106591676672294,
"min": 0.0211564381296436,
"max": 0.06266383371419376,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1743197750300169,
"min": 0.0423128762592872,
"max": 0.18799150114258129,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.825548724850008e-06,
"min": 3.825548724850008e-06,
"max": 0.00029530170156609996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1476646174550025e-05,
"min": 1.1476646174550025e-05,
"max": 0.0008438707687097499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10127515000000002,
"min": 0.10127515000000002,
"max": 0.19843390000000008,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30382545000000005,
"min": 0.20768089999999995,
"max": 0.5812902500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.362998500000014e-05,
"min": 7.362998500000014e-05,
"max": 0.00492185161,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022088995500000043,
"min": 0.00022088995500000043,
"max": 0.014066383474999996,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682463115",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682465616"
},
"total": 2501.610102544,
"count": 1,
"self": 0.44605590499986647,
"children": {
"run_training.setup": {
"total": 0.1164121360000081,
"count": 1,
"self": 0.1164121360000081
},
"TrainerController.start_learning": {
"total": 2501.047634503,
"count": 1,
"self": 4.6213965069641745,
"children": {
"TrainerController._reset_env": {
"total": 4.087252653999997,
"count": 1,
"self": 4.087252653999997
},
"TrainerController.advance": {
"total": 2492.210785482036,
"count": 233505,
"self": 4.961381038961008,
"children": {
"env_step": {
"total": 1955.9579693690885,
"count": 233505,
"self": 1654.1675941239089,
"children": {
"SubprocessEnvManager._take_step": {
"total": 298.6525508151327,
"count": 233505,
"self": 17.965234333263197,
"children": {
"TorchPolicy.evaluate": {
"total": 280.6873164818695,
"count": 222962,
"self": 280.6873164818695
}
}
},
"workers": {
"total": 3.137824430046834,
"count": 233505,
"self": 0.0,
"children": {
"worker_root": {
"total": 2492.6985292180498,
"count": 233505,
"is_parallel": true,
"self": 1141.3786725310836,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009693789999971614,
"count": 1,
"is_parallel": true,
"self": 0.00028539800001681215,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006839809999803492,
"count": 2,
"is_parallel": true,
"self": 0.0006839809999803492
}
}
},
"UnityEnvironment.step": {
"total": 0.027683233999994172,
"count": 1,
"is_parallel": true,
"self": 0.00028776199997082585,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024340699999925164,
"count": 1,
"is_parallel": true,
"self": 0.00024340699999925164
},
"communicator.exchange": {
"total": 0.026449232000004486,
"count": 1,
"is_parallel": true,
"self": 0.026449232000004486
},
"steps_from_proto": {
"total": 0.0007028330000196092,
"count": 1,
"is_parallel": true,
"self": 0.00021375900001885384,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004890740000007554,
"count": 2,
"is_parallel": true,
"self": 0.0004890740000007554
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1351.3198566869662,
"count": 233504,
"is_parallel": true,
"self": 39.14669986105332,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.95393891598016,
"count": 233504,
"is_parallel": true,
"self": 84.95393891598016
},
"communicator.exchange": {
"total": 1133.891663759992,
"count": 233504,
"is_parallel": true,
"self": 1133.891663759992
},
"steps_from_proto": {
"total": 93.32755414994065,
"count": 233504,
"is_parallel": true,
"self": 37.51387163401131,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.813682515929344,
"count": 467008,
"is_parallel": true,
"self": 55.813682515929344
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 531.2914350739866,
"count": 233505,
"self": 6.790127027938752,
"children": {
"process_trajectory": {
"total": 147.5037327460479,
"count": 233505,
"self": 146.09003661404824,
"children": {
"RLTrainer._checkpoint": {
"total": 1.413696131999643,
"count": 10,
"self": 1.413696131999643
}
}
},
"_update_policy": {
"total": 376.99757529999994,
"count": 97,
"self": 317.5987839580098,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.39879134199015,
"count": 2910,
"self": 59.39879134199015
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0250000741507392e-06,
"count": 1,
"self": 1.0250000741507392e-06
},
"TrainerController._save_models": {
"total": 0.12819883499969364,
"count": 1,
"self": 0.002056452999568137,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1261423820001255,
"count": 1,
"self": 0.1261423820001255
}
}
}
}
}
}
}