{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4065850973129272,
"min": 1.4065850973129272,
"max": 1.428175926208496,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70114.046875,
"min": 66770.1875,
"max": 80739.4765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 77.71181102362205,
"min": 71.08946608946609,
"max": 404.991935483871,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49347.0,
"min": 48779.0,
"max": 50219.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999938.0,
"min": 49891.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999938.0,
"min": 49891.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4729881286621094,
"min": 0.18025438487529755,
"max": 2.516341209411621,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1570.347412109375,
"min": 22.171289443969727,
"max": 1716.81982421875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.884684631392712,
"min": 1.8598980166442025,
"max": 4.001122241995551,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2466.774740934372,
"min": 228.76745604723692,
"max": 2669.08352124691,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.884684631392712,
"min": 1.8598980166442025,
"max": 4.001122241995551,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2466.774740934372,
"min": 228.76745604723692,
"max": 2669.08352124691,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015814556495307542,
"min": 0.014242310735345301,
"max": 0.020058948326429044,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04744366948592263,
"min": 0.030700676398798046,
"max": 0.05864992073469087,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0631635488735305,
"min": 0.020826759623984496,
"max": 0.06480206803729137,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18949064662059148,
"min": 0.04165351924796899,
"max": 0.19340105392038823,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6171487943166665e-06,
"min": 3.6171487943166665e-06,
"max": 0.00029536410154529995,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0851446382949999e-05,
"min": 1.0851446382949999e-05,
"max": 0.0008441136186288,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120568333333335,
"min": 0.10120568333333335,
"max": 0.19845469999999993,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30361705000000005,
"min": 0.20753889999999997,
"max": 0.5813712000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.016359833333332e-05,
"min": 7.016359833333332e-05,
"max": 0.00492288953,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021049079499999997,
"min": 0.00021049079499999997,
"max": 0.014070422879999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671571300",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671573584"
},
"total": 2283.812969195,
"count": 1,
"self": 0.38739730099950975,
"children": {
"run_training.setup": {
"total": 0.1130311280001024,
"count": 1,
"self": 0.1130311280001024
},
"TrainerController.start_learning": {
"total": 2283.312540766,
"count": 1,
"self": 3.950057700119487,
"children": {
"TrainerController._reset_env": {
"total": 8.504283907999934,
"count": 1,
"self": 8.504283907999934
},
"TrainerController.advance": {
"total": 2270.7433008348803,
"count": 233276,
"self": 4.352788290924764,
"children": {
"env_step": {
"total": 1793.5890712780233,
"count": 233276,
"self": 1509.8442668571588,
"children": {
"SubprocessEnvManager._take_step": {
"total": 281.0986031319071,
"count": 233276,
"self": 15.054079376927234,
"children": {
"TorchPolicy.evaluate": {
"total": 266.0445237549799,
"count": 222974,
"self": 66.41928326000482,
"children": {
"TorchPolicy.sample_actions": {
"total": 199.62524049497506,
"count": 222974,
"self": 199.62524049497506
}
}
}
}
},
"workers": {
"total": 2.6462012889574,
"count": 233276,
"self": 0.0,
"children": {
"worker_root": {
"total": 2275.152259621095,
"count": 233276,
"is_parallel": true,
"self": 1032.0931932981134,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002240734999986671,
"count": 1,
"is_parallel": true,
"self": 0.0003480329999092646,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018927020000774064,
"count": 2,
"is_parallel": true,
"self": 0.0018927020000774064
}
}
},
"UnityEnvironment.step": {
"total": 0.02889223400006813,
"count": 1,
"is_parallel": true,
"self": 0.0002792420002606377,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019844199994167866,
"count": 1,
"is_parallel": true,
"self": 0.00019844199994167866
},
"communicator.exchange": {
"total": 0.027680396999926415,
"count": 1,
"is_parallel": true,
"self": 0.027680396999926415
},
"steps_from_proto": {
"total": 0.000734152999939397,
"count": 1,
"is_parallel": true,
"self": 0.00024970699996629264,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00048444599997310434,
"count": 2,
"is_parallel": true,
"self": 0.00048444599997310434
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1243.0590663229814,
"count": 233275,
"is_parallel": true,
"self": 35.10906822497145,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.17740589996345,
"count": 233275,
"is_parallel": true,
"self": 81.17740589996345
},
"communicator.exchange": {
"total": 1028.6840685340062,
"count": 233275,
"is_parallel": true,
"self": 1028.6840685340062
},
"steps_from_proto": {
"total": 98.08852366404005,
"count": 233275,
"is_parallel": true,
"self": 42.878331198085675,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.21019246595438,
"count": 466550,
"is_parallel": true,
"self": 55.21019246595438
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 472.8014412659322,
"count": 233276,
"self": 6.012011297984941,
"children": {
"process_trajectory": {
"total": 155.53299873294475,
"count": 233276,
"self": 154.34923417994457,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1837645530001737,
"count": 10,
"self": 1.1837645530001737
}
}
},
"_update_policy": {
"total": 311.25643123500254,
"count": 97,
"self": 258.722799184995,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.533632050007554,
"count": 2910,
"self": 52.533632050007554
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.529999260848854e-07,
"count": 1,
"self": 9.529999260848854e-07
},
"TrainerController._save_models": {
"total": 0.11489737000010791,
"count": 1,
"self": 0.002129014999809442,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11276835500029847,
"count": 1,
"self": 0.11276835500029847
}
}
}
}
}
}
}