{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4076530933380127,
"min": 1.4076530933380127,
"max": 1.4283275604248047,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70054.671875,
"min": 68346.5234375,
"max": 76451.8046875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 105.78833693304536,
"min": 98.4820717131474,
"max": 416.0833333333333,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48980.0,
"min": 48953.0,
"max": 50173.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999981.0,
"min": 49397.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999981.0,
"min": 49397.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.2543015480041504,
"min": -0.043683215975761414,
"max": 2.353395462036133,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1043.7415771484375,
"min": -5.198302745819092,
"max": 1138.61767578125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5007018219060053,
"min": 1.8603073175714797,
"max": 3.8366796333085067,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1620.8249435424805,
"min": 221.3765707910061,
"max": 1830.0961850881577,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5007018219060053,
"min": 1.8603073175714797,
"max": 3.8366796333085067,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1620.8249435424805,
"min": 221.3765707910061,
"max": 1830.0961850881577,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01880734783868926,
"min": 0.01301764731680224,
"max": 0.020109623441627873,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05642204351606778,
"min": 0.028850955479235076,
"max": 0.05714742003425878,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0529785597903861,
"min": 0.021720631637920935,
"max": 0.0529785597903861,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1589356793711583,
"min": 0.04344126327584187,
"max": 0.1589356793711583,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1166489611499964e-06,
"min": 3.1166489611499964e-06,
"max": 0.0002952949515683499,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.34994688344999e-06,
"min": 9.34994688344999e-06,
"max": 0.000843714018762,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10103885,
"min": 0.10103885,
"max": 0.19843164999999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30311655,
"min": 0.20721500000000004,
"max": 0.581238,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.183861499999995e-05,
"min": 6.183861499999995e-05,
"max": 0.004921739335,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018551584499999985,
"min": 0.00018551584499999985,
"max": 0.014063776200000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671110597",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671112906"
},
"total": 2309.931540391,
"count": 1,
"self": 0.4394172120005351,
"children": {
"run_training.setup": {
"total": 0.11859524699980284,
"count": 1,
"self": 0.11859524699980284
},
"TrainerController.start_learning": {
"total": 2309.373527932,
"count": 1,
"self": 3.907819113037476,
"children": {
"TrainerController._reset_env": {
"total": 10.735837357000037,
"count": 1,
"self": 10.735837357000037
},
"TrainerController.advance": {
"total": 2294.608417792963,
"count": 230862,
"self": 4.110854932147049,
"children": {
"env_step": {
"total": 1811.300789510949,
"count": 230862,
"self": 1525.5044853450718,
"children": {
"SubprocessEnvManager._take_step": {
"total": 283.1477390989155,
"count": 230862,
"self": 14.803389808928614,
"children": {
"TorchPolicy.evaluate": {
"total": 268.3443492899869,
"count": 222905,
"self": 67.3323117460252,
"children": {
"TorchPolicy.sample_actions": {
"total": 201.0120375439617,
"count": 222905,
"self": 201.0120375439617
}
}
}
}
},
"workers": {
"total": 2.648565066961737,
"count": 230862,
"self": 0.0,
"children": {
"worker_root": {
"total": 2301.465923960015,
"count": 230862,
"is_parallel": true,
"self": 1039.9994047848807,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002123191000009683,
"count": 1,
"is_parallel": true,
"self": 0.0003265070001816639,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017966839998280193,
"count": 2,
"is_parallel": true,
"self": 0.0017966839998280193
}
}
},
"UnityEnvironment.step": {
"total": 0.02815786900009698,
"count": 1,
"is_parallel": true,
"self": 0.0002542640002047847,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018198099996880046,
"count": 1,
"is_parallel": true,
"self": 0.00018198099996880046
},
"communicator.exchange": {
"total": 0.027002865999975256,
"count": 1,
"is_parallel": true,
"self": 0.027002865999975256
},
"steps_from_proto": {
"total": 0.0007187579999481386,
"count": 1,
"is_parallel": true,
"self": 0.00023035100002744002,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004884069999206986,
"count": 2,
"is_parallel": true,
"self": 0.0004884069999206986
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1261.4665191751344,
"count": 230861,
"is_parallel": true,
"self": 36.2371347401122,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.23490385896685,
"count": 230861,
"is_parallel": true,
"self": 80.23490385896685
},
"communicator.exchange": {
"total": 1048.1142477200174,
"count": 230861,
"is_parallel": true,
"self": 1048.1142477200174
},
"steps_from_proto": {
"total": 96.88023285603799,
"count": 230861,
"is_parallel": true,
"self": 39.866932276291436,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.013300579746556,
"count": 461722,
"is_parallel": true,
"self": 57.013300579746556
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 479.1967733498668,
"count": 230862,
"self": 5.935917099808648,
"children": {
"process_trajectory": {
"total": 146.84192475305986,
"count": 230862,
"self": 145.58572854906015,
"children": {
"RLTrainer._checkpoint": {
"total": 1.256196203999707,
"count": 10,
"self": 1.256196203999707
}
}
},
"_update_policy": {
"total": 326.4189314969983,
"count": 97,
"self": 272.23172464500635,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.18720685199196,
"count": 2910,
"self": 54.18720685199196
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1159995665366296e-06,
"count": 1,
"self": 1.1159995665366296e-06
},
"TrainerController._save_models": {
"total": 0.12145255299992641,
"count": 1,
"self": 0.0021054699996057025,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11934708300032071,
"count": 1,
"self": 0.11934708300032071
}
}
}
}
}
}
}