{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4053518772125244,
"min": 1.4053518772125244,
"max": 1.4308953285217285,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69688.5859375,
"min": 68662.4375,
"max": 78169.890625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 97.01764705882353,
"min": 83.17171717171718,
"max": 424.109243697479,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49479.0,
"min": 48813.0,
"max": 50469.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999916.0,
"min": 49838.0,
"max": 1999916.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999916.0,
"min": 49838.0,
"max": 1999916.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.357332229614258,
"min": 0.1940331757068634,
"max": 2.422971725463867,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1202.2393798828125,
"min": 22.89591407775879,
"max": 1416.935302734375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.576519626379013,
"min": 1.7813254074525025,
"max": 3.9796080649370262,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1824.0250094532967,
"min": 210.1963980793953,
"max": 2261.0962039232254,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.576519626379013,
"min": 1.7813254074525025,
"max": 3.9796080649370262,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1824.0250094532967,
"min": 210.1963980793953,
"max": 2261.0962039232254,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.013667445819818465,
"min": 0.013667445819818465,
"max": 0.01914104676494996,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0410023374594554,
"min": 0.02774603883014303,
"max": 0.055547526988084434,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04620972830388281,
"min": 0.020386556784311928,
"max": 0.05857649426907301,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.13862918491164844,
"min": 0.040773113568623856,
"max": 0.1684747022887071,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1769489410500097e-06,
"min": 3.1769489410500097e-06,
"max": 0.00029534505155165,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.53084682315003e-06,
"min": 9.53084682315003e-06,
"max": 0.0008441232186256,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10105895000000002,
"min": 0.10105895000000002,
"max": 0.19844835000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30317685000000005,
"min": 0.20727260000000003,
"max": 0.5813744000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.284160500000017e-05,
"min": 6.284160500000017e-05,
"max": 0.004922572665,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001885248150000005,
"min": 0.0001885248150000005,
"max": 0.014070582559999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678248817",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678251317"
},
"total": 2500.550535748,
"count": 1,
"self": 0.5952362819998598,
"children": {
"run_training.setup": {
"total": 0.10926673900002015,
"count": 1,
"self": 0.10926673900002015
},
"TrainerController.start_learning": {
"total": 2499.8460327270004,
"count": 1,
"self": 4.456642354907217,
"children": {
"TrainerController._reset_env": {
"total": 10.401419748000023,
"count": 1,
"self": 10.401419748000023
},
"TrainerController.advance": {
"total": 2484.825428870093,
"count": 231929,
"self": 4.839710199160891,
"children": {
"env_step": {
"total": 1925.0282561639926,
"count": 231929,
"self": 1609.991780906025,
"children": {
"SubprocessEnvManager._take_step": {
"total": 312.03929799297543,
"count": 231929,
"self": 16.722015842025996,
"children": {
"TorchPolicy.evaluate": {
"total": 295.31728215094944,
"count": 222904,
"self": 74.1212354180231,
"children": {
"TorchPolicy.sample_actions": {
"total": 221.19604673292633,
"count": 222904,
"self": 221.19604673292633
}
}
}
}
},
"workers": {
"total": 2.9971772649921604,
"count": 231929,
"self": 0.0,
"children": {
"worker_root": {
"total": 2490.9631029910543,
"count": 231929,
"is_parallel": true,
"self": 1184.8993555751065,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009789509999791335,
"count": 1,
"is_parallel": true,
"self": 0.000367496000023948,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006114549999551855,
"count": 2,
"is_parallel": true,
"self": 0.0006114549999551855
}
}
},
"UnityEnvironment.step": {
"total": 0.06549742300001071,
"count": 1,
"is_parallel": true,
"self": 0.00032503600004929467,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001967999999692438,
"count": 1,
"is_parallel": true,
"self": 0.0001967999999692438
},
"communicator.exchange": {
"total": 0.06426637300000948,
"count": 1,
"is_parallel": true,
"self": 0.06426637300000948
},
"steps_from_proto": {
"total": 0.0007092139999826941,
"count": 1,
"is_parallel": true,
"self": 0.0002358270000399898,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004733869999427043,
"count": 2,
"is_parallel": true,
"self": 0.0004733869999427043
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1306.0637474159478,
"count": 231928,
"is_parallel": true,
"self": 39.31855395901471,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.3289726089564,
"count": 231928,
"is_parallel": true,
"self": 81.3289726089564
},
"communicator.exchange": {
"total": 1091.2170359720103,
"count": 231928,
"is_parallel": true,
"self": 1091.2170359720103
},
"steps_from_proto": {
"total": 94.19918487596635,
"count": 231928,
"is_parallel": true,
"self": 37.91358288395088,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.28560199201547,
"count": 463856,
"is_parallel": true,
"self": 56.28560199201547
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 554.9574625069399,
"count": 231929,
"self": 7.168276507968358,
"children": {
"process_trajectory": {
"total": 168.50298382597356,
"count": 231929,
"self": 168.00265453597393,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5003292899996268,
"count": 4,
"self": 0.5003292899996268
}
}
},
"_update_policy": {
"total": 379.28620217299806,
"count": 97,
"self": 320.2163859620051,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.06981621099294,
"count": 2910,
"self": 59.06981621099294
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.490999693487538e-06,
"count": 1,
"self": 1.490999693487538e-06
},
"TrainerController._save_models": {
"total": 0.16254026300020996,
"count": 1,
"self": 0.003051195999887568,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1594890670003224,
"count": 1,
"self": 0.1594890670003224
}
}
}
}
}
}
}