{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4065978527069092,
"min": 1.4065978527069092,
"max": 1.426931619644165,
"count": 66
},
"Huggy.Policy.Entropy.sum": {
"value": 42889.98046875,
"min": 40708.66796875,
"max": 46342.19921875,
"count": 66
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.47727272727273,
"min": 83.74011299435028,
"max": 401.7866666666667,
"count": 66
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 30088.0,
"min": 28987.0,
"max": 30422.0,
"count": 66
},
"Huggy.Step.mean": {
"value": 1979709.0,
"min": 29962.0,
"max": 1979709.0,
"count": 66
},
"Huggy.Step.sum": {
"value": 1979709.0,
"min": 29962.0,
"max": 1979709.0,
"count": 66
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4500532150268555,
"min": 0.16551850736141205,
"max": 2.4757072925567627,
"count": 66
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 862.4187622070312,
"min": 12.248369216918945,
"max": 867.419677734375,
"count": 66
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.806248088959943,
"min": 1.799411579966545,
"max": 3.883133729931655,
"count": 66
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1339.7993273139,
"min": 133.15645691752434,
"max": 1339.7993273139,
"count": 66
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.806248088959943,
"min": 1.799411579966545,
"max": 3.883133729931655,
"count": 66
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1339.7993273139,
"min": 133.15645691752434,
"max": 1339.7993273139,
"count": 66
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017106848221616625,
"min": 0.011600281985010952,
"max": 0.022020238154800607,
"count": 66
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03421369644323325,
"min": 0.011600281985010952,
"max": 0.04135468834429048,
"count": 66
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05233929064124823,
"min": 0.020054462427894274,
"max": 0.06298484069605668,
"count": 66
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10467858128249646,
"min": 0.020054462427894274,
"max": 0.12434903296331565,
"count": 66
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.942298352600014e-06,
"min": 4.942298352600014e-06,
"max": 0.0002968639510453499,
"count": 66
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.884596705200027e-06,
"min": 9.566196811299996e-06,
"max": 0.00057822735725755,
"count": 66
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10164740000000003,
"min": 0.10164740000000003,
"max": 0.19895465000000004,
"count": 66
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20329480000000005,
"min": 0.10318870000000001,
"max": 0.39274244999999997,
"count": 66
},
"Huggy.Policy.Beta.mean": {
"value": 9.220526000000022e-05,
"min": 9.220526000000022e-05,
"max": 0.004947837035000002,
"count": 66
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018441052000000045,
"min": 0.00016911613,
"max": 0.009637848255,
"count": 66
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1650462023",
"python_version": "3.8.13 (default, Mar 28 2022, 06:59:08) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\simon\\Anaconda3\\envs\\mlagents\\Scripts\\mlagents-learn configuration.yaml --run-id=Huggy_1",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.22.3",
"end_time_seconds": "1650464973"
},
"total": 2950.5478018000003,
"count": 1,
"self": 0.011704999999892607,
"children": {
"run_training.setup": {
"total": 0.14496459999999978,
"count": 1,
"self": 0.14496459999999978
},
"TrainerController.start_learning": {
"total": 2950.3911322000004,
"count": 1,
"self": 4.680211899995811,
"children": {
"TrainerController._reset_env": {
"total": 13.165518,
"count": 1,
"self": 13.165518
},
"TrainerController.advance": {
"total": 2932.4152388000048,
"count": 231716,
"self": 4.528217499994753,
"children": {
"env_step": {
"total": 2233.770497500003,
"count": 231716,
"self": 1508.5710400000391,
"children": {
"SubprocessEnvManager._take_step": {
"total": 721.7964393999812,
"count": 231716,
"self": 18.949565199852486,
"children": {
"TorchPolicy.evaluate": {
"total": 702.8468742001287,
"count": 222448,
"self": 469.1598053001096,
"children": {
"TorchPolicy.sample_actions": {
"total": 233.68706890001906,
"count": 222448,
"self": 233.68706890001906
}
}
}
}
},
"workers": {
"total": 3.403018099982507,
"count": 231716,
"self": 0.0,
"children": {
"worker_root": {
"total": 2933.761283299966,
"count": 231716,
"is_parallel": true,
"self": 1736.3023930999905,
"children": {
"steps_from_proto": {
"total": 0.002112600000000242,
"count": 1,
"is_parallel": true,
"self": 0.00023480000000120071,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018777999999990413,
"count": 2,
"is_parallel": true,
"self": 0.0018777999999990413
}
}
},
"UnityEnvironment.step": {
"total": 1197.4567775999758,
"count": 231716,
"is_parallel": true,
"self": 29.570126500104152,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.97191599995509,
"count": 231716,
"is_parallel": true,
"self": 83.97191599995509
},
"communicator.exchange": {
"total": 1015.2412128999453,
"count": 231716,
"is_parallel": true,
"self": 1015.2412128999453
},
"steps_from_proto": {
"total": 68.67352219997139,
"count": 231716,
"is_parallel": true,
"self": 24.178702500072326,
"children": {
"_process_rank_one_or_two_observation": {
"total": 44.49481969989906,
"count": 463432,
"is_parallel": true,
"self": 44.49481969989906
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 694.1165238000074,
"count": 231716,
"self": 6.849092999982986,
"children": {
"process_trajectory": {
"total": 217.24021400002636,
"count": 231716,
"self": 216.58489650002633,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6553175000000238,
"count": 4,
"self": 0.6553175000000238
}
}
},
"_update_policy": {
"total": 470.0272167999981,
"count": 97,
"self": 339.78801330000215,
"children": {
"TorchPPOOptimizer.update": {
"total": 130.23920349999594,
"count": 2910,
"self": 130.23920349999594
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.999998731771484e-07,
"count": 1,
"self": 6.999998731771484e-07
},
"TrainerController._save_models": {
"total": 0.13016280000010738,
"count": 1,
"self": 0.0036804000001211534,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12648239999998623,
"count": 1,
"self": 0.12648239999998623
}
}
}
}
}
}
}