MLAgents-Worm / run_logs / timers.json
{
"name": "root",
"gauges": {
"Worm.Policy.Entropy.mean": {
"value": 1.3740898370742798,
"min": 1.3740898370742798,
"max": 1.418938398361206,
"count": 33
},
"Worm.Policy.Entropy.sum": {
"value": 41222.6953125,
"min": 41222.6953125,
"max": 42568.15234375,
"count": 33
},
"Worm.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 33
},
"Worm.Environment.EpisodeLength.sum": {
"value": 29970.0,
"min": 29970.0,
"max": 29970.0,
"count": 33
},
"Worm.Step.mean": {
"value": 989000.0,
"min": 29000.0,
"max": 989000.0,
"count": 33
},
"Worm.Step.sum": {
"value": 989000.0,
"min": 29000.0,
"max": 989000.0,
"count": 33
},
"Worm.Policy.ExtrinsicValueEstimate.mean": {
"value": 10.496319770812988,
"min": -0.09954479336738586,
"max": 10.496319770812988,
"count": 33
},
"Worm.Policy.ExtrinsicValueEstimate.sum": {
"value": 314.88958740234375,
"min": -2.8867990970611572,
"max": 314.88958740234375,
"count": 33
},
"Worm.Environment.CumulativeReward.mean": {
"value": 102.74230194091797,
"min": 0.2761195733522375,
"max": 102.74230194091797,
"count": 33
},
"Worm.Environment.CumulativeReward.sum": {
"value": 3082.269058227539,
"min": 8.283587200567126,
"max": 3082.269058227539,
"count": 33
},
"Worm.Policy.ExtrinsicReward.mean": {
"value": 102.74230194091797,
"min": 0.2761195733522375,
"max": 102.74230194091797,
"count": 33
},
"Worm.Policy.ExtrinsicReward.sum": {
"value": 3082.269058227539,
"min": 8.283587200567126,
"max": 3082.269058227539,
"count": 33
},
"Worm.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Worm.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Worm.Losses.PolicyLoss.mean": {
"value": 0.01920895631164534,
"min": 0.014907625126874163,
"max": 0.021328854729377088,
"count": 32
},
"Worm.Losses.PolicyLoss.sum": {
"value": 0.01920895631164534,
"min": 0.014907625126874163,
"max": 0.021328854729377088,
"count": 32
},
"Worm.Losses.ValueLoss.mean": {
"value": 1.009602081208002,
"min": 0.0013987165681707363,
"max": 1.009602081208002,
"count": 32
},
"Worm.Losses.ValueLoss.sum": {
"value": 1.009602081208002,
"min": 0.0013987165681707363,
"max": 1.009602081208002,
"count": 32
},
"Worm.Policy.LearningRate.mean": {
"value": 1.2000096000000011e-05,
"min": 1.2000096000000011e-05,
"max": 0.0002910000029999999,
"count": 32
},
"Worm.Policy.LearningRate.sum": {
"value": 1.2000096000000011e-05,
"min": 1.2000096000000011e-05,
"max": 0.0002910000029999999,
"count": 32
},
"Worm.Policy.Epsilon.mean": {
"value": 0.10400000000000001,
"min": 0.10400000000000001,
"max": 0.19699999999999998,
"count": 32
},
"Worm.Policy.Epsilon.sum": {
"value": 0.10400000000000001,
"min": 0.10400000000000001,
"max": 0.19699999999999998,
"count": 32
},
"Worm.Policy.Beta.mean": {
"value": 0.00020960000000000022,
"min": 0.00020960000000000022,
"max": 0.0048503,
"count": 32
},
"Worm.Policy.Beta.sum": {
"value": 0.00020960000000000022,
"min": 0.00020960000000000022,
"max": 0.0048503,
"count": 32
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1656874047",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Worm.yaml --env=./trained-envs-executables/windows/Worm/Worm --run-id=Worm Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1656875392"
},
"total": 1345.2486994829997,
"count": 1,
"self": 0.3820568099999946,
"children": {
"run_training.setup": {
"total": 0.0428618349997123,
"count": 1,
"self": 0.0428618349997123
},
"TrainerController.start_learning": {
"total": 1344.823780838,
"count": 1,
"self": 1.9971949705359293,
"children": {
"TrainerController._reset_env": {
"total": 6.562111198999446,
"count": 1,
"self": 6.562111198999446
},
"TrainerController.advance": {
"total": 1336.1459261364625,
"count": 101000,
"self": 2.2407982939748763,
"children": {
"env_step": {
"total": 1093.5435471946976,
"count": 101000,
"self": 955.4766646016924,
"children": {
"SubprocessEnvManager._take_step": {
"total": 136.88376099503148,
"count": 101000,
"self": 9.594744014470962,
"children": {
"TorchPolicy.evaluate": {
"total": 127.28901698056052,
"count": 101000,
"self": 31.02972490713273,
"children": {
"TorchPolicy.sample_actions": {
"total": 96.25929207342779,
"count": 101000,
"self": 96.25929207342779
}
}
}
}
},
"workers": {
"total": 1.1831215979736953,
"count": 101000,
"self": 0.0,
"children": {
"worker_root": {
"total": 1340.4254784390741,
"count": 101000,
"is_parallel": true,
"self": 514.6929653151319,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001303256000028341,
"count": 1,
"is_parallel": true,
"self": 0.0005514980002772063,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007517579997511348,
"count": 2,
"is_parallel": true,
"self": 0.0007517579997511348
}
}
},
"UnityEnvironment.step": {
"total": 0.03363819100013643,
"count": 1,
"is_parallel": true,
"self": 0.00023389100078929914,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045814499935659114,
"count": 1,
"is_parallel": true,
"self": 0.00045814499935659114
},
"communicator.exchange": {
"total": 0.032364753000365454,
"count": 1,
"is_parallel": true,
"self": 0.032364753000365454
},
"steps_from_proto": {
"total": 0.0005814019996250863,
"count": 1,
"is_parallel": true,
"self": 0.00022956399971008068,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00035183799991500564,
"count": 2,
"is_parallel": true,
"self": 0.00035183799991500564
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 825.7325131239422,
"count": 100999,
"is_parallel": true,
"self": 17.67336344731939,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 33.863379608704236,
"count": 100999,
"is_parallel": true,
"self": 33.863379608704236
},
"communicator.exchange": {
"total": 726.6809996297743,
"count": 100999,
"is_parallel": true,
"self": 726.6809996297743
},
"steps_from_proto": {
"total": 47.514770438144296,
"count": 100999,
"is_parallel": true,
"self": 19.870279592914812,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.644490845229484,
"count": 201998,
"is_parallel": true,
"self": 27.644490845229484
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 240.3615806477901,
"count": 101000,
"self": 2.5159017896612568,
"children": {
"process_trajectory": {
"total": 66.82347189912707,
"count": 101000,
"self": 66.57114657912916,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2523253199979081,
"count": 2,
"self": 0.2523253199979081
}
}
},
"_update_policy": {
"total": 171.02220695900178,
"count": 33,
"self": 145.79854926497956,
"children": {
"TorchPPOOptimizer.update": {
"total": 25.223657694022222,
"count": 1386,
"self": 25.223657694022222
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5180012269411236e-06,
"count": 1,
"self": 1.5180012269411236e-06
},
"TrainerController._save_models": {
"total": 0.11854701400079648,
"count": 1,
"self": 0.002503236000848119,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11604377799994836,
"count": 1,
"self": 0.11604377799994836
}
}
}
}
}
}
}