{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.807567834854126,
"min": 2.807567834854126,
"max": 2.8739852905273438,
"count": 2
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 28968.484375,
"min": 28968.484375,
"max": 29432.482421875,
"count": 2
},
"SnowballTarget.Step.mean": {
"value": 19992.0,
"min": 9952.0,
"max": 19992.0,
"count": 2
},
"SnowballTarget.Step.sum": {
"value": 19992.0,
"min": 9952.0,
"max": 19992.0,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.2438939809799194,
"min": 0.2919357717037201,
"max": 1.2438939809799194,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 254.99826049804688,
"min": 56.63554000854492,
"max": 254.99826049804688,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 2
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06771399042781387,
"min": 0.06771399042781387,
"max": 0.0714372955381787,
"count": 2
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3385699521390694,
"min": 0.2857491821527148,
"max": 0.3385699521390694,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19390695398929073,
"min": 0.11701562528398034,
"max": 0.19390695398929073,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9695347699464536,
"min": 0.46806250113592135,
"max": 0.9695347699464536,
"count": 2
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.032007656e-05,
"min": 7.032007656e-05,
"max": 0.00021882002706000002,
"count": 2
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0003516003828,
"min": 0.0003516003828,
"max": 0.0008752801082400001,
"count": 2
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.12344000000000002,
"min": 0.12344000000000002,
"max": 0.17294,
"count": 2
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.6172000000000001,
"min": 0.6172000000000001,
"max": 0.69176,
"count": 2
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0011796560000000003,
"min": 0.0011796560000000003,
"max": 0.003649706,
"count": 2
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.005898280000000001,
"min": 0.005898280000000001,
"max": 0.014598824,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 5.290909090909091,
"min": 3.022727272727273,
"max": 5.290909090909091,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 291.0,
"min": 133.0,
"max": 291.0,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 5.290909090909091,
"min": 3.022727272727273,
"max": 5.290909090909091,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 291.0,
"min": 133.0,
"max": 291.0,
"count": 2
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679114819",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679114879"
},
"total": 59.35834561899998,
"count": 1,
"self": 0.9208360659999926,
"children": {
"run_training.setup": {
"total": 0.1376743430000147,
"count": 1,
"self": 0.1376743430000147
},
"TrainerController.start_learning": {
"total": 58.29983520999997,
"count": 1,
"self": 0.10030118900067464,
"children": {
"TrainerController._reset_env": {
"total": 9.269022235999955,
"count": 1,
"self": 9.269022235999955
},
"TrainerController.advance": {
"total": 48.679397180999274,
"count": 1870,
"self": 0.032275204000939084,
"children": {
"env_step": {
"total": 48.647121976998335,
"count": 1870,
"self": 35.29100767799963,
"children": {
"SubprocessEnvManager._take_step": {
"total": 13.325656398999286,
"count": 1870,
"self": 0.17023340199847325,
"children": {
"TorchPolicy.evaluate": {
"total": 13.155422997000812,
"count": 1870,
"self": 13.155422997000812
}
}
},
"workers": {
"total": 0.03045789999941917,
"count": 1870,
"self": 0.0,
"children": {
"worker_root": {
"total": 57.94905145899787,
"count": 1870,
"is_parallel": true,
"self": 32.02168140799881,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007045734000030279,
"count": 1,
"is_parallel": true,
"self": 0.005524985000079141,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001520748999951138,
"count": 10,
"is_parallel": true,
"self": 0.001520748999951138
}
}
},
"UnityEnvironment.step": {
"total": 0.09024463399998695,
"count": 1,
"is_parallel": true,
"self": 0.0005722440000113238,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000346121999996285,
"count": 1,
"is_parallel": true,
"self": 0.000346121999996285
},
"communicator.exchange": {
"total": 0.0874664419999931,
"count": 1,
"is_parallel": true,
"self": 0.0874664419999931
},
"steps_from_proto": {
"total": 0.001859825999986242,
"count": 1,
"is_parallel": true,
"self": 0.0003978289998940454,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014619970000921967,
"count": 10,
"is_parallel": true,
"self": 0.0014619970000921967
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 25.92737005099906,
"count": 1869,
"is_parallel": true,
"self": 1.0352703279970683,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.5810060680022957,
"count": 1869,
"is_parallel": true,
"self": 0.5810060680022957
},
"communicator.exchange": {
"total": 20.788842067999326,
"count": 1869,
"is_parallel": true,
"self": 20.788842067999326
},
"steps_from_proto": {
"total": 3.522251587000369,
"count": 1869,
"is_parallel": true,
"self": 0.7610518300056697,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.7611997569946993,
"count": 18690,
"is_parallel": true,
"self": 2.7611997569946993
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0003261130000282719,
"count": 1,
"self": 0.0003261130000282719,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 48.298664523995,
"count": 47944,
"is_parallel": true,
"self": 1.1808888979892345,
"children": {
"process_trajectory": {
"total": 27.353610033005793,
"count": 47944,
"is_parallel": true,
"self": 27.353610033005793
},
"_update_policy": {
"total": 19.764165592999973,
"count": 9,
"is_parallel": true,
"self": 6.974977577000232,
"children": {
"TorchPPOOptimizer.update": {
"total": 12.789188015999741,
"count": 456,
"is_parallel": true,
"self": 12.789188015999741
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2507884910000371,
"count": 1,
"self": 0.0011505240000246886,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2496379670000124,
"count": 1,
"self": 0.2496379670000124
}
}
}
}
}
}
}