{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8842992782592773,
"min": 0.8842992782592773,
"max": 2.8503754138946533,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8433.5625,
"min": 8433.5625,
"max": 29190.6953125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.964492797851562,
"min": 0.3306727111339569,
"max": 12.964492797851562,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2528.076171875,
"min": 64.15050506591797,
"max": 2625.30712890625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06629006122923731,
"min": 0.062426238639733025,
"max": 0.07345690578324995,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26516024491694923,
"min": 0.2497049545589321,
"max": 0.36728452891624974,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19624875610073408,
"min": 0.11921137527850292,
"max": 0.28981710108471853,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7849950244029363,
"min": 0.4768455011140117,
"max": 1.3667112264563055,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.295454545454547,
"min": 3.090909090909091,
"max": 25.763636363636362,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1113.0,
"min": 136.0,
"max": 1417.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.295454545454547,
"min": 3.090909090909091,
"max": 25.763636363636362,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1113.0,
"min": 136.0,
"max": 1417.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674149148",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674149575"
},
"total": 427.32587991599996,
"count": 1,
"self": 0.3843927819999635,
"children": {
"run_training.setup": {
"total": 0.11503783999998518,
"count": 1,
"self": 0.11503783999998518
},
"TrainerController.start_learning": {
"total": 426.826449294,
"count": 1,
"self": 0.5632997250013432,
"children": {
"TrainerController._reset_env": {
"total": 8.940588038000016,
"count": 1,
"self": 8.940588038000016
},
"TrainerController.advance": {
"total": 417.2055036669986,
"count": 18201,
"self": 0.30785835800230643,
"children": {
"env_step": {
"total": 416.8976453089963,
"count": 18201,
"self": 274.6571854420041,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.9632125719947,
"count": 18201,
"self": 1.4121229139891796,
"children": {
"TorchPolicy.evaluate": {
"total": 140.5510896580055,
"count": 18201,
"self": 31.52989102800356,
"children": {
"TorchPolicy.sample_actions": {
"total": 109.02119863000195,
"count": 18201,
"self": 109.02119863000195
}
}
}
}
},
"workers": {
"total": 0.27724729499752243,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 425.5224142809929,
"count": 18201,
"is_parallel": true,
"self": 203.42094964299355,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005789509999999609,
"count": 1,
"is_parallel": true,
"self": 0.003450697999994645,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002338812000004964,
"count": 10,
"is_parallel": true,
"self": 0.002338812000004964
}
}
},
"UnityEnvironment.step": {
"total": 0.032212319999985084,
"count": 1,
"is_parallel": true,
"self": 0.00048350699995580726,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00031809800000814903,
"count": 1,
"is_parallel": true,
"self": 0.00031809800000814903
},
"communicator.exchange": {
"total": 0.030019020000025876,
"count": 1,
"is_parallel": true,
"self": 0.030019020000025876
},
"steps_from_proto": {
"total": 0.0013916949999952521,
"count": 1,
"is_parallel": true,
"self": 0.0003363320000460135,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010553629999492387,
"count": 10,
"is_parallel": true,
"self": 0.0010553629999492387
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 222.10146463799936,
"count": 18200,
"is_parallel": true,
"self": 8.31601593500693,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.80794574100176,
"count": 18200,
"is_parallel": true,
"self": 4.80794574100176
},
"communicator.exchange": {
"total": 179.54004283199671,
"count": 18200,
"is_parallel": true,
"self": 179.54004283199671
},
"steps_from_proto": {
"total": 29.43746012999395,
"count": 18200,
"is_parallel": true,
"self": 6.419435548999758,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.018024580994194,
"count": 182000,
"is_parallel": true,
"self": 23.018024580994194
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.028499995456514e-05,
"count": 1,
"self": 4.028499995456514e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 414.32415932202883,
"count": 354922,
"is_parallel": true,
"self": 9.23941622603354,
"children": {
"process_trajectory": {
"total": 237.89404371599514,
"count": 354922,
"is_parallel": true,
"self": 237.14875121499514,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7452925009999944,
"count": 4,
"is_parallel": true,
"self": 0.7452925009999944
}
}
},
"_update_policy": {
"total": 167.19069938000015,
"count": 90,
"is_parallel": true,
"self": 42.31461850900078,
"children": {
"TorchPPOOptimizer.update": {
"total": 124.87608087099937,
"count": 4587,
"is_parallel": true,
"self": 124.87608087099937
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1170175790000485,
"count": 1,
"self": 0.0008093870000038805,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11620819200004462,
"count": 1,
"self": 0.11620819200004462
}
}
}
}
}
}
}