{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0674829483032227,
"min": 1.0674829483032227,
"max": 2.86454701423645,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10192.3271484375,
"min": 10192.3271484375,
"max": 29367.3359375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.081567764282227,
"min": 0.48815402388572693,
"max": 12.081567764282227,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2355.90576171875,
"min": 94.7018814086914,
"max": 2439.36572265625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06958867145152124,
"min": 0.06455017384461255,
"max": 0.07511025282563459,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27835468580608497,
"min": 0.2591009800848505,
"max": 0.37555126412817297,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19600824054841903,
"min": 0.1501754716623063,
"max": 0.30688845123730457,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7840329621936761,
"min": 0.6007018866492252,
"max": 1.5344422561865227,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.977272727272727,
"min": 3.75,
"max": 24.022727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1055.0,
"min": 165.0,
"max": 1303.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.977272727272727,
"min": 3.75,
"max": 24.022727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1055.0,
"min": 165.0,
"max": 1303.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1721827031",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1721827516"
},
"total": 485.10289031799994,
"count": 1,
"self": 0.4354535349998514,
"children": {
"run_training.setup": {
"total": 0.06095278600014353,
"count": 1,
"self": 0.06095278600014353
},
"TrainerController.start_learning": {
"total": 484.60648399699994,
"count": 1,
"self": 0.6521972169896344,
"children": {
"TrainerController._reset_env": {
"total": 2.787145414999941,
"count": 1,
"self": 2.787145414999941
},
"TrainerController.advance": {
"total": 481.06775054301056,
"count": 18201,
"self": 0.30133144701449055,
"children": {
"env_step": {
"total": 480.76641909599607,
"count": 18201,
"self": 312.60575428999573,
"children": {
"SubprocessEnvManager._take_step": {
"total": 167.84810147199642,
"count": 18201,
"self": 1.6310776939922107,
"children": {
"TorchPolicy.evaluate": {
"total": 166.2170237780042,
"count": 18201,
"self": 166.2170237780042
}
}
},
"workers": {
"total": 0.3125633340039258,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 483.241308990976,
"count": 18201,
"is_parallel": true,
"self": 241.6660443359699,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00799593100009588,
"count": 1,
"is_parallel": true,
"self": 0.0038192010001694143,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004176729999926465,
"count": 10,
"is_parallel": true,
"self": 0.004176729999926465
}
}
},
"UnityEnvironment.step": {
"total": 0.03836967899997035,
"count": 1,
"is_parallel": true,
"self": 0.000678721999975096,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041743599990695657,
"count": 1,
"is_parallel": true,
"self": 0.00041743599990695657
},
"communicator.exchange": {
"total": 0.03479202800008352,
"count": 1,
"is_parallel": true,
"self": 0.03479202800008352
},
"steps_from_proto": {
"total": 0.0024814930000047752,
"count": 1,
"is_parallel": true,
"self": 0.00045586099940919667,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020256320005955786,
"count": 10,
"is_parallel": true,
"self": 0.0020256320005955786
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 241.5752646550061,
"count": 18200,
"is_parallel": true,
"self": 10.908235326997556,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.9491585559915166,
"count": 18200,
"is_parallel": true,
"self": 5.9491585559915166
},
"communicator.exchange": {
"total": 188.23852062802234,
"count": 18200,
"is_parallel": true,
"self": 188.23852062802234
},
"steps_from_proto": {
"total": 36.47935014399468,
"count": 18200,
"is_parallel": true,
"self": 7.20099269495654,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.27835744903814,
"count": 182000,
"is_parallel": true,
"self": 29.27835744903814
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00018581099993753014,
"count": 1,
"self": 0.00018581099993753014,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 475.0530258589513,
"count": 713111,
"is_parallel": true,
"self": 15.521833785943272,
"children": {
"process_trajectory": {
"total": 264.4968262370073,
"count": 713111,
"is_parallel": true,
"self": 263.54064874700725,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9561774900000728,
"count": 4,
"is_parallel": true,
"self": 0.9561774900000728
}
}
},
"_update_policy": {
"total": 195.0343658360007,
"count": 90,
"is_parallel": true,
"self": 59.593146410003556,
"children": {
"TorchPPOOptimizer.update": {
"total": 135.44121942599713,
"count": 4587,
"is_parallel": true,
"self": 135.44121942599713
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09920501099986723,
"count": 1,
"self": 0.0010472300000401447,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09815778099982708,
"count": 1,
"self": 0.09815778099982708
}
}
}
}
}
}
}