{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1854376792907715,
"min": 1.1854376792907715,
"max": 2.8613240718841553,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 11318.55859375,
"min": 11318.55859375,
"max": 29302.8203125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.806108474731445,
"min": 0.25068530440330505,
"max": 11.806108474731445,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2302.191162109375,
"min": 48.63294982910156,
"max": 2366.447265625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06690009850517883,
"min": 0.06334753987635029,
"max": 0.07432731980310671,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26760039402071534,
"min": 0.25399843378417086,
"max": 0.3631370357976694,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19338780824168056,
"min": 0.1189314908708683,
"max": 0.2781052999928886,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7735512329667222,
"min": 0.4757259634834732,
"max": 1.299523535101372,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.772727272727273,
"min": 3.659090909090909,
"max": 23.772727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1046.0,
"min": 161.0,
"max": 1284.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.772727272727273,
"min": 3.659090909090909,
"max": 23.772727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1046.0,
"min": 161.0,
"max": 1284.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1722247555",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1722248019"
},
"total": 464.149798072,
"count": 1,
"self": 0.43804139100001294,
"children": {
"run_training.setup": {
"total": 0.05939649900000177,
"count": 1,
"self": 0.05939649900000177
},
"TrainerController.start_learning": {
"total": 463.652360182,
"count": 1,
"self": 0.5826985689952267,
"children": {
"TrainerController._reset_env": {
"total": 2.6752464520000103,
"count": 1,
"self": 2.6752464520000103
},
"TrainerController.advance": {
"total": 460.2993107830048,
"count": 18201,
"self": 0.27532003100532165,
"children": {
"env_step": {
"total": 460.02399075199946,
"count": 18201,
"self": 297.4611716070018,
"children": {
"SubprocessEnvManager._take_step": {
"total": 162.2676460989997,
"count": 18201,
"self": 1.535369269001933,
"children": {
"TorchPolicy.evaluate": {
"total": 160.73227682999777,
"count": 18201,
"self": 160.73227682999777
}
}
},
"workers": {
"total": 0.2951730459979558,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 462.47806616100075,
"count": 18201,
"is_parallel": true,
"self": 234.39606818599566,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006372251000016149,
"count": 1,
"is_parallel": true,
"self": 0.00443051600001354,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001941735000002609,
"count": 10,
"is_parallel": true,
"self": 0.001941735000002609
}
}
},
"UnityEnvironment.step": {
"total": 0.03919167800000878,
"count": 1,
"is_parallel": true,
"self": 0.0007513199999493736,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004423580000434413,
"count": 1,
"is_parallel": true,
"self": 0.0004423580000434413
},
"communicator.exchange": {
"total": 0.035851377000028606,
"count": 1,
"is_parallel": true,
"self": 0.035851377000028606
},
"steps_from_proto": {
"total": 0.0021466229999873576,
"count": 1,
"is_parallel": true,
"self": 0.0004086430000143082,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017379799999730494,
"count": 10,
"is_parallel": true,
"self": 0.0017379799999730494
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 228.0819979750051,
"count": 18200,
"is_parallel": true,
"self": 10.315018286001532,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.716503569005511,
"count": 18200,
"is_parallel": true,
"self": 5.716503569005511
},
"communicator.exchange": {
"total": 176.8930764560024,
"count": 18200,
"is_parallel": true,
"self": 176.8930764560024
},
"steps_from_proto": {
"total": 35.15739966399565,
"count": 18200,
"is_parallel": true,
"self": 6.871596340975088,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.28580332302056,
"count": 182000,
"is_parallel": true,
"self": 28.28580332302056
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012640099998861842,
"count": 1,
"self": 0.00012640099998861842,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 454.3775410619849,
"count": 686051,
"is_parallel": true,
"self": 14.994522028993515,
"children": {
"process_trajectory": {
"total": 251.33136989099154,
"count": 686051,
"is_parallel": true,
"self": 250.63662834699159,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6947415439999531,
"count": 4,
"is_parallel": true,
"self": 0.6947415439999531
}
}
},
"_update_policy": {
"total": 188.05164914199986,
"count": 90,
"is_parallel": true,
"self": 59.007987408000986,
"children": {
"TorchPPOOptimizer.update": {
"total": 129.04366173399887,
"count": 4587,
"is_parallel": true,
"self": 129.04366173399887
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.094977976999985,
"count": 1,
"self": 0.0009541750000607863,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09402380199992422,
"count": 1,
"self": 0.09402380199992422
}
}
}
}
}
}
}