{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9382531046867371,
"min": 0.9382531046867371,
"max": 2.863863706588745,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8958.4404296875,
"min": 8958.4404296875,
"max": 29328.828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.916909217834473,
"min": 0.3568263053894043,
"max": 12.916909217834473,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2518.79736328125,
"min": 69.22430419921875,
"max": 2611.98095703125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06629789480208345,
"min": 0.06309473126505812,
"max": 0.07516176878524872,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2651915792083338,
"min": 0.25237892506023246,
"max": 0.3758088439262436,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20105268287600256,
"min": 0.10790547902933231,
"max": 0.29046356597659634,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8042107315040102,
"min": 0.43162191611732925,
"max": 1.4523178298829817,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.636363636363637,
"min": 3.1136363636363638,
"max": 25.636363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1128.0,
"min": 137.0,
"max": 1410.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.636363636363637,
"min": 3.1136363636363638,
"max": 25.636363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1128.0,
"min": 137.0,
"max": 1410.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718189995",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718190519"
},
"total": 523.9329214200001,
"count": 1,
"self": 0.4435534799999914,
"children": {
"run_training.setup": {
"total": 0.06868051300000388,
"count": 1,
"self": 0.06868051300000388
},
"TrainerController.start_learning": {
"total": 523.4206874270001,
"count": 1,
"self": 0.7227511829856894,
"children": {
"TrainerController._reset_env": {
"total": 3.04605070599996,
"count": 1,
"self": 3.04605070599996
},
"TrainerController.advance": {
"total": 519.5546489470142,
"count": 18202,
"self": 0.33350899601464334,
"children": {
"env_step": {
"total": 519.2211399509996,
"count": 18202,
"self": 341.57100485398473,
"children": {
"SubprocessEnvManager._take_step": {
"total": 177.29452228100615,
"count": 18202,
"self": 1.765624556018338,
"children": {
"TorchPolicy.evaluate": {
"total": 175.52889772498781,
"count": 18202,
"self": 175.52889772498781
}
}
},
"workers": {
"total": 0.3556128160087155,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 521.800574462,
"count": 18202,
"is_parallel": true,
"self": 259.6614858809995,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005374671000026865,
"count": 1,
"is_parallel": true,
"self": 0.0036736990001600134,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017009719998668515,
"count": 10,
"is_parallel": true,
"self": 0.0017009719998668515
}
}
},
"UnityEnvironment.step": {
"total": 0.04756425000005038,
"count": 1,
"is_parallel": true,
"self": 0.0007153479999715273,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004355209999857834,
"count": 1,
"is_parallel": true,
"self": 0.0004355209999857834
},
"communicator.exchange": {
"total": 0.04408644400007233,
"count": 1,
"is_parallel": true,
"self": 0.04408644400007233
},
"steps_from_proto": {
"total": 0.0023269370000207346,
"count": 1,
"is_parallel": true,
"self": 0.0004339819998904204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018929550001303141,
"count": 10,
"is_parallel": true,
"self": 0.0018929550001303141
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 262.13908858100046,
"count": 18201,
"is_parallel": true,
"self": 11.77380685604112,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.167405628977804,
"count": 18201,
"is_parallel": true,
"self": 6.167405628977804
},
"communicator.exchange": {
"total": 204.80504381898402,
"count": 18201,
"is_parallel": true,
"self": 204.80504381898402
},
"steps_from_proto": {
"total": 39.392832276997524,
"count": 18201,
"is_parallel": true,
"self": 7.6308491069934234,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.7619831700041,
"count": 182010,
"is_parallel": true,
"self": 31.7619831700041
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001406180001595203,
"count": 1,
"self": 0.0001406180001595203,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 513.2075362960771,
"count": 734010,
"is_parallel": true,
"self": 16.401592581071895,
"children": {
"process_trajectory": {
"total": 285.428082164006,
"count": 734010,
"is_parallel": true,
"self": 284.02216603500597,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4059161290000475,
"count": 4,
"is_parallel": true,
"self": 1.4059161290000475
}
}
},
"_update_policy": {
"total": 211.37786155099923,
"count": 90,
"is_parallel": true,
"self": 67.0865895590008,
"children": {
"TorchPPOOptimizer.update": {
"total": 144.29127199199843,
"count": 4584,
"is_parallel": true,
"self": 144.29127199199843
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09709597300002315,
"count": 1,
"self": 0.0010068970000247646,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09608907599999839,
"count": 1,
"self": 0.09608907599999839
}
}
}
}
}
}
}