{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.626391589641571,
"min": 0.626391589641571,
"max": 2.854173421859741,
"count": 350
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5980.787109375,
"min": 5980.787109375,
"max": 29229.58984375,
"count": 350
},
"SnowballTarget.Step.mean": {
"value": 3499984.0,
"min": 9952.0,
"max": 3499984.0,
"count": 350
},
"SnowballTarget.Step.sum": {
"value": 3499984.0,
"min": 9952.0,
"max": 3499984.0,
"count": 350
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 3.0205914974212646,
"min": 0.2688330411911011,
"max": 3.0528013706207275,
"count": 350
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 589.0153198242188,
"min": 52.15361022949219,
"max": 623.8485107421875,
"count": 350
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 350
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 350
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07336762151541787,
"min": 0.06155703776214924,
"max": 0.07584319527569973,
"count": 350
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2934704860616715,
"min": 0.24622815104859697,
"max": 0.37585181897088393,
"count": 350
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.07794808415586457,
"min": 0.06296267805730596,
"max": 0.1492786642207819,
"count": 350
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.3117923366234583,
"min": 0.25229862538330694,
"max": 0.7463933211039094,
"count": 350
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.5395284174856683e-06,
"min": 1.5395284174856683e-06,
"max": 0.000998453714440343,
"count": 350
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 6.158113669942673e-06,
"min": 6.158113669942673e-06,
"max": 0.004978125716473143,
"count": 350
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10015394285714287,
"min": 0.10015394285714287,
"max": 0.19984537142857142,
"count": 350
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40061577142857147,
"min": 0.40061577142857147,
"max": 0.9978125714285715,
"count": 350
},
"SnowballTarget.Policy.Beta.mean": {
"value": 1.768174857142834e-05,
"min": 1.768174857142834e-05,
"max": 0.004992284034285714,
"count": 350
},
"SnowballTarget.Policy.Beta.sum": {
"value": 7.072699428571336e-05,
"min": 7.072699428571336e-05,
"max": 0.02489084731428572,
"count": 350
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 29.636363636363637,
"min": 3.75,
"max": 30.204545454545453,
"count": 350
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1304.0,
"min": 165.0,
"max": 1657.0,
"count": 350
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 29.636363636363637,
"min": 3.75,
"max": 30.204545454545453,
"count": 350
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1304.0,
"min": 165.0,
"max": 1657.0,
"count": 350
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 350
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 350
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690015536",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690025518"
},
"total": 9981.979982791,
"count": 1,
"self": 0.4856626889977633,
"children": {
"run_training.setup": {
"total": 0.04059557400000813,
"count": 1,
"self": 0.04059557400000813
},
"TrainerController.start_learning": {
"total": 9981.453724528,
"count": 1,
"self": 10.63628053417051,
"children": {
"TrainerController._reset_env": {
"total": 5.281924189000051,
"count": 1,
"self": 5.281924189000051
},
"TrainerController.advance": {
"total": 9965.38093596983,
"count": 318201,
"self": 5.44633788988358,
"children": {
"env_step": {
"total": 9959.934598079946,
"count": 318201,
"self": 7544.282255256607,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2410.233643370331,
"count": 318201,
"self": 33.62752913572558,
"children": {
"TorchPolicy.evaluate": {
"total": 2376.6061142346052,
"count": 318201,
"self": 2376.6061142346052
}
}
},
"workers": {
"total": 5.4186994530081165,
"count": 318201,
"self": 0.0,
"children": {
"worker_root": {
"total": 9953.432258876956,
"count": 318201,
"is_parallel": true,
"self": 5308.832875886005,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005786140000054729,
"count": 1,
"is_parallel": true,
"self": 0.0041969110000081855,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015892290000465437,
"count": 10,
"is_parallel": true,
"self": 0.0015892290000465437
}
}
},
"UnityEnvironment.step": {
"total": 0.06278475100009473,
"count": 1,
"is_parallel": true,
"self": 0.0006350710000333493,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004064850000986553,
"count": 1,
"is_parallel": true,
"self": 0.0004064850000986553
},
"communicator.exchange": {
"total": 0.054383756999982324,
"count": 1,
"is_parallel": true,
"self": 0.054383756999982324
},
"steps_from_proto": {
"total": 0.0073594379999804005,
"count": 1,
"is_parallel": true,
"self": 0.0004525099999455051,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006906928000034895,
"count": 10,
"is_parallel": true,
"self": 0.006906928000034895
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4644.59938299095,
"count": 318200,
"is_parallel": true,
"self": 196.2637879577951,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 99.98777953723811,
"count": 318200,
"is_parallel": true,
"self": 99.98777953723811
},
"communicator.exchange": {
"total": 3666.015742334012,
"count": 318200,
"is_parallel": true,
"self": 3666.015742334012
},
"steps_from_proto": {
"total": 682.3320731619046,
"count": 318200,
"is_parallel": true,
"self": 129.15152190737774,
"children": {
"_process_rank_one_or_two_observation": {
"total": 553.1805512545269,
"count": 3182000,
"is_parallel": true,
"self": 553.1805512545269
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016209900059038773,
"count": 1,
"self": 0.00016209900059038773,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 9889.802976791265,
"count": 8098407,
"is_parallel": true,
"self": 191.83353965077367,
"children": {
"process_trajectory": {
"total": 4750.658224287499,
"count": 8098407,
"is_parallel": true,
"self": 4728.4453855315,
"children": {
"RLTrainer._checkpoint": {
"total": 22.212838755999996,
"count": 70,
"is_parallel": true,
"self": 22.212838755999996
}
}
},
"_update_policy": {
"total": 4947.311212852992,
"count": 1590,
"is_parallel": true,
"self": 2124.995968946659,
"children": {
"TorchPPOOptimizer.update": {
"total": 2822.315243906333,
"count": 135140,
"is_parallel": true,
"self": 2822.315243906333
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15442173600058595,
"count": 1,
"self": 0.0010912180005107075,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15333051800007524,
"count": 1,
"self": 0.15333051800007524
}
}
}
}
}
}
}
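
The snippet below is a minimal Python sketch, not part of the logged output, showing one way to read the gauges and timing totals above after the run. It assumes the JSON has been saved locally as results/SnowballTarget1/run_logs/timers.json; that path is an assumption based on the --run-id in the metadata, so adjust it to your own run directory.

    import json

    # Assumed path, derived from --run-id=SnowballTarget1 above; adjust as needed.
    with open("results/SnowballTarget1/run_logs/timers.json") as f:
        timers = json.load(f)

    # Mean cumulative reward reported by the SnowballTarget environment
    # (latest recorded value plus the min/max over the 350 recorded summaries).
    reward = timers["gauges"]["SnowballTarget.Environment.CumulativeReward.mean"]
    print(f"Cumulative reward: last={reward['value']:.2f}, "
          f"min={reward['min']:.2f}, max={reward['max']:.2f}")

    # Total wall-clock time of the run (seconds) and the command that launched it.
    print(f"Total run time: {timers['total']:.1f} s")
    print("Command:", timers["metadata"]["command_line_arguments"])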