ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5143924951553345,
"min": 0.5023782253265381,
"max": 1.494323968887329,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15546.998046875,
"min": 15031.1572265625,
"max": 45331.8125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989987.0,
"min": 29952.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989987.0,
"min": 29952.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.46740609407424927,
"min": -0.10455676168203354,
"max": 0.5233175754547119,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 124.33002471923828,
"min": -25.198179244995117,
"max": 141.81906127929688,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01770658791065216,
"min": -0.05731387063860893,
"max": 0.39933425188064575,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.709952354431152,
"min": -15.13086223602295,
"max": 94.64221954345703,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06770658391798708,
"min": 0.06583870771107429,
"max": 0.07350269913797079,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.015598758769806,
"min": 0.5145188939657955,
"max": 1.0290402652753983,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013227710613369902,
"min": 0.0004937471945972507,
"max": 0.01542022868497519,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19841565920054852,
"min": 0.0069124607243615105,
"max": 0.21993783793974822,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.566537477853326e-06,
"min": 7.566537477853326e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001134980621677999,
"min": 0.0001134980621677999,
"max": 0.0036327118890961,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252214666666666,
"min": 0.10252214666666666,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5378322,
"min": 1.3886848,
"max": 2.6109039000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002619624519999999,
"min": 0.0002619624519999999,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003929436779999998,
"min": 0.003929436779999998,
"max": 0.12110929961,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010869168676435947,
"min": 0.010869168676435947,
"max": 0.47818687558174133,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1630375236272812,
"min": 0.15456107258796692,
"max": 3.3473081588745117,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 409.231884057971,
"min": 361.3658536585366,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28237.0,
"min": 15984.0,
"max": 33093.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5327825872362524,
"min": -1.0000000521540642,
"max": 1.5694096249090619,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 105.76199851930141,
"min": -29.77300164848566,
"max": 130.26099886745214,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5327825872362524,
"min": -1.0000000521540642,
"max": 1.5694096249090619,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 105.76199851930141,
"min": -29.77300164848566,
"max": 130.26099886745214,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04625894990285683,
"min": 0.04269650720914753,
"max": 9.883540781214833,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1918675432971213,
"min": 3.1918675432971213,
"max": 158.13665249943733,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723363462",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1723365803"
},
"total": 2340.741963333,
"count": 1,
"self": 0.5362217219999366,
"children": {
"run_training.setup": {
"total": 0.05430095099995924,
"count": 1,
"self": 0.05430095099995924
},
"TrainerController.start_learning": {
"total": 2340.15144066,
"count": 1,
"self": 1.5185288539937574,
"children": {
"TrainerController._reset_env": {
"total": 2.1393548879998434,
"count": 1,
"self": 2.1393548879998434
},
"TrainerController.advance": {
"total": 2336.396304371006,
"count": 63692,
"self": 1.6047940660059794,
"children": {
"env_step": {
"total": 1672.5849237999723,
"count": 63692,
"self": 1523.2561234189704,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.35760632403185,
"count": 63692,
"self": 5.262145859973543,
"children": {
"TorchPolicy.evaluate": {
"total": 143.0954604640583,
"count": 62565,
"self": 143.0954604640583
}
}
},
"workers": {
"total": 0.9711940569700346,
"count": 63692,
"self": 0.0,
"children": {
"worker_root": {
"total": 2334.6043815019843,
"count": 63692,
"is_parallel": true,
"self": 947.6677975079922,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025336260000585753,
"count": 1,
"is_parallel": true,
"self": 0.0007642280002073676,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017693979998512077,
"count": 8,
"is_parallel": true,
"self": 0.0017693979998512077
}
}
},
"UnityEnvironment.step": {
"total": 0.04964649900011864,
"count": 1,
"is_parallel": true,
"self": 0.0006483100000878039,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045290499997463485,
"count": 1,
"is_parallel": true,
"self": 0.00045290499997463485
},
"communicator.exchange": {
"total": 0.046765875000119195,
"count": 1,
"is_parallel": true,
"self": 0.046765875000119195
},
"steps_from_proto": {
"total": 0.0017794089999370044,
"count": 1,
"is_parallel": true,
"self": 0.00037820199941052124,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014012070005264832,
"count": 8,
"is_parallel": true,
"self": 0.0014012070005264832
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1386.9365839939921,
"count": 63691,
"is_parallel": true,
"self": 35.659575632995484,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.182261847002792,
"count": 63691,
"is_parallel": true,
"self": 25.182261847002792
},
"communicator.exchange": {
"total": 1218.7040415280326,
"count": 63691,
"is_parallel": true,
"self": 1218.7040415280326
},
"steps_from_proto": {
"total": 107.39070498596129,
"count": 63691,
"is_parallel": true,
"self": 22.508965693762093,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.8817392921992,
"count": 509528,
"is_parallel": true,
"self": 84.8817392921992
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 662.2065865050279,
"count": 63692,
"self": 3.0002036149967353,
"children": {
"process_trajectory": {
"total": 135.8082810220385,
"count": 63692,
"self": 135.5911562250385,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21712479699999676,
"count": 2,
"self": 0.21712479699999676
}
}
},
"_update_policy": {
"total": 523.3981018679926,
"count": 451,
"self": 310.731110711037,
"children": {
"TorchPPOOptimizer.update": {
"total": 212.66699115695565,
"count": 22722,
"self": 212.66699115695565
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.027000052999938e-06,
"count": 1,
"self": 1.027000052999938e-06
},
"TrainerController._save_models": {
"total": 0.09725152000009984,
"count": 1,
"self": 0.0014614689998779795,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09579005100022187,
"count": 1,
"self": 0.09579005100022187
}
}
}
}
}
}
}
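
The log above pairs training gauges (policy entropy, losses, rewards) with a nested timer tree under "children". As a quick way to read it back, here is a minimal Python sketch (not part of the original run logs) that loads the file, prints each gauge's value/min/max, and walks the timer hierarchy; the path "run_logs/timers.json" is an assumption and should be adjusted to your own run directory.

import json

def walk_timers(name, node, depth=0):
    # Each timer node carries "total" (seconds), "count", "self", and
    # optionally a nested "children" dict of sub-timers.
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: total={total:.3f}s count={count}")
    for child_name, child in node.get("children", {}).items():
        walk_timers(child_name, child, depth + 1)

# Assumed path; point this at the timers.json from your own run.
with open("run_logs/timers.json") as f:
    root = json.load(f)

print("=== Gauges ===")
for gauge_name, gauge in root.get("gauges", {}).items():
    print(f"{gauge_name}: value={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f} count={gauge['count']}")

print("=== Timers ===")
walk_timers(root.get("name", "root"), root)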