{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3278147578239441,
"min": 0.3278147578239441,
"max": 1.4880177974700928,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9787.2373046875,
"min": 9787.2373046875,
"max": 45140.5078125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989931.0,
"min": 29924.0,
"max": 989931.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989931.0,
"min": 29924.0,
"max": 989931.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5349764823913574,
"min": -0.15306486189365387,
"max": 0.5530892014503479,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 150.32839965820312,
"min": -36.42943572998047,
"max": 152.65261840820312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02129850722849369,
"min": -0.006660452112555504,
"max": 0.18710607290267944,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.984880447387695,
"min": -1.8449451923370361,
"max": 44.90545654296875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06855354905322505,
"min": 0.0646914835598889,
"max": 0.07305705788794198,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9597496867451507,
"min": 0.5844564631035358,
"max": 1.0148837436863687,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017782458136068834,
"min": 0.000240785437767003,
"max": 0.017782458136068834,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24895441390496367,
"min": 0.002648639815437033,
"max": 0.24895441390496367,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.452868944314288e-06,
"min": 7.452868944314288e-06,
"max": 0.0002947653392448875,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010434016522040003,
"min": 0.00010434016522040003,
"max": 0.0035072465309179002,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248425714285715,
"min": 0.10248425714285715,
"max": 0.1982551125,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347796000000002,
"min": 1.4347796000000002,
"max": 2.5690821,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002581772885714287,
"min": 0.0002581772885714287,
"max": 0.00982568573875,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036144820400000016,
"min": 0.0036144820400000016,
"max": 0.11693130179000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007111009210348129,
"min": 0.006715416442602873,
"max": 0.32559260725975037,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09955412894487381,
"min": 0.09401582926511765,
"max": 2.604740858078003,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 356.7816091954023,
"min": 336.8735632183908,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31040.0,
"min": 17362.0,
"max": 32753.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.528245951338061,
"min": -0.9999806972280625,
"max": 1.564570438624783,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 132.9573977664113,
"min": -31.998801663517952,
"max": 137.6821985989809,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.528245951338061,
"min": -0.9999806972280625,
"max": 1.564570438624783,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 132.9573977664113,
"min": -31.998801663517952,
"max": 137.6821985989809,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0259063337760109,
"min": 0.024837524914956458,
"max": 6.268548468347742,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.2538510385129484,
"min": 2.0366770430264296,
"max": 112.83387243025936,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673817520",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673819504"
},
"total": 1983.256521368,
"count": 1,
"self": 0.42552568699989024,
"children": {
"run_training.setup": {
"total": 0.10250232899988987,
"count": 1,
"self": 0.10250232899988987
},
"TrainerController.start_learning": {
"total": 1982.7284933520002,
"count": 1,
"self": 1.1489415319872478,
"children": {
"TrainerController._reset_env": {
"total": 6.197206898000104,
"count": 1,
"self": 6.197206898000104
},
"TrainerController.advance": {
"total": 1975.2840630390122,
"count": 63716,
"self": 1.241711115957287,
"children": {
"env_step": {
"total": 1327.5383560330422,
"count": 63716,
"self": 1228.7284332509764,
"children": {
"SubprocessEnvManager._take_step": {
"total": 98.1200690130413,
"count": 63716,
"self": 4.069057999027564,
"children": {
"TorchPolicy.evaluate": {
"total": 94.05101101401374,
"count": 62550,
"self": 31.926627588959718,
"children": {
"TorchPolicy.sample_actions": {
"total": 62.12438342505402,
"count": 62550,
"self": 62.12438342505402
}
}
}
}
},
"workers": {
"total": 0.6898537690244666,
"count": 63716,
"self": 0.0,
"children": {
"worker_root": {
"total": 1978.6144047849793,
"count": 63716,
"is_parallel": true,
"self": 843.6352793440315,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016353290000097331,
"count": 1,
"is_parallel": true,
"self": 0.0005941180002082547,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010412109998014785,
"count": 8,
"is_parallel": true,
"self": 0.0010412109998014785
}
}
},
"UnityEnvironment.step": {
"total": 0.042799306000006254,
"count": 1,
"is_parallel": true,
"self": 0.0004961329998423025,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043435100019451056,
"count": 1,
"is_parallel": true,
"self": 0.00043435100019451056
},
"communicator.exchange": {
"total": 0.04033565200006706,
"count": 1,
"is_parallel": true,
"self": 0.04033565200006706
},
"steps_from_proto": {
"total": 0.001533169999902384,
"count": 1,
"is_parallel": true,
"self": 0.0003821039999820641,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011510659999203199,
"count": 8,
"is_parallel": true,
"self": 0.0011510659999203199
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1134.9791254409479,
"count": 63715,
"is_parallel": true,
"self": 27.09342444988829,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.83237721495584,
"count": 63715,
"is_parallel": true,
"self": 21.83237721495584
},
"communicator.exchange": {
"total": 989.1790496909957,
"count": 63715,
"is_parallel": true,
"self": 989.1790496909957
},
"steps_from_proto": {
"total": 96.87427408510803,
"count": 63715,
"is_parallel": true,
"self": 21.113900835071036,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.76037325003699,
"count": 509720,
"is_parallel": true,
"self": 75.76037325003699
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 646.5039958900127,
"count": 63716,
"self": 2.1146254729658267,
"children": {
"process_trajectory": {
"total": 139.5880277450501,
"count": 63716,
"self": 139.38327755205046,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20475019299965425,
"count": 2,
"self": 0.20475019299965425
}
}
},
"_update_policy": {
"total": 504.80134267199674,
"count": 450,
"self": 193.77169609900034,
"children": {
"TorchPPOOptimizer.update": {
"total": 311.0296465729964,
"count": 22857,
"self": 311.0296465729964
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.58000327955233e-07,
"count": 1,
"self": 9.58000327955233e-07
},
"TrainerController._save_models": {
"total": 0.0982809250003811,
"count": 1,
"self": 0.001481639000303403,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0967992860000777,
"count": 1,
"self": 0.0967992860000777
}
}
}
}
}
}
}