{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.15378352999687195,
"min": 0.14064596593379974,
"max": 1.4660212993621826,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4638.111328125,
"min": 4194.62548828125,
"max": 44473.22265625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999872.0,
"min": 29952.0,
"max": 2999872.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999872.0,
"min": 29952.0,
"max": 2999872.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8294541239738464,
"min": -0.10551862418651581,
"max": 0.8681994080543518,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 253.81295776367188,
"min": -25.32447052001953,
"max": 263.0644226074219,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01605323702096939,
"min": -0.02165089175105095,
"max": 0.27688130736351013,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.912290573120117,
"min": -6.300409317016602,
"max": 67.28215789794922,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07083913966312584,
"min": 0.06442113338609749,
"max": 0.07348860550527683,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9917479552837618,
"min": 0.49137666816884235,
"max": 1.078233029954729,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013722563465631304,
"min": 0.00013341782571683818,
"max": 0.016837581999078838,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19211588851883826,
"min": 0.0017344317343188963,
"max": 0.23572614798710373,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.582506615388096e-06,
"min": 1.582506615388096e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.2155092615433342e-05,
"min": 2.2155092615433342e-05,
"max": 0.004052940949019699,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10052746904761907,
"min": 0.10052746904761907,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.407384566666667,
"min": 1.3962282666666668,
"max": 2.8425251666666673,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.269415785714289e-05,
"min": 6.269415785714289e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008777182100000006,
"min": 0.0008777182100000006,
"max": 0.13510293196999998,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005908201914280653,
"min": 0.005729589611291885,
"max": 0.4693025052547455,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08271482586860657,
"min": 0.0802142545580864,
"max": 3.2851176261901855,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 210.83916083916083,
"min": 210.83916083916083,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30150.0,
"min": 15984.0,
"max": 34476.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.789160823280161,
"min": -1.0000000521540642,
"max": 1.789160823280161,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 255.84999772906303,
"min": -30.994801610708237,
"max": 255.84999772906303,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.789160823280161,
"min": -1.0000000521540642,
"max": 1.789160823280161,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 255.84999772906303,
"min": -30.994801610708237,
"max": 255.84999772906303,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.013045026905013662,
"min": 0.013045026905013662,
"max": 9.668415855616331,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.8654388474169536,
"min": 1.6810851612390252,
"max": 154.6946536898613,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700127557",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700140292"
},
"total": 12735.791693758,
"count": 1,
"self": 1.3507895239981735,
"children": {
"run_training.setup": {
"total": 0.06350711100003537,
"count": 1,
"self": 0.06350711100003537
},
"TrainerController.start_learning": {
"total": 12734.377397123,
"count": 1,
"self": 7.848981506047494,
"children": {
"TrainerController._reset_env": {
"total": 1.969111182000006,
"count": 1,
"self": 1.969111182000006
},
"TrainerController.advance": {
"total": 12724.435185411954,
"count": 195193,
"self": 9.313524021841658,
"children": {
"env_step": {
"total": 8792.905527942057,
"count": 195193,
"self": 8230.656015608749,
"children": {
"SubprocessEnvManager._take_step": {
"total": 557.4007865475262,
"count": 195193,
"self": 25.922084647347674,
"children": {
"TorchPolicy.evaluate": {
"total": 531.4787019001785,
"count": 187566,
"self": 531.4787019001785
}
}
},
"workers": {
"total": 4.848725785781426,
"count": 195193,
"self": 0.0,
"children": {
"worker_root": {
"total": 12709.07262393727,
"count": 195193,
"is_parallel": true,
"self": 5102.826333066101,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029109170000083395,
"count": 1,
"is_parallel": true,
"self": 0.0010478810002041428,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018630359998041968,
"count": 8,
"is_parallel": true,
"self": 0.0018630359998041968
}
}
},
"UnityEnvironment.step": {
"total": 0.06398659900014536,
"count": 1,
"is_parallel": true,
"self": 0.0007122920001165767,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005405669999163365,
"count": 1,
"is_parallel": true,
"self": 0.0005405669999163365
},
"communicator.exchange": {
"total": 0.060743900000034046,
"count": 1,
"is_parallel": true,
"self": 0.060743900000034046
},
"steps_from_proto": {
"total": 0.0019898400000784022,
"count": 1,
"is_parallel": true,
"self": 0.00047563300017827714,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001514206999900125,
"count": 8,
"is_parallel": true,
"self": 0.001514206999900125
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 7606.24629087117,
"count": 195192,
"is_parallel": true,
"self": 152.92823860721,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 92.711893066725,
"count": 195192,
"is_parallel": true,
"self": 92.711893066725
},
"communicator.exchange": {
"total": 6926.769682427796,
"count": 195192,
"is_parallel": true,
"self": 6926.769682427796
},
"steps_from_proto": {
"total": 433.8364767694379,
"count": 195192,
"is_parallel": true,
"self": 95.34607191468308,
"children": {
"_process_rank_one_or_two_observation": {
"total": 338.49040485475484,
"count": 1561536,
"is_parallel": true,
"self": 338.49040485475484
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3922.2161334480543,
"count": 195193,
"self": 16.70470839783775,
"children": {
"process_trajectory": {
"total": 581.2622050202403,
"count": 195193,
"self": 580.3757249082387,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8864801120016637,
"count": 6,
"self": 0.8864801120016637
}
}
},
"_update_policy": {
"total": 3324.249220029976,
"count": 1405,
"self": 1329.3855336976992,
"children": {
"TorchPPOOptimizer.update": {
"total": 1994.863686332277,
"count": 68406,
"self": 1994.863686332277
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3980006769998e-06,
"count": 1,
"self": 1.3980006769998e-06
},
"TrainerController._save_models": {
"total": 0.12411762499868928,
"count": 1,
"self": 0.0025717649987200275,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12154585999996925,
"count": 1,
"self": 0.12154585999996925
}
}
}
}
}
}
}