{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4396551847457886,
"min": 0.4396551847457886,
"max": 1.391548991203308,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13161.517578125,
"min": 13161.517578125,
"max": 42214.03125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.45281553268432617,
"min": -0.1800701916217804,
"max": 0.5031939744949341,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 122.7130126953125,
"min": -42.6766357421875,
"max": 135.86236572265625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03403672203421593,
"min": 0.0056115612387657166,
"max": 0.5939701795578003,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.22395133972168,
"min": 1.520733118057251,
"max": 140.77093505859375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06476632071861473,
"min": 0.06440121945662507,
"max": 0.07394794854310215,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9067284900606062,
"min": 0.4774663622572017,
"max": 1.074361404927913,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.018741669808155708,
"min": 0.001202503200516351,
"max": 0.018741669808155708,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.26238337731417993,
"min": 0.008417522403614458,
"max": 0.26238337731417993,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.384083252957147e-06,
"min": 7.384083252957147e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010337716554140006,
"min": 0.00010337716554140006,
"max": 0.0036324358891880997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246132857142858,
"min": 0.10246132857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344586000000001,
"min": 1.3691136000000002,
"max": 2.6108119000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025588672428571445,
"min": 0.00025588672428571445,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003582414140000002,
"min": 0.003582414140000002,
"max": 0.12110010881,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.017936760559678078,
"min": 0.017726516351103783,
"max": 0.6130093336105347,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2511146366596222,
"min": 0.2511146366596222,
"max": 4.291065216064453,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 398.9736842105263,
"min": 367.81481481481484,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30322.0,
"min": 15984.0,
"max": 31968.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4957262902079445,
"min": -1.0000000521540642,
"max": 1.5459999729480063,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 113.67519805580378,
"min": -32.000001668930054,
"max": 122.43579834699631,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4957262902079445,
"min": -1.0000000521540642,
"max": 1.5459999729480063,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 113.67519805580378,
"min": -32.000001668930054,
"max": 122.43579834699631,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07370381586090509,
"min": 0.07189388586848508,
"max": 11.804267760366201,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.601490005428786,
"min": 5.47153862711275,
"max": 188.86828416585922,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673638307",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673640440"
},
"total": 2132.170624842,
"count": 1,
"self": 0.4933884150000267,
"children": {
"run_training.setup": {
"total": 0.11826202100019145,
"count": 1,
"self": 0.11826202100019145
},
"TrainerController.start_learning": {
"total": 2131.558974406,
"count": 1,
"self": 1.2410631389784612,
"children": {
"TrainerController._reset_env": {
"total": 10.247740610999927,
"count": 1,
"self": 10.247740610999927
},
"TrainerController.advance": {
"total": 2119.973484765021,
"count": 63711,
"self": 1.3737178881015097,
"children": {
"env_step": {
"total": 1407.5015681459718,
"count": 63711,
"self": 1300.3228378689641,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.39662315901523,
"count": 63711,
"self": 4.436344639983872,
"children": {
"TorchPolicy.evaluate": {
"total": 101.96027851903136,
"count": 62581,
"self": 34.55857385299555,
"children": {
"TorchPolicy.sample_actions": {
"total": 67.40170466603581,
"count": 62581,
"self": 67.40170466603581
}
}
}
}
},
"workers": {
"total": 0.782107117992382,
"count": 63711,
"self": 0.0,
"children": {
"worker_root": {
"total": 2126.9092807480138,
"count": 63711,
"is_parallel": true,
"self": 929.5254545799667,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007137400999909005,
"count": 1,
"is_parallel": true,
"self": 0.004405902000144124,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002731498999764881,
"count": 8,
"is_parallel": true,
"self": 0.002731498999764881
}
}
},
"UnityEnvironment.step": {
"total": 0.0501198409999688,
"count": 1,
"is_parallel": true,
"self": 0.0005406759999004862,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046890499993423873,
"count": 1,
"is_parallel": true,
"self": 0.00046890499993423873
},
"communicator.exchange": {
"total": 0.04747559700012971,
"count": 1,
"is_parallel": true,
"self": 0.04747559700012971
},
"steps_from_proto": {
"total": 0.001634663000004366,
"count": 1,
"is_parallel": true,
"self": 0.0004108010002710216,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012238619997333444,
"count": 8,
"is_parallel": true,
"self": 0.0012238619997333444
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1197.383826168047,
"count": 63710,
"is_parallel": true,
"self": 30.362966142013192,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.214603850988624,
"count": 63710,
"is_parallel": true,
"self": 24.214603850988624
},
"communicator.exchange": {
"total": 1043.8963723280099,
"count": 63710,
"is_parallel": true,
"self": 1043.8963723280099
},
"steps_from_proto": {
"total": 98.90988384703542,
"count": 63710,
"is_parallel": true,
"self": 23.023201704008443,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.88668214302697,
"count": 509680,
"is_parallel": true,
"self": 75.88668214302697
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 711.0981987309476,
"count": 63711,
"self": 2.4343279908746354,
"children": {
"process_trajectory": {
"total": 153.22679905207133,
"count": 63711,
"self": 153.02374417107103,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20305488100029834,
"count": 2,
"self": 0.20305488100029834
}
}
},
"_update_policy": {
"total": 555.4370716880017,
"count": 447,
"self": 212.6245376719703,
"children": {
"TorchPPOOptimizer.update": {
"total": 342.81253401603135,
"count": 22806,
"self": 342.81253401603135
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.1600031737471e-07,
"count": 1,
"self": 9.1600031737471e-07
},
"TrainerController._save_models": {
"total": 0.09668497500024387,
"count": 1,
"self": 0.0015000450002844445,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09518492999995942,
"count": 1,
"self": 0.09518492999995942
}
}
}
}
}
}
}