{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5893661975860596,
"min": 0.568692147731781,
"max": 1.4758681058883667,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17784.71484375,
"min": 16900.833984375,
"max": 44771.93359375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989898.0,
"min": 29952.0,
"max": 989898.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989898.0,
"min": 29952.0,
"max": 989898.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2101958990097046,
"min": -0.1027693897485733,
"max": 0.2590601146221161,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 53.599952697753906,
"min": -24.664653778076172,
"max": 66.0603256225586,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.03355784714221954,
"min": -0.03355784714221954,
"max": 0.21750079095363617,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -8.5572509765625,
"min": -8.5572509765625,
"max": 52.200191497802734,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07104361438125904,
"min": 0.06510643669007397,
"max": 0.07418223902032015,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9946106013376266,
"min": 0.519275673142241,
"max": 1.008624666327081,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01049372204743988,
"min": 0.0002906726545064113,
"max": 0.01233073811211701,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.14691210866415833,
"min": 0.004069417163089758,
"max": 0.17263033356963814,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.538883201357144e-06,
"min": 7.538883201357144e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010554436481900002,
"min": 0.00010554436481900002,
"max": 0.0031388078537308,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251292857142857,
"min": 0.10251292857142857,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.435181,
"min": 1.3886848,
"max": 2.3587890000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002610415642857144,
"min": 0.0002610415642857144,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036545819000000012,
"min": 0.0036545819000000012,
"max": 0.10465229308,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010782822966575623,
"min": 0.010527980513870716,
"max": 0.34698358178138733,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15095952153205872,
"min": 0.14739172160625458,
"max": 2.428884983062744,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 574.14,
"min": 542.9433962264151,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28707.0,
"min": 15984.0,
"max": 32475.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.9457479740679264,
"min": -1.0000000521540642,
"max": 1.1928377046056513,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 47.28739870339632,
"min": -31.99920167028904,
"max": 63.22039834409952,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.9457479740679264,
"min": -1.0000000521540642,
"max": 1.1928377046056513,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 47.28739870339632,
"min": -31.99920167028904,
"max": 63.22039834409952,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06451316221151501,
"min": 0.06033620439614784,
"max": 6.906614917330444,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2256581105757505,
"min": 3.1978188329958357,
"max": 110.5058386772871,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679316152",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679318204"
},
"total": 2052.286290494,
"count": 1,
"self": 0.424629611999535,
"children": {
"run_training.setup": {
"total": 0.12311337300002378,
"count": 1,
"self": 0.12311337300002378
},
"TrainerController.start_learning": {
"total": 2051.738547509,
"count": 1,
"self": 1.313889352023125,
"children": {
"TrainerController._reset_env": {
"total": 7.3238711510000485,
"count": 1,
"self": 7.3238711510000485
},
"TrainerController.advance": {
"total": 2043.0090209279774,
"count": 63266,
"self": 1.3400074288706492,
"children": {
"env_step": {
"total": 1431.1082236640802,
"count": 63266,
"self": 1326.6766883310645,
"children": {
"SubprocessEnvManager._take_step": {
"total": 103.66538029603998,
"count": 63266,
"self": 4.591618385969923,
"children": {
"TorchPolicy.evaluate": {
"total": 99.07376191007006,
"count": 62557,
"self": 99.07376191007006
}
}
},
"workers": {
"total": 0.7661550369757606,
"count": 63266,
"self": 0.0,
"children": {
"worker_root": {
"total": 2047.5656384768954,
"count": 63266,
"is_parallel": true,
"self": 830.9217218268793,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002646167999955651,
"count": 1,
"is_parallel": true,
"self": 0.0008792199998879369,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001766948000067714,
"count": 8,
"is_parallel": true,
"self": 0.001766948000067714
}
}
},
"UnityEnvironment.step": {
"total": 0.04286649799996667,
"count": 1,
"is_parallel": true,
"self": 0.0005059499999333639,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045700200007559033,
"count": 1,
"is_parallel": true,
"self": 0.00045700200007559033
},
"communicator.exchange": {
"total": 0.039992156999915096,
"count": 1,
"is_parallel": true,
"self": 0.039992156999915096
},
"steps_from_proto": {
"total": 0.0019113890000426181,
"count": 1,
"is_parallel": true,
"self": 0.000656427000194526,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001254961999848092,
"count": 8,
"is_parallel": true,
"self": 0.001254961999848092
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1216.643916650016,
"count": 63265,
"is_parallel": true,
"self": 30.349315270889292,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.11040057405671,
"count": 63265,
"is_parallel": true,
"self": 22.11040057405671
},
"communicator.exchange": {
"total": 1074.713577264007,
"count": 63265,
"is_parallel": true,
"self": 1074.713577264007
},
"steps_from_proto": {
"total": 89.4706235410631,
"count": 63265,
"is_parallel": true,
"self": 18.9752240493126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 70.4953994917505,
"count": 506120,
"is_parallel": true,
"self": 70.4953994917505
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 610.5607898350265,
"count": 63266,
"self": 2.443337729064069,
"children": {
"process_trajectory": {
"total": 113.6138300089583,
"count": 63266,
"self": 113.41667365895864,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19715634999965914,
"count": 2,
"self": 0.19715634999965914
}
}
},
"_update_policy": {
"total": 494.50362209700415,
"count": 439,
"self": 314.2641772699617,
"children": {
"TorchPPOOptimizer.update": {
"total": 180.23944482704246,
"count": 22836,
"self": 180.23944482704246
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.439997145615052e-07,
"count": 1,
"self": 8.439997145615052e-07
},
"TrainerController._save_models": {
"total": 0.0917652339999222,
"count": 1,
"self": 0.0015074990001266997,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0902577349997955,
"count": 1,
"self": 0.0902577349997955
}
}
}
}
}
}
}