{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3964817225933075,
"min": 0.3714603781700134,
"max": 1.4161570072174072,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11938.857421875,
"min": 11084.3779296875,
"max": 42960.5390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989968.0,
"min": 29889.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989968.0,
"min": 29889.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.49740511178970337,
"min": -0.09687639027833939,
"max": 0.5385540127754211,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 136.28900146484375,
"min": -23.4440860748291,
"max": 149.17945861816406,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.011348957195878029,
"min": -0.011348957195878029,
"max": 0.4768191874027252,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.109614372253418,
"min": -3.109614372253418,
"max": 113.00614929199219,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06826920363421303,
"min": 0.06448889277107962,
"max": 0.07569878332396493,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9557688508789823,
"min": 0.5298914832677545,
"max": 1.0650669390064043,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01547891434051743,
"min": 0.0005451975451661822,
"max": 0.01547891434051743,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21670480076724402,
"min": 0.00708756808716037,
"max": 0.21670480076724402,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.7064974312e-06,
"min": 7.7064974312e-06,
"max": 0.00029523561587384284,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010789096403679999,
"min": 0.00010789096403679999,
"max": 0.0036338251887249994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256879999999999,
"min": 0.10256879999999999,
"max": 0.19841187142857142,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359631999999998,
"min": 1.3888831,
"max": 2.6112750000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026662312,
"min": 0.00026662312,
"max": 0.009841345955714285,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037327236800000006,
"min": 0.0037327236800000006,
"max": 0.12114637250000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015609106048941612,
"min": 0.015489430166780949,
"max": 0.548474133014679,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.21852748095989227,
"min": 0.21685202419757843,
"max": 3.8393189907073975,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 365.1625,
"min": 361.09638554216866,
"max": 996.0967741935484,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29213.0,
"min": 16688.0,
"max": 34326.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5848224801011384,
"min": -0.9324065040676824,
"max": 1.6073928344994783,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 126.78579840809107,
"min": -28.904601626098156,
"max": 135.02099809795618,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5848224801011384,
"min": -0.9324065040676824,
"max": 1.6073928344994783,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 126.78579840809107,
"min": -28.904601626098156,
"max": 135.02099809795618,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.058595473809691614,
"min": 0.058595473809691614,
"max": 10.524737319525551,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.687637904775329,
"min": 4.597298247710569,
"max": 178.92053443193436,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681627331",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681629423"
},
"total": 2091.680120649,
"count": 1,
"self": 0.4770355499999823,
"children": {
"run_training.setup": {
"total": 0.1085756739998942,
"count": 1,
"self": 0.1085756739998942
},
"TrainerController.start_learning": {
"total": 2091.094509425,
"count": 1,
"self": 1.2811323779678787,
"children": {
"TrainerController._reset_env": {
"total": 3.8221161689998553,
"count": 1,
"self": 3.8221161689998553
},
"TrainerController.advance": {
"total": 2085.8990290560323,
"count": 63707,
"self": 1.352631495046353,
"children": {
"env_step": {
"total": 1477.700713475048,
"count": 63707,
"self": 1375.8108469650822,
"children": {
"SubprocessEnvManager._take_step": {
"total": 101.10700351198079,
"count": 63707,
"self": 4.625225176978574,
"children": {
"TorchPolicy.evaluate": {
"total": 96.48177833500222,
"count": 62559,
"self": 96.48177833500222
}
}
},
"workers": {
"total": 0.7828629979849211,
"count": 63707,
"self": 0.0,
"children": {
"worker_root": {
"total": 2086.4761986929952,
"count": 63707,
"is_parallel": true,
"self": 817.2395403919902,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0015978869998889422,
"count": 1,
"is_parallel": true,
"self": 0.000493052999900101,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011048339999888412,
"count": 8,
"is_parallel": true,
"self": 0.0011048339999888412
}
}
},
"UnityEnvironment.step": {
"total": 0.049701269000024695,
"count": 1,
"is_parallel": true,
"self": 0.0005924609997691732,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004826200001843972,
"count": 1,
"is_parallel": true,
"self": 0.0004826200001843972
},
"communicator.exchange": {
"total": 0.04674107499999991,
"count": 1,
"is_parallel": true,
"self": 0.04674107499999991
},
"steps_from_proto": {
"total": 0.0018851130000712146,
"count": 1,
"is_parallel": true,
"self": 0.000431728999956249,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014533840001149656,
"count": 8,
"is_parallel": true,
"self": 0.0014533840001149656
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1269.236658301005,
"count": 63706,
"is_parallel": true,
"self": 31.963562746022944,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.778215380037636,
"count": 63706,
"is_parallel": true,
"self": 22.778215380037636
},
"communicator.exchange": {
"total": 1122.474027804942,
"count": 63706,
"is_parallel": true,
"self": 1122.474027804942
},
"steps_from_proto": {
"total": 92.02085237000256,
"count": 63706,
"is_parallel": true,
"self": 19.279097117788524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.74175525221403,
"count": 509648,
"is_parallel": true,
"self": 72.74175525221403
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 606.845684085938,
"count": 63707,
"self": 2.472470443935663,
"children": {
"process_trajectory": {
"total": 102.93256589500265,
"count": 63707,
"self": 102.73289349100264,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19967240400001174,
"count": 2,
"self": 0.19967240400001174
}
}
},
"_update_policy": {
"total": 501.44064774699973,
"count": 457,
"self": 320.0866044039997,
"children": {
"TorchPPOOptimizer.update": {
"total": 181.35404334300006,
"count": 22761,
"self": 181.35404334300006
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.139998837781604e-07,
"count": 1,
"self": 9.139998837781604e-07
},
"TrainerController._save_models": {
"total": 0.09223090800014688,
"count": 1,
"self": 0.0013782089999949676,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09085269900015192,
"count": 1,
"self": 0.09085269900015192
}
}
}
}
}
}
}