{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5914092659950256,
"min": 0.5893111228942871,
"max": 1.4677213430404663,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17761.203125,
"min": 17537.8984375,
"max": 44524.79296875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989949.0,
"min": 29952.0,
"max": 989949.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989949.0,
"min": 29952.0,
"max": 989949.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.1261201947927475,
"min": -0.34923824667930603,
"max": 0.21711064875125885,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 31.530048370361328,
"min": -82.76946258544922,
"max": 55.14610290527344,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025204800069332123,
"min": -0.001280400319956243,
"max": 0.43257439136505127,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.301199913024902,
"min": -0.32138046622276306,
"max": 102.52013397216797,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06728591255128336,
"min": 0.06556427793069802,
"max": 0.07352719386579468,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.942002775717967,
"min": 0.5008840780901247,
"max": 1.0190106177789287,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.007789448115443195,
"min": 0.00010343481080217545,
"max": 0.010216503568494809,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.10905227361620473,
"min": 0.0014480873512304564,
"max": 0.117371979493827,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.66344744555e-06,
"min": 7.66344744555e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001072882642377,
"min": 0.0001072882642377,
"max": 0.0032604002131999994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255444999999999,
"min": 0.10255444999999999,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357623,
"min": 1.3886848,
"max": 2.3868,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002651895550000001,
"min": 0.0002651895550000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003712653770000001,
"min": 0.003712653770000001,
"max": 0.10870132,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01552666537463665,
"min": 0.01552666537463665,
"max": 0.5615646243095398,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2173733115196228,
"min": 0.2173733115196228,
"max": 3.930952548980713,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 673.1463414634146,
"min": 673.1463414634146,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27599.0,
"min": 15984.0,
"max": 33431.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.6437072822960411,
"min": -1.0000000521540642,
"max": 0.8264633853261064,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 26.391998574137688,
"min": -31.99920167028904,
"max": 33.88499879837036,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.6437072822960411,
"min": -1.0000000521540642,
"max": 0.8264633853261064,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 26.391998574137688,
"min": -31.99920167028904,
"max": 33.88499879837036,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.10906141107652063,
"min": 0.10906141107652063,
"max": 11.398194424808025,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.471517854137346,
"min": 4.471517854137346,
"max": 182.3711107969284,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718186408",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718188608"
},
"total": 2200.465742006,
"count": 1,
"self": 0.647527412999807,
"children": {
"run_training.setup": {
"total": 0.054914188000054764,
"count": 1,
"self": 0.054914188000054764
},
"TrainerController.start_learning": {
"total": 2199.763300405,
"count": 1,
"self": 1.4302349780623445,
"children": {
"TrainerController._reset_env": {
"total": 3.10037201800003,
"count": 1,
"self": 3.10037201800003
},
"TrainerController.advance": {
"total": 2195.1437982349375,
"count": 63241,
"self": 1.456162030966425,
"children": {
"env_step": {
"total": 1558.736653119969,
"count": 63241,
"self": 1420.6301927249629,
"children": {
"SubprocessEnvManager._take_step": {
"total": 137.23265283699476,
"count": 63241,
"self": 4.8166962530012825,
"children": {
"TorchPolicy.evaluate": {
"total": 132.41595658399348,
"count": 62564,
"self": 132.41595658399348
}
}
},
"workers": {
"total": 0.8738075580113787,
"count": 63241,
"self": 0.0,
"children": {
"worker_root": {
"total": 2194.40116029797,
"count": 63241,
"is_parallel": true,
"self": 900.9212492189581,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.009752143000014257,
"count": 1,
"is_parallel": true,
"self": 0.008254776999990554,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014973660000237032,
"count": 8,
"is_parallel": true,
"self": 0.0014973660000237032
}
}
},
"UnityEnvironment.step": {
"total": 0.05192776799998455,
"count": 1,
"is_parallel": true,
"self": 0.0008202449999998862,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000526607999972839,
"count": 1,
"is_parallel": true,
"self": 0.000526607999972839
},
"communicator.exchange": {
"total": 0.048833838999996715,
"count": 1,
"is_parallel": true,
"self": 0.048833838999996715
},
"steps_from_proto": {
"total": 0.001747076000015113,
"count": 1,
"is_parallel": true,
"self": 0.0003702979998934097,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013767780001217034,
"count": 8,
"is_parallel": true,
"self": 0.0013767780001217034
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1293.4799110790118,
"count": 63240,
"is_parallel": true,
"self": 34.564377788030015,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.592697890988063,
"count": 63240,
"is_parallel": true,
"self": 24.592697890988063
},
"communicator.exchange": {
"total": 1131.231110088946,
"count": 63240,
"is_parallel": true,
"self": 1131.231110088946
},
"steps_from_proto": {
"total": 103.09172531104753,
"count": 63240,
"is_parallel": true,
"self": 21.417419034972568,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.67430627607496,
"count": 505920,
"is_parallel": true,
"self": 81.67430627607496
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 634.9509830840022,
"count": 63241,
"self": 2.550630025964324,
"children": {
"process_trajectory": {
"total": 130.80887365003616,
"count": 63241,
"self": 130.5409999890361,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26787366100006693,
"count": 2,
"self": 0.26787366100006693
}
}
},
"_update_policy": {
"total": 501.59147940800176,
"count": 440,
"self": 297.27792810998926,
"children": {
"TorchPPOOptimizer.update": {
"total": 204.3135512980125,
"count": 22776,
"self": 204.3135512980125
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.66999778029276e-07,
"count": 1,
"self": 9.66999778029276e-07
},
"TrainerController._save_models": {
"total": 0.08889420700006667,
"count": 1,
"self": 0.0013217339997027011,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08757247300036397,
"count": 1,
"self": 0.08757247300036397
}
}
}
}
}
}
}
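
For reference, a minimal sketch of how a profiling summary like the one above (top-level "gauges", "metadata", "total", and a nested timer tree) could be read back with Python. The path is an assumption based on the run id in command_line_arguments; mlagents-learn normally writes this file as run_logs/timers.json under the results folder for the run.

import json

# Assumed location for this run (--run-id=Pyramids); adjust to wherever the file lives.
with open("results/Pyramids/run_logs/timers.json") as f:
    timers = json.load(f)

# Report the latest value of each gauge along with its observed range.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# Total wall-clock time recorded for the run, in seconds.
print("total seconds:", timers["total"])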