{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6742886900901794,
"min": 0.6721416711807251,
"max": 1.3842403888702393,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 20153.140625,
"min": 20153.140625,
"max": 41992.31640625,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479910.0,
"min": 29996.0,
"max": 479910.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479910.0,
"min": 29996.0,
"max": 479910.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0009998594177886844,
"min": -0.09621106088161469,
"max": 0.01276826485991478,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.2449655532836914,
"min": -23.186864852905273,
"max": 3.038846969604492,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03132905438542366,
"min": 0.03132905438542366,
"max": 0.5412765741348267,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.6756181716918945,
"min": 7.6756181716918945,
"max": 128.82382202148438,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06983986733453897,
"min": 0.06526881362053823,
"max": 0.07415272995565701,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0475980100180844,
"min": 0.5038274561005495,
"max": 1.0475980100180844,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.004306636560674329,
"min": 0.00010371410663252939,
"max": 0.012851338419790715,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.06459954841011493,
"min": 0.001348283386222882,
"max": 0.089959368938535,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.103485298841333e-05,
"min": 2.103485298841333e-05,
"max": 0.00029047303174708567,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00031552279482619997,
"min": 0.00031552279482619997,
"max": 0.003085113271629,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10701158666666669,
"min": 0.10701158666666669,
"max": 0.19682434285714287,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.6051738000000002,
"min": 1.3777704000000002,
"max": 2.4221326,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0007104575079999999,
"min": 0.0007104575079999999,
"max": 0.00968275185142857,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.010656862619999999,
"min": 0.010656862619999999,
"max": 0.10286426290000002,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.031180718913674355,
"min": 0.031180718913674355,
"max": 0.6788924932479858,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.46771079301834106,
"min": 0.462179571390152,
"max": 4.752247333526611,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 900.9090909090909,
"min": 885.3333333333334,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29730.0,
"min": 16667.0,
"max": 32430.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.35584853047674353,
"min": -0.999819407539983,
"max": -0.30830592771663384,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -11.743001505732536,
"min": -30.99440163373947,
"max": -10.482401542365551,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.35584853047674353,
"min": -0.999819407539983,
"max": -0.30830592771663384,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -11.743001505732536,
"min": -30.99440163373947,
"max": -10.482401542365551,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.2935738717699706,
"min": 0.2935738717699706,
"max": 13.230402452104231,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 9.687937768409029,
"min": 9.687937768409029,
"max": 224.91684168577194,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1669238253",
"python_version": "3.7.15 (default, Oct 12 2022, 19:14:55) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1669239313"
},
"total": 1059.166157446,
"count": 1,
"self": 0.44317191499999353,
"children": {
"run_training.setup": {
"total": 0.07292283600003202,
"count": 1,
"self": 0.07292283600003202
},
"TrainerController.start_learning": {
"total": 1058.6500626949999,
"count": 1,
"self": 0.7315944790175308,
"children": {
"TrainerController._reset_env": {
"total": 12.279889690000005,
"count": 1,
"self": 12.279889690000005
},
"TrainerController.advance": {
"total": 1045.5424087189824,
"count": 31609,
"self": 0.8037784609789469,
"children": {
"env_step": {
"total": 671.1690453100025,
"count": 31609,
"self": 613.3330664429438,
"children": {
"SubprocessEnvManager._take_step": {
"total": 57.42272636403999,
"count": 31609,
"self": 2.4958523180122256,
"children": {
"TorchPolicy.evaluate": {
"total": 54.92687404602776,
"count": 31316,
"self": 18.69540972801883,
"children": {
"TorchPolicy.sample_actions": {
"total": 36.23146431800893,
"count": 31316,
"self": 36.23146431800893
}
}
}
}
},
"workers": {
"total": 0.4132525030187253,
"count": 31609,
"self": 0.0,
"children": {
"worker_root": {
"total": 1056.0520628660086,
"count": 31609,
"is_parallel": true,
"self": 499.2166750820163,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005752630000017689,
"count": 1,
"is_parallel": true,
"self": 0.004342835000102241,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001409794999915448,
"count": 8,
"is_parallel": true,
"self": 0.001409794999915448
}
}
},
"UnityEnvironment.step": {
"total": 0.04728041300000996,
"count": 1,
"is_parallel": true,
"self": 0.0005680020000795594,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044893799997680617,
"count": 1,
"is_parallel": true,
"self": 0.00044893799997680617
},
"communicator.exchange": {
"total": 0.04443254099999194,
"count": 1,
"is_parallel": true,
"self": 0.04443254099999194
},
"steps_from_proto": {
"total": 0.0018309319999616491,
"count": 1,
"is_parallel": true,
"self": 0.0004831749999425483,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013477570000191008,
"count": 8,
"is_parallel": true,
"self": 0.0013477570000191008
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 556.8353877839922,
"count": 31608,
"is_parallel": true,
"self": 14.482318291993124,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.095371672995725,
"count": 31608,
"is_parallel": true,
"self": 13.095371672995725
},
"communicator.exchange": {
"total": 479.25650388000355,
"count": 31608,
"is_parallel": true,
"self": 479.25650388000355
},
"steps_from_proto": {
"total": 50.001193938999904,
"count": 31608,
"is_parallel": true,
"self": 12.548865062003244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 37.45232887699666,
"count": 252864,
"is_parallel": true,
"self": 37.45232887699666
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 373.5695849480011,
"count": 31609,
"self": 1.2859233069930838,
"children": {
"process_trajectory": {
"total": 88.0393692000078,
"count": 31609,
"self": 87.81661983700775,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2227493630000481,
"count": 1,
"self": 0.2227493630000481
}
}
},
"_update_policy": {
"total": 284.2442924410002,
"count": 219,
"self": 111.21688866898762,
"children": {
"TorchPPOOptimizer.update": {
"total": 173.02740377201258,
"count": 11415,
"self": 173.02740377201258
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3049998415226582e-06,
"count": 1,
"self": 1.3049998415226582e-06
},
"TrainerController._save_models": {
"total": 0.09616850199995497,
"count": 1,
"self": 0.0016773950001152116,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09449110699983976,
"count": 1,
"self": 0.09449110699983976
}
}
}
}
}
}
}