{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.14210930466651917,
"min": 0.12996448576450348,
"max": 1.4769505262374878,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4197.34033203125,
"min": 3953.0,
"max": 44804.76953125,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999960.0,
"min": 29952.0,
"max": 2999960.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999960.0,
"min": 29952.0,
"max": 2999960.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8496698141098022,
"min": -0.09999418258666992,
"max": 0.9217113852500916,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 255.7506103515625,
"min": -23.99860382080078,
"max": 282.0020751953125,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.010856772772967815,
"min": -0.023049956187605858,
"max": 0.330189973115921,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.2678885459899902,
"min": -6.730587005615234,
"max": 78.25502014160156,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0646066257459632,
"min": 0.0638349420891658,
"max": 0.07345521951321557,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9044927604434847,
"min": 0.5062511234688734,
"max": 1.0922523274094404,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013752571108368817,
"min": 0.00011251453177096002,
"max": 0.01661688156586717,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19253599551716344,
"min": 0.0014626889130224803,
"max": 0.23263634192214036,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5344780599690438e-06,
"min": 1.5344780599690438e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.1482692839566614e-05,
"min": 2.1482692839566614e-05,
"max": 0.0038849931050023326,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10051145952380953,
"min": 0.10051145952380953,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4071604333333334,
"min": 1.3962282666666668,
"max": 2.6949976666666666,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.10948064285713e-05,
"min": 6.10948064285713e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008553272899999982,
"min": 0.0008553272899999982,
"max": 0.1295102669,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0057883127592504025,
"min": 0.005660186987370253,
"max": 0.38986220955848694,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08103638142347336,
"min": 0.07959816604852676,
"max": 2.7290353775024414,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 229.6201550387597,
"min": 189.26,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29621.0,
"min": 15984.0,
"max": 32968.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7392077998956665,
"min": -1.0000000521540642,
"max": 1.8107399860024451,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 222.61859838664532,
"min": -30.756001755595207,
"max": 271.6109979003668,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7392077998956665,
"min": -1.0000000521540642,
"max": 1.8107399860024451,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 222.61859838664532,
"min": -30.756001755595207,
"max": 271.6109979003668,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.013680191231173922,
"min": 0.011571932129099878,
"max": 7.795177295804024,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.751064477590262,
"min": 1.6866602274822071,
"max": 124.72283673286438,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1656200467",
"python_version": "3.8.8 (default, Apr 13 2021, 15:08:03) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\danielho\\anaconda3\\Scripts\\mlagents-learn .\\config\\ppo\\PyramidsRND.yaml --env=.\\trained-envs-executables\\windows\\Pyramids.exe --run-id=First Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu113",
"numpy_version": "1.20.1",
"end_time_seconds": "1656206852"
},
"total": 6384.4821957,
"count": 1,
"self": 1.3905097000006208,
"children": {
"run_training.setup": {
"total": 0.5271762999999998,
"count": 1,
"self": 0.5271762999999998
},
"TrainerController.start_learning": {
"total": 6382.5645097,
"count": 1,
"self": 4.473535899992385,
"children": {
"TrainerController._reset_env": {
"total": 20.273082700000003,
"count": 1,
"self": 20.273082700000003
},
"TrainerController.advance": {
"total": 6357.363166800008,
"count": 195034,
"self": 4.328861199999665,
"children": {
"env_step": {
"total": 3134.494833700005,
"count": 195034,
"self": 2536.136294399954,
"children": {
"SubprocessEnvManager._take_step": {
"total": 595.4624087000841,
"count": 195034,
"self": 12.391135100053589,
"children": {
"TorchPolicy.evaluate": {
"total": 583.0712736000305,
"count": 187542,
"self": 185.19782770003928,
"children": {
"TorchPolicy.sample_actions": {
"total": 397.8734458999912,
"count": 187542,
"self": 397.8734458999912
}
}
}
}
},
"workers": {
"total": 2.8961305999671865,
"count": 195034,
"self": 0.0,
"children": {
"worker_root": {
"total": 6362.33513489984,
"count": 195034,
"is_parallel": true,
"self": 4141.789927300072,
"children": {
"steps_from_proto": {
"total": 0.0031889000000013823,
"count": 1,
"is_parallel": true,
"self": 0.00034490000000353405,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0028439999999978482,
"count": 8,
"is_parallel": true,
"self": 0.0028439999999978482
}
}
},
"UnityEnvironment.step": {
"total": 2220.542018699768,
"count": 195034,
"is_parallel": true,
"self": 59.383043599535995,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 42.14149460014697,
"count": 195034,
"is_parallel": true,
"self": 42.14149460014697
},
"communicator.exchange": {
"total": 1926.225960499977,
"count": 195034,
"is_parallel": true,
"self": 1926.225960499977
},
"steps_from_proto": {
"total": 192.79152000010833,
"count": 195034,
"is_parallel": true,
"self": 49.958479200077676,
"children": {
"_process_rank_one_or_two_observation": {
"total": 142.83304080003066,
"count": 1560272,
"is_parallel": true,
"self": 142.83304080003066
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3218.5394719000033,
"count": 195034,
"self": 7.617965399881541,
"children": {
"process_trajectory": {
"total": 551.2480172001315,
"count": 195034,
"self": 549.0287744001319,
"children": {
"RLTrainer._checkpoint": {
"total": 2.21924279999962,
"count": 6,
"self": 2.21924279999962
}
}
},
"_update_policy": {
"total": 2659.6734892999903,
"count": 1395,
"self": 694.3334109999557,
"children": {
"TorchPPOOptimizer.update": {
"total": 1965.3400783000345,
"count": 68385,
"self": 1965.3400783000345
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2000000424450263e-06,
"count": 1,
"self": 1.2000000424450263e-06
},
"TrainerController._save_models": {
"total": 0.45472309999968274,
"count": 1,
"self": 0.09516789999906905,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3595552000006137,
"count": 1,
"self": 0.3595552000006137
}
}
}
}
}
}
}