{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.4389417171478271,
"min": 1.4189385175704956,
"max": 1.442765235900879,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 9781.92578125,
"min": 7058.5087890625,
"max": 9982.1494140625,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 82.5,
"min": 0.0,
"max": 494.1333333333333,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 1980.0,
"min": 0.0,
"max": 7412.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.5833333333333334,
"min": 0.38095238095238093,
"max": 0.7333333333333333,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 14.0,
"min": 6.0,
"max": 15.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 283.0,
"min": 250.75,
"max": 399.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 6792.0,
"min": 4884.0,
"max": 6954.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199937.0,
"min": 5600.0,
"max": 1199937.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199937.0,
"min": 5600.0,
"max": 1199937.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.2653595805168152,
"min": 0.029307620599865913,
"max": 0.8288605213165283,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 5.8379106521606445,
"min": 0.43961429595947266,
"max": 14.840982437133789,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.1637535095214844,
"min": 0.07653423398733139,
"max": 3.4586215019226074,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 47.602577209472656,
"min": 1.071479320526123,
"max": 64.29899597167969,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 17.454433063976467,
"min": -1.0649333159128824,
"max": 38.686707599957785,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 383.99752740748227,
"min": -15.973999738693237,
"max": 582.9760599876754,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 0.746854851488024,
"min": 0.0,
"max": 14.208348274230957,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 16.430806732736528,
"min": 0.0,
"max": 255.75026893615723,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 15.708987276408482,
"min": -0.9584401766459147,
"max": 34.81802397171656,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 345.5977200809866,
"min": -14.37660264968872,
"max": 524.6783403318841,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.02009747641083474,
"min": 0.015691468220514555,
"max": 0.034741795350176595,
"count": 139
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.02009747641083474,
"min": 0.015691468220514555,
"max": 0.034741795350176595,
"count": 139
},
"Agent.Losses.ValueLoss.mean": {
"value": 7.238462497790654,
"min": 0.009945196118981888,
"max": 26.402251879374187,
"count": 139
},
"Agent.Losses.ValueLoss.sum": {
"value": 7.238462497790654,
"min": 0.009945196118981888,
"max": 26.402251879374187,
"count": 139
},
"Agent.Policy.LearningRate.mean": {
"value": 1.8758493747499982e-06,
"min": 1.8758493747499982e-06,
"max": 0.0002979000007,
"count": 139
},
"Agent.Policy.LearningRate.sum": {
"value": 1.8758493747499982e-06,
"min": 1.8758493747499982e-06,
"max": 0.0002979000007,
"count": 139
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10062525,
"min": 0.10062525,
"max": 0.1993,
"count": 139
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10062525,
"min": 0.10062525,
"max": 0.1993,
"count": 139
},
"Agent.Policy.Beta.mean": {
"value": 4.119997499999998e-05,
"min": 4.119997499999998e-05,
"max": 0.00496507,
"count": 139
},
"Agent.Policy.Beta.sum": {
"value": 4.119997499999998e-05,
"min": 4.119997499999998e-05,
"max": 0.00496507,
"count": 139
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.02619731449522078,
"min": 0.024310729233548045,
"max": 0.5835290277997652,
"count": 139
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.02619731449522078,
"min": 0.024310729233548045,
"max": 0.5835290277997652,
"count": 139
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 1.9983123789230983,
"min": 1.9722897013028462,
"max": 3.310828596353531,
"count": 139
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 1.9983123789230983,
"min": 1.9722897013028462,
"max": 3.310828596353531,
"count": 139
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717248804",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_4_task_2_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_2_run_id_1_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717252409"
},
"total": 3604.5230556,
"count": 1,
"self": 0.3121977000005245,
"children": {
"run_training.setup": {
"total": 0.04929309999999998,
"count": 1,
"self": 0.04929309999999998
},
"TrainerController.start_learning": {
"total": 3604.1615647999997,
"count": 1,
"self": 5.150592599950414,
"children": {
"TrainerController._reset_env": {
"total": 2.1376168,
"count": 1,
"self": 2.1376168
},
"TrainerController.advance": {
"total": 3596.69150620005,
"count": 401217,
"self": 4.888665899957232,
"children": {
"env_step": {
"total": 3591.8028403000926,
"count": 401217,
"self": 1598.6335483001171,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1989.9212179998997,
"count": 401217,
"self": 10.38291419988036,
"children": {
"TorchPolicy.evaluate": {
"total": 1979.5383038000193,
"count": 400379,
"self": 1979.5383038000193
}
}
},
"workers": {
"total": 3.2480740000757544,
"count": 401217,
"self": 0.0,
"children": {
"worker_root": {
"total": 3595.269167799993,
"count": 401217,
"is_parallel": true,
"self": 2201.3683121999547,
"children": {
"steps_from_proto": {
"total": 0.006103399999999981,
"count": 1,
"is_parallel": true,
"self": 0.00010250000000011639,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.005953199999999992,
"count": 2,
"is_parallel": true,
"self": 2.859999999982321e-05,
"children": {
"_observation_to_np_array": {
"total": 0.005924600000000169,
"count": 3,
"is_parallel": true,
"self": 2.8500000000208914e-05,
"children": {
"process_pixels": {
"total": 0.00589609999999996,
"count": 3,
"is_parallel": true,
"self": 0.00023630000000007811,
"children": {
"image_decompress": {
"total": 0.005659799999999882,
"count": 3,
"is_parallel": true,
"self": 0.005659799999999882
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 4.7699999999872844e-05,
"count": 2,
"is_parallel": true,
"self": 4.7699999999872844e-05
}
}
},
"UnityEnvironment.step": {
"total": 1393.8947522000383,
"count": 401217,
"is_parallel": true,
"self": 17.846898699817302,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 19.743010899945794,
"count": 401217,
"is_parallel": true,
"self": 19.743010899945794
},
"communicator.exchange": {
"total": 1214.4236552000543,
"count": 401217,
"is_parallel": true,
"self": 1214.4236552000543
},
"steps_from_proto": {
"total": 141.88118740022088,
"count": 401217,
"is_parallel": true,
"self": 27.934730900210923,
"children": {
"_process_maybe_compressed_observation": {
"total": 101.46559579985372,
"count": 802434,
"is_parallel": true,
"self": 8.125860299694509,
"children": {
"_observation_to_np_array": {
"total": 93.33973550015921,
"count": 1204416,
"is_parallel": true,
"self": 7.935106600225282,
"children": {
"process_pixels": {
"total": 85.40462889993393,
"count": 1204416,
"is_parallel": true,
"self": 40.33854679993299,
"children": {
"image_decompress": {
"total": 45.06608210000093,
"count": 1204416,
"is_parallel": true,
"self": 45.06608210000093
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 12.480860700156235,
"count": 802434,
"is_parallel": true,
"self": 12.480860700156235
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.4199999643315095e-05,
"count": 1,
"self": 2.4199999643315095e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3600.703408599978,
"count": 175653,
"is_parallel": true,
"self": 3.4899279999713144,
"children": {
"process_trajectory": {
"total": 2869.1280733000076,
"count": 175653,
"is_parallel": true,
"self": 2868.7198174000077,
"children": {
"RLTrainer._checkpoint": {
"total": 0.40825589999985823,
"count": 2,
"is_parallel": true,
"self": 0.40825589999985823
}
}
},
"_update_policy": {
"total": 728.0854072999994,
"count": 139,
"is_parallel": true,
"self": 488.24386740000165,
"children": {
"TorchPPOOptimizer.update": {
"total": 239.8415398999977,
"count": 3372,
"is_parallel": true,
"self": 239.8415398999977
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1818249999996624,
"count": 1,
"self": 0.0060155999995004095,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17580940000016199,
"count": 1,
"self": 0.17580940000016199
}
}
}
}
}
}
}