{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.2818900346755981,
"min": 1.2818900346755981,
"max": 1.420561671257019,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 7610.5810546875,
"min": 6634.158203125,
"max": 9503.5576171875,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.2727272727272727,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 9.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 25.386666666666667,
"min": 18.8,
"max": 244.22222222222223,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 5712.0,
"min": 4887.0,
"max": 6759.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199979.0,
"min": 5913.0,
"max": 1199979.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199979.0,
"min": 5913.0,
"max": 1199979.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.6190378069877625,
"min": -0.0036654912400990725,
"max": 1.0050580501556396,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 139.28350830078125,
"min": -0.12096121162176132,
"max": 258.836669921875,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 73.77967834472656,
"min": 0.07052389532327652,
"max": 80.70956420898438,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 16600.427734375,
"min": 1.7630974054336548,
"max": 22960.2890625,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 98.15678325571172,
"min": 66.22676928984848,
"max": 105.42854440487712,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 22085.276232535136,
"min": 1984.3984070718288,
"max": 30485.528566875146,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 0.06000856314759909,
"min": 0.0,
"max": 6.729008760416146,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 13.501926708209794,
"min": 0.0,
"max": 222.05728909373283,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 88.34110324285226,
"min": 59.604091476197496,
"max": 94.88568782321059,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 19876.74822964176,
"min": 1785.9585835039616,
"max": 27436.97532238142,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.024716698137732845,
"min": 0.01383003448912253,
"max": 0.03393137456926828,
"count": 143
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.024716698137732845,
"min": 0.01383003448912253,
"max": 0.03393137456926828,
"count": 143
},
"Agent.Losses.ValueLoss.mean": {
"value": 1699.0958811442058,
"min": 288.91425768534344,
"max": 2346.4903666178384,
"count": 143
},
"Agent.Losses.ValueLoss.sum": {
"value": 1699.0958811442058,
"min": 288.91425768534344,
"max": 2346.4903666178384,
"count": 143
},
"Agent.Policy.LearningRate.mean": {
"value": 1.7393494202500032e-06,
"min": 1.7393494202500032e-06,
"max": 0.00029789475070175,
"count": 143
},
"Agent.Policy.LearningRate.sum": {
"value": 1.7393494202500032e-06,
"min": 1.7393494202500032e-06,
"max": 0.00029789475070175,
"count": 143
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10057975000000001,
"min": 0.10057975000000001,
"max": 0.19929825,
"count": 143
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10057975000000001,
"min": 0.10057975000000001,
"max": 0.19929825,
"count": 143
},
"Agent.Policy.Beta.mean": {
"value": 3.8929525000000057e-05,
"min": 3.8929525000000057e-05,
"max": 0.004964982675,
"count": 143
},
"Agent.Policy.Beta.sum": {
"value": 3.8929525000000057e-05,
"min": 3.8929525000000057e-05,
"max": 0.004964982675,
"count": 143
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.02298492395008604,
"min": 0.019557931227609515,
"max": 0.5723420046269894,
"count": 143
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.02298492395008604,
"min": 0.019557931227609515,
"max": 0.5723420046269894,
"count": 143
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 1.3252921452124913,
"min": 1.2326968361934025,
"max": 3.305937925974528,
"count": 143
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 1.3252921452124913,
"min": 1.2326968361934025,
"max": 3.305937925974528,
"count": 143
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717752552",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_9_task_1_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_9_task_1_run_id_2_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717759084"
},
"total": 6532.3746521,
"count": 1,
"self": 2.3646825000005265,
"children": {
"run_training.setup": {
"total": 0.051015299999999986,
"count": 1,
"self": 0.051015299999999986
},
"TrainerController.start_learning": {
"total": 6529.9589543,
"count": 1,
"self": 7.020151400179202,
"children": {
"TrainerController._reset_env": {
"total": 2.1190383,
"count": 1,
"self": 2.1190383
},
"TrainerController.advance": {
"total": 6520.548312999821,
"count": 410211,
"self": 6.9232442998254555,
"children": {
"env_step": {
"total": 6513.625068699996,
"count": 410211,
"self": 3996.0341070001723,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2512.4319659999474,
"count": 410211,
"self": 13.74588720016527,
"children": {
"TorchPolicy.evaluate": {
"total": 2498.686078799782,
"count": 400012,
"self": 2498.686078799782
}
}
},
"workers": {
"total": 5.1589956998757645,
"count": 410211,
"self": 0.0,
"children": {
"worker_root": {
"total": 6520.306557400101,
"count": 410211,
"is_parallel": true,
"self": 2894.5191464001737,
"children": {
"steps_from_proto": {
"total": 0.0068310000000000315,
"count": 1,
"is_parallel": true,
"self": 0.00010559999999970593,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.006677200000000161,
"count": 2,
"is_parallel": true,
"self": 3.510000000028768e-05,
"children": {
"_observation_to_np_array": {
"total": 0.006642099999999873,
"count": 3,
"is_parallel": true,
"self": 2.959999999996299e-05,
"children": {
"process_pixels": {
"total": 0.00661249999999991,
"count": 3,
"is_parallel": true,
"self": 0.00024029999999974905,
"children": {
"image_decompress": {
"total": 0.006372200000000161,
"count": 3,
"is_parallel": true,
"self": 0.006372200000000161
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 4.820000000016478e-05,
"count": 2,
"is_parallel": true,
"self": 4.820000000016478e-05
}
}
},
"UnityEnvironment.step": {
"total": 3625.7805799999273,
"count": 410211,
"is_parallel": true,
"self": 26.72619129981649,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.39507669975361,
"count": 410211,
"is_parallel": true,
"self": 25.39507669975361
},
"communicator.exchange": {
"total": 3374.9514166001327,
"count": 410211,
"is_parallel": true,
"self": 3374.9514166001327
},
"steps_from_proto": {
"total": 198.70789540022463,
"count": 410211,
"is_parallel": true,
"self": 39.642117400747395,
"children": {
"_process_maybe_compressed_observation": {
"total": 141.4368135996278,
"count": 820422,
"is_parallel": true,
"self": 11.152170899201195,
"children": {
"_observation_to_np_array": {
"total": 130.28464270042662,
"count": 1238292,
"is_parallel": true,
"self": 11.402069200093905,
"children": {
"process_pixels": {
"total": 118.88257350033271,
"count": 1238292,
"is_parallel": true,
"self": 56.72087620023296,
"children": {
"image_decompress": {
"total": 62.16169730009975,
"count": 1238292,
"is_parallel": true,
"self": 62.16169730009975
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 17.62896439984943,
"count": 820422,
"is_parallel": true,
"self": 17.62896439984943
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.990000029967632e-05,
"count": 1,
"self": 2.990000029967632e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 6524.062174499911,
"count": 315411,
"is_parallel": true,
"self": 9.779227299857666,
"children": {
"process_trajectory": {
"total": 5656.758363500057,
"count": 315411,
"is_parallel": true,
"self": 5656.152673900057,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6056896000000052,
"count": 2,
"is_parallel": true,
"self": 0.6056896000000052
}
}
},
"_update_policy": {
"total": 857.524583699996,
"count": 143,
"is_parallel": true,
"self": 574.102632699982,
"children": {
"TorchPPOOptimizer.update": {
"total": 283.42195100001396,
"count": 3441,
"is_parallel": true,
"self": 283.42195100001396
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.27142169999933685,
"count": 1,
"self": 0.014183799999045732,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2572379000002911,
"count": 1,
"self": 0.2572379000002911
}
}
}
}
}
}
}