{
    "name": "root",
    "gauges": {
        "Agent.Policy.Entropy.mean": {
            "value": 1.4681881666183472,
            "min": 1.4189385175704956,
            "max": 1.4694794416427612,
            "count": 200
        },
        "Agent.Policy.Entropy.sum": {
            "value": 8139.63525390625,
            "min": 6985.09326171875,
            "max": 10304.462890625,
            "count": 200
        },
        "Agent.DroneBasedReforestation.TreeDropCount.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.TreeDropCount.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
            "value": 253.16666666666666,
            "min": 0.0,
            "max": 468.8666666666667,
            "count": 200
        },
        "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
            "value": 4557.0,
            "min": 0.0,
            "max": 7947.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.SaveLocationCount.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.SaveLocationCount.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
            "value": 0.5,
            "min": 0.4,
            "max": 0.8,
            "count": 200
        },
        "Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
            "value": 9.0,
            "min": 6.0,
            "max": 13.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.FurthestDistanceExplored.mean": {
            "value": 137.96881548563638,
            "min": 105.71984587775336,
            "max": 195.2825449625651,
            "count": 200
        },
        "Agent.DroneBasedReforestation.FurthestDistanceExplored.sum": {
            "value": 2483.438678741455,
            "min": 1693.3956632614136,
            "max": 3340.6087493896484,
            "count": 200
        },
        "Agent.Environment.LessonNumber.difficulty.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.difficulty.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.task.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.task.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.EpisodeLength.mean": {
            "value": 306.6666666666667,
            "min": 273.5,
            "max": 399.0,
            "count": 200
        },
        "Agent.Environment.EpisodeLength.sum": {
            "value": 5520.0,
            "min": 4809.0,
            "max": 7074.0,
            "count": 200
        },
        "Agent.Step.mean": {
            "value": 1199705.0,
            "min": 5600.0,
            "max": 1199705.0,
            "count": 200
        },
        "Agent.Step.sum": {
            "value": 1199705.0,
            "min": 5600.0,
            "max": 1199705.0,
            "count": 200
        },
        "Agent.Policy.CuriosityValueEstimate.mean": {
            "value": 0.23348771035671234,
            "min": 0.031902581453323364,
            "max": 1.0864794254302979,
            "count": 200
        },
        "Agent.Policy.CuriosityValueEstimate.sum": {
            "value": 4.4362664222717285,
            "min": 0.4466361403465271,
            "max": 16.297191619873047,
            "count": 200
        },
        "Agent.Policy.ExtrinsicValueEstimate.mean": {
            "value": 0.8060107827186584,
            "min": -0.18773867189884186,
            "max": 2.418530225753784,
            "count": 200
        },
        "Agent.Policy.ExtrinsicValueEstimate.sum": {
            "value": 15.314205169677734,
            "min": -2.816080093383789,
            "max": 42.10493850708008,
            "count": 200
        },
        "Agent.Environment.CumulativeReward.mean": {
            "value": 9.443000662483668,
            "min": -1.065999984741211,
            "max": 22.900522089004518,
            "count": 200
        },
        "Agent.Environment.CumulativeReward.sum": {
            "value": 179.41701258718967,
            "min": -15.989999771118164,
            "max": 343.50783133506775,
            "count": 200
        },
        "Agent.Policy.CuriosityReward.mean": {
            "value": 0.7501859969802593,
            "min": 0.0,
            "max": 18.15416892369588,
            "count": 200
        },
        "Agent.Policy.CuriosityReward.sum": {
            "value": 14.253533942624927,
            "min": 0.0,
            "max": 272.31253385543823,
            "count": 200
        },
        "Agent.Policy.ExtrinsicReward.mean": {
            "value": 8.498700265429521,
            "min": -0.959400208791097,
            "max": 20.610466225941977,
            "count": 200
        },
        "Agent.Policy.ExtrinsicReward.sum": {
            "value": 161.47530504316092,
            "min": -14.391003131866455,
            "max": 309.15699338912964,
            "count": 200
        },
        "Agent.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 200
        },
        "Agent.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 200
        },
        "Agent.Losses.PolicyLoss.mean": {
            "value": 0.026678891968913376,
            "min": 0.013505291814605394,
            "max": 0.03134514829919984,
            "count": 139
        },
        "Agent.Losses.PolicyLoss.sum": {
            "value": 0.026678891968913376,
            "min": 0.013505291814605394,
            "max": 0.03134514829919984,
            "count": 139
        },
        "Agent.Losses.ValueLoss.mean": {
            "value": 4.150850961605708,
            "min": 0.0006164339392853435,
            "max": 27.327500353256863,
            "count": 139
        },
        "Agent.Losses.ValueLoss.sum": {
            "value": 4.150850961605708,
            "min": 0.0006164339392853435,
            "max": 27.327500353256863,
            "count": 139
        },
        "Agent.Policy.LearningRate.mean": {
            "value": 1.6583494472500056e-06,
            "min": 1.6583494472500056e-06,
            "max": 0.0002979000007,
            "count": 139
        },
        "Agent.Policy.LearningRate.sum": {
            "value": 1.6583494472500056e-06,
            "min": 1.6583494472500056e-06,
            "max": 0.0002979000007,
            "count": 139
        },
        "Agent.Policy.Epsilon.mean": {
            "value": 0.10055275000000001,
            "min": 0.10055275000000001,
            "max": 0.1993,
            "count": 139
        },
        "Agent.Policy.Epsilon.sum": {
            "value": 0.10055275000000001,
            "min": 0.10055275000000001,
            "max": 0.1993,
            "count": 139
        },
        "Agent.Policy.Beta.mean": {
            "value": 3.75822250000001e-05,
            "min": 3.75822250000001e-05,
            "max": 0.00496507,
            "count": 139
        },
        "Agent.Policy.Beta.sum": {
            "value": 3.75822250000001e-05,
            "min": 3.75822250000001e-05,
            "max": 0.00496507,
            "count": 139
        },
        "Agent.Losses.CuriosityForwardLoss.mean": {
            "value": 0.019812342322741944,
            "min": 0.017701702192425728,
            "max": 0.5835289520521959,
            "count": 139
        },
        "Agent.Losses.CuriosityForwardLoss.sum": {
            "value": 0.019812342322741944,
            "min": 0.017701702192425728,
            "max": 0.5835289520521959,
            "count": 139
        },
        "Agent.Losses.CuriosityInverseLoss.mean": {
            "value": 2.2944007217884064,
            "min": 2.1021603296200433,
            "max": 3.310828576485316,
            "count": 139
        },
        "Agent.Losses.CuriosityInverseLoss.sum": {
            "value": 2.2944007217884064,
            "min": 2.1021603296200433,
            "max": 3.310828576485316,
            "count": 139
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1717378591",
        "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
        "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_8_task_6_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_8_task_6_run_id_1_train --base-port 5007",
        "mlagents_version": "0.30.0",
        "mlagents_envs_version": "0.30.0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.7.1+cu110",
        "numpy_version": "1.21.0",
        "end_time_seconds": "1717382227"
    },
    "total": 3636.1255082000002,
    "count": 1,
    "self": 0.2796889000005649,
    "children": {
        "run_training.setup": {
            "total": 0.05370040000000009,
            "count": 1,
            "self": 0.05370040000000009
        },
        "TrainerController.start_learning": {
            "total": 3635.7921189,
            "count": 1,
            "self": 7.236413300019194,
            "children": {
                "TrainerController._reset_env": {
                    "total": 3.7311408,
                    "count": 1,
                    "self": 3.7311408
                },
                "TrainerController.advance": {
                    "total": 3624.66035579998,
                    "count": 400850,
                    "self": 6.328213800039521,
                    "children": {
                        "env_step": {
                            "total": 3618.3321419999406,
                            "count": 400850,
                            "self": 1707.5228384998413,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 1906.746023199977,
                                    "count": 400850,
                                    "self": 11.934513500004186,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 1894.8115096999727,
                                            "count": 400041,
                                            "self": 1894.8115096999727
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 4.063280300122365,
                                    "count": 400850,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 3626.077969799904,
                                            "count": 400850,
                                            "is_parallel": true,
                                            "self": 2136.92447969996,
                                            "children": {
                                                "steps_from_proto": {
                                                    "total": 0.006263400000000363,
                                                    "count": 1,
                                                    "is_parallel": true,
                                                    "self": 0.00010090000000051447,
                                                    "children": {
                                                        "_process_maybe_compressed_observation": {
                                                            "total": 0.006118099999999682,
                                                            "count": 2,
                                                            "is_parallel": true,
                                                            "self": 3.2599999999494145e-05,
                                                            "children": {
                                                                "_observation_to_np_array": {
                                                                    "total": 0.006085500000000188,
                                                                    "count": 3,
                                                                    "is_parallel": true,
                                                                    "self": 2.639999999987097e-05,
                                                                    "children": {
                                                                        "process_pixels": {
                                                                            "total": 0.006059100000000317,
                                                                            "count": 3,
                                                                            "is_parallel": true,
                                                                            "self": 0.0002165999999999002,
                                                                            "children": {
                                                                                "image_decompress": {
                                                                                    "total": 0.005842500000000417,
                                                                                    "count": 3,
                                                                                    "is_parallel": true,
                                                                                    "self": 0.005842500000000417
                                                                                }
                                                                            }
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        },
                                                        "_process_rank_one_or_two_observation": {
                                                            "total": 4.440000000016653e-05,
                                                            "count": 2,
                                                            "is_parallel": true,
                                                            "self": 4.440000000016653e-05
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1489.147226699944,
                                                    "count": 400850,
                                                    "is_parallel": true,
                                                    "self": 17.237013600043156,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 17.64586779986827,
                                                            "count": 400850,
                                                            "is_parallel": true,
                                                            "self": 17.64586779986827
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 1304.1111604999983,
                                                            "count": 400850,
                                                            "is_parallel": true,
                                                            "self": 1304.1111604999983
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 150.15318480003424,
                                                            "count": 400850,
                                                            "is_parallel": true,
                                                            "self": 29.949543000004525,
                                                            "children": {
                                                                "_process_maybe_compressed_observation": {
                                                                    "total": 107.94837389992841,
                                                                    "count": 801700,
                                                                    "is_parallel": true,
                                                                    "self": 8.111975599988142,
                                                                    "children": {
                                                                        "_observation_to_np_array": {
                                                                            "total": 99.83639829994027,
                                                                            "count": 1203384,
                                                                            "is_parallel": true,
                                                                            "self": 7.571749999849985,
                                                                            "children": {
                                                                                "process_pixels": {
                                                                                    "total": 92.26464830009029,
                                                                                    "count": 1203384,
                                                                                    "is_parallel": true,
                                                                                    "self": 42.07163559996019,
                                                                                    "children": {
                                                                                        "image_decompress": {
                                                                                            "total": 50.1930127001301,
                                                                                            "count": 1203384,
                                                                                            "is_parallel": true,
                                                                                            "self": 50.1930127001301
                                                                                        }
                                                                                    }
                                                                                }
                                                                            }
                                                                        }
                                                                    }
                                                                },
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 12.255267900101298,
                                                                    "count": 801700,
                                                                    "is_parallel": true,
                                                                    "self": 12.255267900101298
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 2.66000001829525e-05,
                    "count": 1,
                    "self": 2.66000001829525e-05,
                    "children": {
                        "thread_root": {
                            "total": 0.0,
                            "count": 0,
                            "is_parallel": true,
                            "self": 0.0,
                            "children": {
                                "trainer_advance": {
                                    "total": 3630.3465961000975,
                                    "count": 180200,
                                    "is_parallel": true,
                                    "self": 5.290042900159278,
                                    "children": {
                                        "process_trajectory": {
                                            "total": 2924.0129220999397,
                                            "count": 180200,
                                            "is_parallel": true,
                                            "self": 2923.6135354999396,
                                            "children": {
                                                "RLTrainer._checkpoint": {
                                                    "total": 0.39938660000007076,
                                                    "count": 2,
                                                    "is_parallel": true,
                                                    "self": 0.39938660000007076
                                                }
                                            }
                                        },
                                        "_update_policy": {
                                            "total": 701.0436310999985,
                                            "count": 139,
                                            "is_parallel": true,
                                            "self": 475.70747270000385,
                                            "children": {
                                                "TorchPPOOptimizer.update": {
                                                    "total": 225.33615839999464,
                                                    "count": 3357,
                                                    "is_parallel": true,
                                                    "self": 225.33615839999464
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "TrainerController._save_models": {
                    "total": 0.16418240000029982,
                    "count": 1,
                    "self": 0.005631000000448694,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.15855139999985113,
                            "count": 1,
                            "self": 0.15855139999985113
                        }
                    }
                }
            }
        }
    }
}