{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.4249449968338013,
"min": 1.4187017679214478,
"max": 1.444635272026062,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 8549.669921875,
"min": 8512.2841796875,
"max": 8670.103515625,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.6,
"min": 0.6,
"max": 0.7333333333333333,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 9.0,
"min": 9.0,
"max": 11.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.HighestPotentialSoildFound.mean": {
"value": 0.9688728809356689,
"min": 0.46618089576562244,
"max": 0.9998686869939168,
"count": 200
},
"Agent.DroneBasedReforestation.HighestPotentialSoildFound.sum": {
"value": 14.533093214035034,
"min": 6.992713436484337,
"max": 14.998030304908752,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 399.0,
"min": 399.0,
"max": 399.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 5985.0,
"min": 5985.0,
"max": 5985.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199600.0,
"min": 5600.0,
"max": 1199600.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199600.0,
"min": 5600.0,
"max": 1199600.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.3005649745464325,
"min": 0.03917299583554268,
"max": 1.0096532106399536,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 4.508474826812744,
"min": 0.5513672232627869,
"max": 15.144798278808594,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.1138719990849495,
"min": -0.14873845875263214,
"max": 0.07217133045196533,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.7080799341201782,
"min": -2.231076955795288,
"max": 1.0103986263275146,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": -0.03111012528340022,
"min": -0.5419624785582224,
"max": 5.462964375813802e-05,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": -0.46665187925100327,
"min": -8.129437178373337,
"max": 0.0008194446563720703,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 1.1817724466323853,
"min": 0.0,
"max": 15.446411609649658,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 17.72658669948578,
"min": 0.0,
"max": 231.69617414474487,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": -0.02799933801094691,
"min": -0.48776637613773344,
"max": 4.8941373825073244e-05,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": -0.41999007016420364,
"min": -7.316495642066002,
"max": 0.0007341206073760986,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.019503678418307874,
"min": 0.01565098201778407,
"max": 0.03486651174413661,
"count": 142
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.019503678418307874,
"min": 0.01565098201778407,
"max": 0.03486651174413661,
"count": 142
},
"Agent.Losses.ValueLoss.mean": {
"value": 0.0034317016446342072,
"min": 0.0026708836861265204,
"max": 0.05614186128756652,
"count": 142
},
"Agent.Losses.ValueLoss.sum": {
"value": 0.0034317016446342072,
"min": 0.0026708836861265204,
"max": 0.05614186128756652,
"count": 142
},
"Agent.Policy.LearningRate.mean": {
"value": 1.8000994000000013e-06,
"min": 1.8000994000000013e-06,
"max": 0.0002979000007,
"count": 142
},
"Agent.Policy.LearningRate.sum": {
"value": 1.8000994000000013e-06,
"min": 1.8000994000000013e-06,
"max": 0.0002979000007,
"count": 142
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10060000000000001,
"min": 0.10060000000000001,
"max": 0.1993,
"count": 142
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10060000000000001,
"min": 0.10060000000000001,
"max": 0.1993,
"count": 142
},
"Agent.Policy.Beta.mean": {
"value": 3.994000000000003e-05,
"min": 3.994000000000003e-05,
"max": 0.00496507,
"count": 142
},
"Agent.Policy.Beta.sum": {
"value": 3.994000000000003e-05,
"min": 3.994000000000003e-05,
"max": 0.00496507,
"count": 142
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.028783430345356464,
"min": 0.027378874132409692,
"max": 0.5812800365189711,
"count": 142
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.028783430345356464,
"min": 0.027378874132409692,
"max": 0.5812800365189711,
"count": 142
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 1.959982658425967,
"min": 1.959982658425967,
"max": 3.293136020501455,
"count": 142
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 1.959982658425967,
"min": 1.959982658425967,
"max": 3.293136020501455,
"count": 142
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717706412",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_7_task_4_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_7_task_4_run_id_2_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717710569"
},
"total": 4156.3984868,
"count": 1,
"self": 0.2731305999996039,
"children": {
"run_training.setup": {
"total": 0.052867099999999945,
"count": 1,
"self": 0.052867099999999945
},
"TrainerController.start_learning": {
"total": 4156.0724891,
"count": 1,
"self": 6.521958099953736,
"children": {
"TrainerController._reset_env": {
"total": 2.2284139,
"count": 1,
"self": 2.2284139
},
"TrainerController.advance": {
"total": 4147.098306400046,
"count": 400401,
"self": 6.136487800156829,
"children": {
"env_step": {
"total": 4140.961818599889,
"count": 400401,
"self": 1768.698611299993,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2367.897190000048,
"count": 400401,
"self": 12.723249800181748,
"children": {
"TorchPolicy.evaluate": {
"total": 2355.173940199866,
"count": 400401,
"self": 2355.173940199866
}
}
},
"workers": {
"total": 4.366017299847898,
"count": 400401,
"self": 0.0,
"children": {
"worker_root": {
"total": 4146.730285600128,
"count": 400401,
"is_parallel": true,
"self": 2628.7303893001545,
"children": {
"steps_from_proto": {
"total": 0.006846899999999989,
"count": 1,
"is_parallel": true,
"self": 0.000111200000000089,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.006684299999999865,
"count": 2,
"is_parallel": true,
"self": 3.7599999999748945e-05,
"children": {
"_observation_to_np_array": {
"total": 0.006646700000000116,
"count": 3,
"is_parallel": true,
"self": 3.719999999995949e-05,
"children": {
"process_pixels": {
"total": 0.006609500000000157,
"count": 3,
"is_parallel": true,
"self": 0.00032260000000028377,
"children": {
"image_decompress": {
"total": 0.006286899999999873,
"count": 3,
"is_parallel": true,
"self": 0.006286899999999873
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 5.140000000003475e-05,
"count": 2,
"is_parallel": true,
"self": 5.140000000003475e-05
}
}
},
"UnityEnvironment.step": {
"total": 1517.9930493999736,
"count": 400401,
"is_parallel": true,
"self": 24.043743100025722,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.948123200017484,
"count": 400401,
"is_parallel": true,
"self": 24.948123200017484
},
"communicator.exchange": {
"total": 1278.7460356000402,
"count": 400401,
"is_parallel": true,
"self": 1278.7460356000402
},
"steps_from_proto": {
"total": 190.2551474998901,
"count": 400401,
"is_parallel": true,
"self": 37.91757190024222,
"children": {
"_process_maybe_compressed_observation": {
"total": 135.90515699965724,
"count": 800802,
"is_parallel": true,
"self": 10.664098099419036,
"children": {
"_observation_to_np_array": {
"total": 125.2410589002382,
"count": 1204206,
"is_parallel": true,
"self": 10.71637260011596,
"children": {
"process_pixels": {
"total": 114.52468630012224,
"count": 1204206,
"is_parallel": true,
"self": 54.284971400308876,
"children": {
"image_decompress": {
"total": 60.239714899813364,
"count": 1204206,
"is_parallel": true,
"self": 60.239714899813364
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 16.432418599990648,
"count": 800802,
"is_parallel": true,
"self": 16.432418599990648
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.300000025774352e-05,
"count": 1,
"self": 3.300000025774352e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 4151.371208400118,
"count": 198243,
"is_parallel": true,
"self": 6.920095500179741,
"children": {
"process_trajectory": {
"total": 3251.5563046999355,
"count": 198243,
"is_parallel": true,
"self": 3251.0471040999355,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5092005999999856,
"count": 2,
"is_parallel": true,
"self": 0.5092005999999856
}
}
},
"_update_policy": {
"total": 892.8948082000026,
"count": 142,
"is_parallel": true,
"self": 594.2710099000187,
"children": {
"TorchPPOOptimizer.update": {
"total": 298.6237982999838,
"count": 3408,
"is_parallel": true,
"self": 298.6237982999838
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.22377770000002783,
"count": 1,
"self": 0.011254099999860046,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2125236000001678,
"count": 1,
"self": 0.2125236000001678
}
}
}
}
}
}
}