{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.4332391023635864,
"min": 1.422836184501648,
"max": 1.437020182609558,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 7851.28369140625,
"min": 7410.271484375,
"max": 10219.7275390625,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 1.0,
"min": 0.6,
"max": 1.5333333333333334,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 15.0,
"min": 9.0,
"max": 23.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 8.866666666666667,
"min": 8.6,
"max": 68.26666666666667,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 133.0,
"min": 129.0,
"max": 1204.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 1.0,
"min": 0.3333333333333333,
"max": 1.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 15.0,
"min": 5.0,
"max": 18.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 93.0030579884847,
"min": 17.981625366210938,
"max": 95.54999783833821,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 1395.0458698272705,
"min": 269.72438049316406,
"max": 1679.0282497406006,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 5.332781855265299,
"min": 0.4178741693496704,
"max": 15.010636075337727,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 79.99172782897949,
"min": 6.268112540245056,
"max": 225.15954113006592,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 2.5490067481994627,
"min": 0.21376094818115235,
"max": 3.5784213225046795,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 38.23510122299194,
"min": 3.206414222717285,
"max": 61.10646003484726,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.2549006700515747,
"min": 0.02137609521547953,
"max": 0.3578421324491501,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 3.8235100507736206,
"min": 0.320641428232193,
"max": 6.110646083950996,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 55.341584130128226,
"min": 24.923566349347432,
"max": 120.74674631754557,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 830.1237619519234,
"min": 373.8534952402115,
"max": 1811.2011947631836,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 362.8,
"min": 307.8333333333333,
"max": 399.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 5442.0,
"min": 5166.0,
"max": 7119.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199977.0,
"min": 5987.0,
"max": 1199977.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199977.0,
"min": 5987.0,
"max": 1199977.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.4449746608734131,
"min": 0.02880333736538887,
"max": 0.9132558703422546,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 7.564568996429443,
"min": 0.43205004930496216,
"max": 15.64229965209961,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8609291911125183,
"min": 0.004691948648542166,
"max": 1.414363145828247,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 14.635796546936035,
"min": 0.07037922739982605,
"max": 22.629810333251953,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 8.836806314833025,
"min": -0.44562405347824097,
"max": 16.965223222970963,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 150.2257073521614,
"min": -6.6843608021736145,
"max": 271.4435715675354,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 1.3413477364708395,
"min": 0.0,
"max": 13.342656230926513,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 22.802911520004272,
"min": 0.0,
"max": 200.1398434638977,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 7.953125378664802,
"min": -0.40106213092803955,
"max": 15.268700160086155,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 135.20313143730164,
"min": -6.015931963920593,
"max": 244.29920256137848,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.030002704394670825,
"min": 0.014978367214401564,
"max": 0.03146391699987429,
"count": 136
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.030002704394670825,
"min": 0.014978367214401564,
"max": 0.03146391699987429,
"count": 136
},
"Agent.Losses.ValueLoss.mean": {
"value": 0.840554258475701,
"min": 0.06528064042019348,
"max": 2.2490536024173102,
"count": 136
},
"Agent.Losses.ValueLoss.sum": {
"value": 0.840554258475701,
"min": 0.06528064042019348,
"max": 2.2490536024173102,
"count": 136
},
"Agent.Policy.LearningRate.mean": {
"value": 2.166099278000001e-06,
"min": 2.166099278000001e-06,
"max": 0.00029780325073225,
"count": 136
},
"Agent.Policy.LearningRate.sum": {
"value": 2.166099278000001e-06,
"min": 2.166099278000001e-06,
"max": 0.00029780325073225,
"count": 136
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10072200000000002,
"min": 0.10072200000000002,
"max": 0.19926775000000005,
"count": 136
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10072200000000002,
"min": 0.10072200000000002,
"max": 0.19926775000000005,
"count": 136
},
"Agent.Policy.Beta.mean": {
"value": 4.602780000000003e-05,
"min": 4.602780000000003e-05,
"max": 0.004963460725,
"count": 136
},
"Agent.Policy.Beta.sum": {
"value": 4.602780000000003e-05,
"min": 4.602780000000003e-05,
"max": 0.004963460725,
"count": 136
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.03763703163713217,
"min": 0.027854387337962788,
"max": 0.6027635087569555,
"count": 136
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.03763703163713217,
"min": 0.027854387337962788,
"max": 0.6027635087569555,
"count": 136
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 1.8167990893125534,
"min": 1.774645984172821,
"max": 3.315477500359217,
"count": 136
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 1.8167990893125534,
"min": 1.774645984172821,
"max": 3.315477500359217,
"count": 136
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717353723",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_8_task_0_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_8_task_0_run_id_1_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717357352"
},
"total": 3629.1167372,
"count": 1,
"self": 0.28471369999988383,
"children": {
"run_training.setup": {
"total": 0.053523600000000005,
"count": 1,
"self": 0.053523600000000005
},
"TrainerController.start_learning": {
"total": 3628.7784999,
"count": 1,
"self": 7.053749699939544,
"children": {
"TrainerController._reset_env": {
"total": 2.1039557999999996,
"count": 1,
"self": 2.1039557999999996
},
"TrainerController.advance": {
"total": 3619.46621800006,
"count": 401057,
"self": 6.27432409998346,
"children": {
"env_step": {
"total": 3613.1918939000766,
"count": 401057,
"self": 1716.335633299859,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1892.4066291001172,
"count": 401057,
"self": 12.080070800110434,
"children": {
"TorchPolicy.evaluate": {
"total": 1880.3265583000068,
"count": 400130,
"self": 1880.3265583000068
}
}
},
"workers": {
"total": 4.449631500100336,
"count": 401057,
"self": 0.0,
"children": {
"worker_root": {
"total": 3620.3573454999214,
"count": 401057,
"is_parallel": true,
"self": 2128.002488199925,
"children": {
"steps_from_proto": {
"total": 0.00651540000000006,
"count": 1,
"is_parallel": true,
"self": 0.00010280000000029155,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.006360799999999944,
"count": 2,
"is_parallel": true,
"self": 3.0299999999927607e-05,
"children": {
"_observation_to_np_array": {
"total": 0.006330500000000017,
"count": 3,
"is_parallel": true,
"self": 3.520000000012402e-05,
"children": {
"process_pixels": {
"total": 0.006295299999999893,
"count": 3,
"is_parallel": true,
"self": 0.00021969999999993384,
"children": {
"image_decompress": {
"total": 0.006075599999999959,
"count": 3,
"is_parallel": true,
"self": 0.006075599999999959
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 5.179999999982421e-05,
"count": 2,
"is_parallel": true,
"self": 5.179999999982421e-05
}
}
},
"UnityEnvironment.step": {
"total": 1492.3483418999965,
"count": 401057,
"is_parallel": true,
"self": 18.510564800082648,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 18.45156019999186,
"count": 401057,
"is_parallel": true,
"self": 18.45156019999186
},
"communicator.exchange": {
"total": 1305.0499987000107,
"count": 401057,
"is_parallel": true,
"self": 1305.0499987000107
},
"steps_from_proto": {
"total": 150.33621819991117,
"count": 401057,
"is_parallel": true,
"self": 30.017820899916373,
"children": {
"_process_maybe_compressed_observation": {
"total": 107.88746859990474,
"count": 802114,
"is_parallel": true,
"self": 7.900561099978887,
"children": {
"_observation_to_np_array": {
"total": 99.98690749992585,
"count": 1203498,
"is_parallel": true,
"self": 7.499504800025932,
"children": {
"process_pixels": {
"total": 92.48740269989992,
"count": 1203498,
"is_parallel": true,
"self": 42.883510800188546,
"children": {
"image_decompress": {
"total": 49.60389189971137,
"count": 1203498,
"is_parallel": true,
"self": 49.60389189971137
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 12.430928700090066,
"count": 802114,
"is_parallel": true,
"self": 12.430928700090066
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.8200000087963417e-05,
"count": 1,
"self": 2.8200000087963417e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3624.809459400039,
"count": 179664,
"is_parallel": true,
"self": 5.413166100085618,
"children": {
"process_trajectory": {
"total": 2921.0421439999573,
"count": 179664,
"is_parallel": true,
"self": 2920.6248989999576,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4172449999998662,
"count": 2,
"is_parallel": true,
"self": 0.4172449999998662
}
}
},
"_update_policy": {
"total": 698.3541492999962,
"count": 136,
"is_parallel": true,
"self": 471.4117506999935,
"children": {
"TorchPPOOptimizer.update": {
"total": 226.9423986000027,
"count": 3396,
"is_parallel": true,
"self": 226.9423986000027
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1545482000001357,
"count": 1,
"self": 0.006546300000081828,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14800190000005387,
"count": 1,
"self": 0.14800190000005387
}
}
}
}
}
}
}