{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.2907824516296387,
"min": 1.2907824516296387,
"max": 1.4201821088790894,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 8151.29150390625,
"min": 6298.5830078125,
"max": 9797.4736328125,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.04316546762589928,
"min": 0.0,
"max": 0.36363636363636365,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 6.0,
"min": 0.0,
"max": 12.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 45.733333333333334,
"min": 19.84375,
"max": 244.88888888888889,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 6174.0,
"min": 4710.0,
"max": 6960.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199921.0,
"min": 5913.0,
"max": 1199921.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199921.0,
"min": 5913.0,
"max": 1199921.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.29551228880882263,
"min": -0.007990839891135693,
"max": 1.0560173988342285,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 39.89415740966797,
"min": -1.4223694801330566,
"max": 281.7090759277344,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 70.44039154052734,
"min": 0.07052389532327652,
"max": 79.12924194335938,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 9509.453125,
"min": 1.7630974054336548,
"max": 21766.724609375,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 95.44701277705047,
"min": 59.586239498002186,
"max": 102.03421319071408,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 12885.346724901814,
"min": 1585.19285325706,
"max": 29985.52858169796,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 0.12004314522362418,
"min": 0.0,
"max": 6.403408001127996,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 16.205824605189264,
"min": 0.0,
"max": 243.32950404286385,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 85.9023087618338,
"min": 53.62761391933475,
"max": 91.83079028555325,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 11596.811682847561,
"min": 1426.673609673977,
"max": 26986.9748427975,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.02089279591261099,
"min": 0.012899611882554987,
"max": 0.03436816683582341,
"count": 143
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.02089279591261099,
"min": 0.012899611882554987,
"max": 0.03436816683582341,
"count": 143
},
"Agent.Losses.ValueLoss.mean": {
"value": 1614.5251719156902,
"min": 202.1668799718221,
"max": 2360.8997090657554,
"count": 143
},
"Agent.Losses.ValueLoss.sum": {
"value": 1614.5251719156902,
"min": 202.1668799718221,
"max": 2360.8997090657554,
"count": 143
},
"Agent.Policy.LearningRate.mean": {
"value": 1.3718495427499938e-06,
"min": 1.3718495427499938e-06,
"max": 0.00029789475070175,
"count": 143
},
"Agent.Policy.LearningRate.sum": {
"value": 1.3718495427499938e-06,
"min": 1.3718495427499938e-06,
"max": 0.00029789475070175,
"count": 143
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10045725,
"min": 0.10045725,
"max": 0.19929825,
"count": 143
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10045725,
"min": 0.10045725,
"max": 0.19929825,
"count": 143
},
"Agent.Policy.Beta.mean": {
"value": 3.28167749999999e-05,
"min": 3.28167749999999e-05,
"max": 0.004964982675,
"count": 143
},
"Agent.Policy.Beta.sum": {
"value": 3.28167749999999e-05,
"min": 3.28167749999999e-05,
"max": 0.004964982675,
"count": 143
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.022857981811588008,
"min": 0.021707437466830015,
"max": 0.5723420046269894,
"count": 143
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.022857981811588008,
"min": 0.021707437466830015,
"max": 0.5723420046269894,
"count": 143
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 1.3564641028642654,
"min": 1.2317822674910228,
"max": 3.305937925974528,
"count": 143
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 1.3564641028642654,
"min": 1.2317822674910228,
"max": 3.305937925974528,
"count": 143
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717357353",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_8_task_1_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_8_task_1_run_id_1_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717363622"
},
"total": 6269.2732442,
"count": 1,
"self": 3.3238776999996844,
"children": {
"run_training.setup": {
"total": 0.05461230000000006,
"count": 1,
"self": 0.05461230000000006
},
"TrainerController.start_learning": {
"total": 6265.8947542,
"count": 1,
"self": 7.407852700154763,
"children": {
"TrainerController._reset_env": {
"total": 2.1450293,
"count": 1,
"self": 2.1450293
},
"TrainerController.advance": {
"total": 6256.090983499846,
"count": 410255,
"self": 7.047772699621419,
"children": {
"env_step": {
"total": 6249.043210800224,
"count": 410255,
"self": 3945.614399300365,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2298.5497327999533,
"count": 410255,
"self": 12.924030499980745,
"children": {
"TorchPolicy.evaluate": {
"total": 2285.6257022999725,
"count": 400115,
"self": 2285.6257022999725
}
}
},
"workers": {
"total": 4.879078699905563,
"count": 410255,
"self": 0.0,
"children": {
"worker_root": {
"total": 6256.259584099772,
"count": 410255,
"is_parallel": true,
"self": 2683.3526290995705,
"children": {
"steps_from_proto": {
"total": 0.006434500000000121,
"count": 1,
"is_parallel": true,
"self": 0.00010860000000012526,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.006280299999999794,
"count": 2,
"is_parallel": true,
"self": 3.149999999974007e-05,
"children": {
"_observation_to_np_array": {
"total": 0.006248800000000054,
"count": 3,
"is_parallel": true,
"self": 2.990000000013815e-05,
"children": {
"process_pixels": {
"total": 0.006218899999999916,
"count": 3,
"is_parallel": true,
"self": 0.00022369999999982682,
"children": {
"image_decompress": {
"total": 0.005995200000000089,
"count": 3,
"is_parallel": true,
"self": 0.005995200000000089
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 4.5600000000201035e-05,
"count": 2,
"is_parallel": true,
"self": 4.5600000000201035e-05
}
}
},
"UnityEnvironment.step": {
"total": 3572.9005205002013,
"count": 410255,
"is_parallel": true,
"self": 25.289790000015273,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.783056700118195,
"count": 410255,
"is_parallel": true,
"self": 23.783056700118195
},
"communicator.exchange": {
"total": 3333.86441219992,
"count": 410255,
"is_parallel": true,
"self": 3333.86441219992
},
"steps_from_proto": {
"total": 189.96326160014746,
"count": 410255,
"is_parallel": true,
"self": 37.51387799958056,
"children": {
"_process_maybe_compressed_observation": {
"total": 136.24381189993596,
"count": 820510,
"is_parallel": true,
"self": 10.865884400393668,
"children": {
"_observation_to_np_array": {
"total": 125.3779274995423,
"count": 1238316,
"is_parallel": true,
"self": 10.506369699743033,
"children": {
"process_pixels": {
"total": 114.87155779979926,
"count": 1238316,
"is_parallel": true,
"self": 54.45053659963225,
"children": {
"image_decompress": {
"total": 60.42102120016701,
"count": 1238316,
"is_parallel": true,
"self": 60.42102120016701
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 16.20557170063095,
"count": 820510,
"is_parallel": true,
"self": 16.20557170063095
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.889999970851932e-05,
"count": 1,
"self": 3.889999970851932e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 6260.071211400152,
"count": 306706,
"is_parallel": true,
"self": 9.574552300050527,
"children": {
"process_trajectory": {
"total": 5436.687501000099,
"count": 306706,
"is_parallel": true,
"self": 5436.110636700099,
"children": {
"RLTrainer._checkpoint": {
"total": 0.57686429999967,
"count": 2,
"is_parallel": true,
"self": 0.57686429999967
}
}
},
"_update_policy": {
"total": 813.8091581000028,
"count": 143,
"is_parallel": true,
"self": 548.5334012999971,
"children": {
"TorchPPOOptimizer.update": {
"total": 265.27575680000575,
"count": 3438,
"is_parallel": true,
"self": 265.27575680000575
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.25084979999974166,
"count": 1,
"self": 0.007685399999900255,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2431643999998414,
"count": 1,
"self": 0.2431643999998414
}
}
}
}
}
}
}