{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.4383124113082886,
"min": 1.4189385175704956,
"max": 1.4411534070968628,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 8634.189453125,
"min": 7150.2314453125,
"max": 10017.78515625,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 51.93333333333333,
"min": 0.0,
"max": 685.3333333333334,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 779.0,
"min": 0.0,
"max": 10280.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.6,
"min": 0.4,
"max": 0.8,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 9.0,
"min": 6.0,
"max": 15.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 399.0,
"min": 276.6666666666667,
"max": 399.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 5985.0,
"min": 4947.0,
"max": 6960.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199960.0,
"min": 5600.0,
"max": 1199960.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199960.0,
"min": 5600.0,
"max": 1199960.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.18753686547279358,
"min": 0.029313333332538605,
"max": 0.9013132452964783,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 2.8130528926849365,
"min": 0.43970000743865967,
"max": 15.387266159057617,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.3113268613815308,
"min": -0.2714996933937073,
"max": 3.178737163543701,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 19.669902801513672,
"min": -4.615494728088379,
"max": 63.57474136352539,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 12.308501215775808,
"min": -1.065999984741211,
"max": 29.166904857009648,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 184.62751823663712,
"min": -15.989999771118164,
"max": 583.338097140193,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 0.7823496143023173,
"min": 0.0,
"max": 14.209809799989065,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 11.73524421453476,
"min": 0.0,
"max": 255.77657639980316,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 11.077649823824565,
"min": -0.959400208791097,
"max": 26.25020624976605,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 166.16474735736847,
"min": -14.391003131866455,
"max": 525.004124995321,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.019534885805721085,
"min": 0.01461618389779081,
"max": 0.03142788942204788,
"count": 139
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.019534885805721085,
"min": 0.01461618389779081,
"max": 0.03142788942204788,
"count": 139
},
"Agent.Losses.ValueLoss.mean": {
"value": 7.590193698803584,
"min": 0.009250369990089288,
"max": 22.191834847132366,
"count": 139
},
"Agent.Losses.ValueLoss.sum": {
"value": 7.590193698803584,
"min": 0.009250369990089288,
"max": 22.191834847132366,
"count": 139
},
"Agent.Policy.LearningRate.mean": {
"value": 1.7100994300000114e-06,
"min": 1.7100994300000114e-06,
"max": 0.0002979000007,
"count": 139
},
"Agent.Policy.LearningRate.sum": {
"value": 1.7100994300000114e-06,
"min": 1.7100994300000114e-06,
"max": 0.0002979000007,
"count": 139
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10057,
"min": 0.10057,
"max": 0.1993,
"count": 139
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10057,
"min": 0.10057,
"max": 0.1993,
"count": 139
},
"Agent.Policy.Beta.mean": {
"value": 3.8443000000000194e-05,
"min": 3.8443000000000194e-05,
"max": 0.00496507,
"count": 139
},
"Agent.Policy.Beta.sum": {
"value": 3.8443000000000194e-05,
"min": 3.8443000000000194e-05,
"max": 0.00496507,
"count": 139
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.018805414671078324,
"min": 0.018531922328596313,
"max": 0.5835292177895705,
"count": 139
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.018805414671078324,
"min": 0.018531922328596313,
"max": 0.5835292177895705,
"count": 139
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 2.063263719280561,
"min": 2.063263719280561,
"max": 3.310828685760498,
"count": 139
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 2.063263719280561,
"min": 2.063263719280561,
"max": 3.310828685760498,
"count": 139
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717363623",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_8_task_2_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_8_task_2_run_id_1_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717367556"
},
"total": 3932.6368317,
"count": 1,
"self": 0.33526570000049105,
"children": {
"run_training.setup": {
"total": 0.05411140000000003,
"count": 1,
"self": 0.05411140000000003
},
"TrainerController.start_learning": {
"total": 3932.2474546,
"count": 1,
"self": 6.611995300156195,
"children": {
"TrainerController._reset_env": {
"total": 2.1895862,
"count": 1,
"self": 2.1895862
},
"TrainerController.advance": {
"total": 3923.2920489998437,
"count": 401018,
"self": 6.266614899957403,
"children": {
"env_step": {
"total": 3917.0254340998863,
"count": 401018,
"self": 1772.5127145997953,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2140.1856635000786,
"count": 401018,
"self": 12.527107300129956,
"children": {
"TorchPolicy.evaluate": {
"total": 2127.6585561999486,
"count": 400127,
"self": 2127.6585561999486
}
}
},
"workers": {
"total": 4.32705600001256,
"count": 401018,
"self": 0.0,
"children": {
"worker_root": {
"total": 3923.3867241998464,
"count": 401018,
"is_parallel": true,
"self": 2392.168544599982,
"children": {
"steps_from_proto": {
"total": 0.006626400000000032,
"count": 1,
"is_parallel": true,
"self": 0.00011140000000020578,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.006463200000000002,
"count": 2,
"is_parallel": true,
"self": 3.4400000000101016e-05,
"children": {
"_observation_to_np_array": {
"total": 0.006428799999999901,
"count": 3,
"is_parallel": true,
"self": 3.099999999967018e-05,
"children": {
"process_pixels": {
"total": 0.006397800000000231,
"count": 3,
"is_parallel": true,
"self": 0.0002532000000001755,
"children": {
"image_decompress": {
"total": 0.006144600000000056,
"count": 3,
"is_parallel": true,
"self": 0.006144600000000056
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 5.179999999982421e-05,
"count": 2,
"is_parallel": true,
"self": 5.179999999982421e-05
}
}
},
"UnityEnvironment.step": {
"total": 1531.2115531998643,
"count": 401018,
"is_parallel": true,
"self": 21.651849299717014,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.908498600078183,
"count": 401018,
"is_parallel": true,
"self": 22.908498600078183
},
"communicator.exchange": {
"total": 1309.0341702000505,
"count": 401018,
"is_parallel": true,
"self": 1309.0341702000505
},
"steps_from_proto": {
"total": 177.61703510001837,
"count": 401018,
"is_parallel": true,
"self": 35.42022049981796,
"children": {
"_process_maybe_compressed_observation": {
"total": 126.86174580010967,
"count": 802036,
"is_parallel": true,
"self": 9.973506800447524,
"children": {
"_observation_to_np_array": {
"total": 116.88823899966215,
"count": 1203663,
"is_parallel": true,
"self": 9.71708909982108,
"children": {
"process_pixels": {
"total": 107.17114989984107,
"count": 1203663,
"is_parallel": true,
"self": 50.424138200065926,
"children": {
"image_decompress": {
"total": 56.74701169977514,
"count": 1203663,
"is_parallel": true,
"self": 56.74701169977514
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 15.335068800090731,
"count": 802036,
"is_parallel": true,
"self": 15.335068800090731
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.5999999706982635e-05,
"count": 1,
"self": 2.5999999706982635e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3927.738197499958,
"count": 191335,
"is_parallel": true,
"self": 6.626278599947,
"children": {
"process_trajectory": {
"total": 3127.11194970001,
"count": 191335,
"is_parallel": true,
"self": 3126.65457470001,
"children": {
"RLTrainer._checkpoint": {
"total": 0.45737499999995634,
"count": 2,
"is_parallel": true,
"self": 0.45737499999995634
}
}
},
"_update_policy": {
"total": 793.9999692000013,
"count": 139,
"is_parallel": true,
"self": 530.8008843000057,
"children": {
"TorchPPOOptimizer.update": {
"total": 263.1990848999955,
"count": 3354,
"is_parallel": true,
"self": 263.1990848999955
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15379810000013094,
"count": 1,
"self": 0.007437400000526395,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14636069999960455,
"count": 1,
"self": 0.14636069999960455
}
}
}
}
}
}
}