{ "name": "root", "gauges": { "Agent.Policy.Entropy.mean": { "value": 1.3061087131500244, "min": 1.3061087131500244, "max": 1.4217795133590698, "count": 200 }, "Agent.Policy.Entropy.sum": { "value": 7444.8193359375, "min": 6594.3779296875, "max": 9651.1162109375, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.mean": { "value": 0.015625, "min": 0.0, "max": 0.23076923076923078, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.sum": { "value": 3.0, "min": 0.0, "max": 9.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.EpisodeLength.mean": { "value": 28.796875, "min": 19.07070707070707, "max": 244.22222222222223, "count": 200 }, "Agent.Environment.EpisodeLength.sum": { "value": 5529.0, "min": 4677.0, "max": 6963.0, "count": 200 }, "Agent.Step.mean": { "value": 1199988.0, "min": 5913.0, "max": 1199988.0, "count": 200 }, "Agent.Step.sum": { "value": 1199988.0, "min": 5913.0, "max": 1199988.0, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.mean": { "value": 0.5123575925827026, "min": -0.003072895808145404, "max": 1.1557989120483398, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.sum": { "value": 97.34794616699219, "min": -0.09525977075099945, "max": 296.294921875, "count": 200 }, 
"Agent.Policy.ExtrinsicValueEstimate.mean": { "value": 73.4503173828125, "min": 0.07052389532327652, "max": 81.7844467163086, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.sum": { "value": 13955.560546875, "min": 1.7630974054336548, "max": 23572.26171875, "count": 200 }, "Agent.Environment.CumulativeReward.mean": { "value": 94.65837076325091, "min": 71.42488220405693, "max": 107.31066000690826, "count": 200 }, "Agent.Environment.CumulativeReward.sum": { "value": 17985.090445017675, "min": 1884.4158946573734, "max": 29685.348208050244, "count": 200 }, "Agent.Policy.CuriosityReward.mean": { "value": 0.08526155473603132, "min": 0.0, "max": 5.707144324596111, "count": 200 }, "Agent.Policy.CuriosityReward.sum": { "value": 16.19969539984595, "min": 0.0, "max": 222.57862865924835, "count": 200 }, "Agent.Policy.ExtrinsicReward.mean": { "value": 85.19253195627458, "min": 64.28239143181305, "max": 96.57959257448331, "count": 200 }, "Agent.Policy.ExtrinsicReward.sum": { "value": 16186.58107169217, "min": 1695.9742895364761, "max": 26716.812616853043, "count": 200 }, "Agent.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.Losses.PolicyLoss.mean": { "value": 0.02436118039380138, "min": 0.014788141299504787, "max": 0.03415193311714878, "count": 144 }, "Agent.Losses.PolicyLoss.sum": { "value": 0.02436118039380138, "min": 0.014788141299504787, "max": 0.03415193311714878, "count": 144 }, "Agent.Losses.ValueLoss.mean": { "value": 1433.8892110188801, "min": 323.4534556070964, "max": 2381.5792338053384, "count": 144 }, "Agent.Losses.ValueLoss.sum": { "value": 1433.8892110188801, "min": 323.4534556070964, "max": 2381.5792338053384, "count": 144 }, "Agent.Policy.LearningRate.mean": { "value": 1.199349600249996e-06, "min": 1.199349600249996e-06, "max": 0.00029789475070175, "count": 144 }, "Agent.Policy.LearningRate.sum": { "value": 1.199349600249996e-06, "min": 1.199349600249996e-06, "max": 0.00029789475070175, "count": 144 }, "Agent.Policy.Epsilon.mean": { "value": 0.10039975000000001, "min": 0.10039975000000001, "max": 0.19929825, "count": 144 }, "Agent.Policy.Epsilon.sum": { "value": 0.10039975000000001, "min": 0.10039975000000001, "max": 0.19929825, "count": 144 }, "Agent.Policy.Beta.mean": { "value": 2.9947524999999938e-05, "min": 2.9947524999999938e-05, "max": 0.004964982675, "count": 144 }, "Agent.Policy.Beta.sum": { "value": 2.9947524999999938e-05, "min": 2.9947524999999938e-05, "max": 0.004964982675, "count": 144 }, "Agent.Losses.CuriosityForwardLoss.mean": { "value": 0.027251241961494088, "min": 0.019890636671334505, "max": 0.5723420046269894, "count": 144 }, "Agent.Losses.CuriosityForwardLoss.sum": { "value": 0.027251241961494088, "min": 0.019890636671334505, "max": 0.5723420046269894, "count": 144 }, "Agent.Losses.CuriosityInverseLoss.mean": { "value": 1.4139881233374278, "min": 1.2391985456148784, "max": 3.305937925974528, "count": 144 }, "Agent.Losses.CuriosityInverseLoss.sum": { "value": 1.4139881233374278, "min": 1.2391985456148784, "max": 3.305937925974528, "count": 144 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1717243111", "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn 
c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train --base-port 5007", "mlagents_version": "0.30.0", "mlagents_envs_version": "0.30.0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.7.1+cu110", "numpy_version": "1.21.0", "end_time_seconds": "1717248803" }, "total": 5692.3659379, "count": 1, "self": 1.7120566000003237, "children": { "run_training.setup": { "total": 0.051362200000000025, "count": 1, "self": 0.051362200000000025 }, "TrainerController.start_learning": { "total": 5690.6025191, "count": 1, "self": 5.72295740008758, "children": { "TrainerController._reset_env": { "total": 1.9943673, "count": 1, "self": 1.9943673 }, "TrainerController.advance": { "total": 5682.726238999912, "count": 410620, "self": 5.277235299933636, "children": { "env_step": { "total": 5677.449003699979, "count": 410620, "self": 3618.1533793998547, "children": { "SubprocessEnvManager._take_step": { "total": 2055.5891080000556, "count": 410620, "self": 10.855453400108672, "children": { "TorchPolicy.evaluate": { "total": 2044.733654599947, "count": 400011, "self": 2044.733654599947 } } }, "workers": { "total": 3.7065163000684045, "count": 410620, "self": 0.0, "children": { "worker_root": { "total": 5683.471061399958, "count": 410620, "is_parallel": true, "self": 2355.185482599993, "children": { "steps_from_proto": { "total": 0.006364299999999989, "count": 1, "is_parallel": true, "self": 0.00010039999999977844, "children": { "_process_maybe_compressed_observation": { "total": 0.00621930000000015, "count": 2, "is_parallel": true, "self": 2.8200000000033754e-05, "children": { "_observation_to_np_array": { "total": 0.006191100000000116, "count": 3, "is_parallel": true, "self": 2.9800000000301807e-05, "children": { "process_pixels": { "total": 0.006161299999999814, "count": 3, "is_parallel": true, "self": 0.00023290000000009137, "children": { "image_decompress": { "total": 0.005928399999999723, "count": 3, "is_parallel": true, "self": 0.005928399999999723 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 4.460000000006126e-05, "count": 2, "is_parallel": true, "self": 4.460000000006126e-05 } } }, "UnityEnvironment.step": { "total": 3328.279214499965, "count": 410620, "is_parallel": true, "self": 19.825185299910117, "children": { "UnityEnvironment._generate_step_input": { "total": 19.60430590010752, "count": 410620, "is_parallel": true, "self": 19.60430590010752 }, "communicator.exchange": { "total": 3146.9941028000803, "count": 410620, "is_parallel": true, "self": 3146.9941028000803 }, "steps_from_proto": { "total": 141.85562049986675, "count": 410620, "is_parallel": true, "self": 28.017311199988725, "children": { "_process_maybe_compressed_observation": { "total": 101.8772321999369, "count": 821240, "is_parallel": true, "self": 8.001321499805357, "children": { "_observation_to_np_array": { "total": 93.87591070013154, "count": 1239717, "is_parallel": true, "self": 7.759124300138225, "children": { "process_pixels": { "total": 86.11678639999332, "count": 1239717, "is_parallel": true, "self": 40.50604840019888, "children": { "image_decompress": { "total": 45.61073799979444, "count": 1239717, "is_parallel": true, "self": 45.61073799979444 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 11.96107709994112, "count": 821240, "is_parallel": true, "self": 11.96107709994112 } } 
} } } } } } } } } } }, "trainer_threads": { "total": 3.680000008898787e-05, "count": 1, "self": 3.680000008898787e-05, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 5685.509467500165, "count": 281719, "is_parallel": true, "self": 6.973894799978552, "children": { "process_trajectory": { "total": 4957.248087100189, "count": 281719, "is_parallel": true, "self": 4956.869032000189, "children": { "RLTrainer._checkpoint": { "total": 0.37905510000018694, "count": 2, "is_parallel": true, "self": 0.37905510000018694 } } }, "_update_policy": { "total": 721.2874855999976, "count": 144, "is_parallel": true, "self": 483.4252829999915, "children": { "TorchPPOOptimizer.update": { "total": 237.86220260000604, "count": 3456, "is_parallel": true, "self": 237.86220260000604 } } } } } } } } }, "TrainerController._save_models": { "total": 0.15891860000010638, "count": 1, "self": 0.005721599999560567, "children": { "RLTrainer._checkpoint": { "total": 0.1531970000005458, "count": 1, "self": 0.1531970000005458 } } } } } } }