{
  "name": "root",
  "gauges": {
    "Agent.Policy.Entropy.mean": {
      "value": 3.915287971496582,
      "min": 3.8286855220794678,
      "max": 4.388269424438477,
      "count": 100
    },
    "Agent.Policy.Entropy.sum": {
      "value": 14165.51171875,
      "min": 8352.5712890625,
      "max": 29077.494140625,
      "count": 100
    },
    "Agent.WildfireResourceManagement.IndividualResourceCount.mean": {
      "value": 0.900000093711747,
      "min": 0.5527778027786149,
      "max": 0.9000000953674316,
      "count": 100
    },
    "Agent.WildfireResourceManagement.IndividualResourceCount.sum": {
      "value": 16.200001686811447,
      "min": 7.100000619888306,
      "max": 38.90000382065773,
      "count": 100
    },
    "Agent.WildfireResourceManagement.RewardforMovingResourcestoNeighbours.mean": {
      "value": 1.427949388175168,
      "min": 0.8593527575241864,
      "max": 13.473621871736315,
      "count": 100
    },
    "Agent.WildfireResourceManagement.RewardforMovingResourcestoNeighbours.sum": {
      "value": 25.703088987153023,
      "min": 20.61485666036606,
      "max": 331.6421630382538,
      "count": 100
    },
    "Agent.WildfireResourceManagement.RewardforMovingResourcestoSelf.mean": {
      "value": 245.42562950981988,
      "min": 39.79780564043257,
      "max": 619.4954562717014,
      "count": 100
    },
    "Agent.WildfireResourceManagement.RewardforMovingResourcestoSelf.sum": {
      "value": 4417.661331176758,
      "min": 1432.7210030555725,
      "max": 7888.533269882202,
      "count": 100
    },
    "Agent.WildfireResourceManagement.CollectivePerformance.mean": {
      "value": 56.38962088690864,
      "min": 25.348989952935113,
      "max": 134.0869344075521,
      "count": 100
    },
    "Agent.WildfireResourceManagement.CollectivePerformance.sum": {
      "value": 1015.0131759643555,
      "min": 597.5732574462891,
      "max": 1808.8400115966797,
      "count": 100
    },
    "Agent.WildfireResourceManagement.IndividualPerformance.mean": {
      "value": 28.600747956169975,
      "min": 13.09142631954617,
      "max": 72.36796273125543,
      "count": 100
    },
    "Agent.WildfireResourceManagement.IndividualPerformance.sum": {
      "value": 514.8134632110596,
      "min": 316.33687686920166,
      "max": 930.763605594635,
      "count": 100
    },
    "Agent.Environment.LessonNumber.difficulty.mean": {
      "value": 0.0,
      "min": 0.0,
      "max": 0.0,
      "count": 100
    },
    "Agent.Environment.LessonNumber.difficulty.sum": {
      "value": 0.0,
      "min": 0.0,
      "max": 0.0,
      "count": 100
    },
    "Agent.Environment.LessonNumber.task.mean": {
      "value": 0.0,
      "min": 0.0,
      "max": 0.0,
      "count": 100
    },
    "Agent.Environment.LessonNumber.task.sum": {
      "value": 0.0,
      "min": 0.0,
      "max": 0.0,
      "count": 100
    },
    "Agent.Environment.EpisodeLength.mean": {
      "value": 200.5,
      "min": 99.6,
      "max": 454.0,
      "count": 100
    },
    "Agent.Environment.EpisodeLength.sum": {
      "value": 3609.0,
      "min": 1971.0,
      "max": 6993.0,
      "count": 100
    },
    "Agent.Step.mean": {
      "value": 449967.0,
      "min": 4460.0,
      "max": 449967.0,
      "count": 100
    },
    "Agent.Step.sum": {
      "value": 449967.0,
      "min": 4460.0,
      "max": 449967.0,
      "count": 100
    },
    "Agent.Policy.CuriosityValueEstimate.mean": {
      "value": 0.007648792117834091,
      "min": -0.037885017693042755,
      "max": 0.058897458016872406,
      "count": 100
    },
    "Agent.Policy.CuriosityValueEstimate.sum": {
      "value": 0.13002946972846985,
      "min": -0.7198153138160706,
      "max": 1.5313339233398438,
      "count": 100
    },
    "Agent.Policy.ExtrinsicValueEstimate.mean": {
      "value": 79.99057006835938,
      "min": 1.986396312713623,
      "max": 122.08625793457031,
      "count": 100
    },
    "Agent.Policy.ExtrinsicValueEstimate.sum": {
      "value": 1359.8397216796875,
      "min": 49.659908294677734,
      "max": 3734.32177734375,
      "count": 100
    },
    "Agent.Environment.CumulativeReward.mean": {
      "value": 337.52348866182217,
      "min": 95.12189712524415,
      "max": 545.2072296142578,
      "count": 100
    },
    "Agent.Environment.CumulativeReward.sum": {
      "value": 5737.899307250977,
      "min": 2378.0474281311035,
      "max": 9027.143272399902,
      "count": 100
    },
    "Agent.Policy.CuriosityReward.mean": {
      "value": 0.0344109117765637,
      "min": 0.018234928297859273,
      "max": 0.3611468697587649,
      "count": 100
    },
    "Agent.Policy.CuriosityReward.sum": {
      "value": 0.5849855002015829,
      "min": 0.4833681769669056,
      "max": 5.4172030463814735,
      "count": 100
    },
    "Agent.Policy.ExtrinsicReward.mean": {
      "value": 337.52348866182217,
      "min": 95.12189712524415,
      "max": 545.2072296142578,
      "count": 100
    },
    "Agent.Policy.ExtrinsicReward.sum": {
      "value": 5737.899307250977,
      "min": 2378.0474281311035,
      "max": 9027.143272399902,
      "count": 100
    },
    "Agent.Losses.PolicyLoss.mean": {
      "value": 0.06865449139139485,
      "min": 0.05436178324486788,
      "max": 0.088415523402785,
      "count": 99
    },
    "Agent.Losses.PolicyLoss.sum": {
      "value": 0.1373089827827897,
      "min": 0.058083717240601455,
      "max": 0.15414777564144722,
      "count": 99
    },
    "Agent.Losses.ValueLoss.mean": {
      "value": 505.428675700849,
      "min": 69.4129291436611,
      "max": 979.6034150965073,
      "count": 99
    },
    "Agent.Losses.ValueLoss.sum": {
      "value": 1010.857351401698,
      "min": 69.4129291436611,
      "max": 1746.0488988430955,
      "count": 99
    },
    "Agent.Policy.LearningRate.mean": {
      "value": 8.670997110000013e-07,
      "min": 8.670997110000013e-07,
      "max": 0.00029775000075000005,
      "count": 99
    },
    "Agent.Policy.LearningRate.sum": {
      "value": 1.7341994220000026e-06,
      "min": 1.7341994220000026e-06,
      "max": 0.0005801040066320001,
      "count": 99
    },
    "Agent.Policy.Epsilon.mean": {
      "value": 0.10028899999999999,
      "min": 0.10028899999999999,
      "max": 0.19924999999999993,
      "count": 99
    },
    "Agent.Policy.Epsilon.sum": {
      "value": 0.20057799999999998,
      "min": 0.10254,
      "max": 0.39336799999999994,
      "count": 99
    },
    "Agent.Policy.Beta.mean": {
      "value": 3.887110000000003e-05,
      "min": 3.887110000000003e-05,
      "max": 0.009925074999999997,
      "count": 99
    },
    "Agent.Policy.Beta.sum": {
      "value": 7.774220000000006e-05,
      "min": 7.774220000000006e-05,
      "max": 0.0193374632,
      "count": 99
    },
    "Agent.Losses.CuriosityForwardLoss.mean": {
      "value": 0.005907778681916601,
      "min": 0.005171077248329918,
      "max": 0.3516008852957151,
      "count": 99
    },
    "Agent.Losses.CuriosityForwardLoss.sum": {
      "value": 0.011815557363833202,
      "min": 0.005171077248329918,
      "max": 0.3516008852957151,
      "count": 99
    },
    "Agent.Losses.CuriosityInverseLoss.mean": {
      "value": 3.8244420469550953,
      "min": 3.732571636504243,
      "max": 4.390753862185356,
      "count": 99
    },
    "Agent.Losses.CuriosityInverseLoss.sum": {
      "value": 7.6488840939101905,
      "min": 3.736011778866803,
      "max": 8.560909711796306,
      "count": 99
    },
    "Agent.IsTraining.mean": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 100
    },
    "Agent.IsTraining.sum": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 100
    }
  },
  "metadata": {
    "timer_format_version": "0.1.0",
    "start_time_seconds": "1716667366",
    "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
    "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/WildfireResourceManagement_difficulty_7_task_1_run_id_0_train.yaml --run-id=WildfireResourceManagement/train/WildfireResourceManagement_difficulty_7_task_1_run_id_0_train --base-port 5006",
    "mlagents_version": "0.30.0",
    "mlagents_envs_version": "0.30.0",
    "communication_protocol_version": "1.5.0",
    "pytorch_version": "1.7.1+cu110",
    "numpy_version": "1.21.0",
    "end_time_seconds": "1716669282"
  },
  "total": 1916.3543140000002,
  "count": 1,
  "self": 0.20027280000022074,
  "children": {
    "run_training.setup": {
      "total": 0.0532956,
      "count": 1,
      "self": 0.0532956
    },
    "TrainerController.start_learning": {
      "total": 1916.1007456,
      "count": 1,
      "self": 1.2124744000045666,
      "children": {
        "TrainerController._reset_env": {
          "total": 2.1096662,
          "count": 1,
          "self": 2.1096662
        },
        "TrainerController.advance": {
          "total": 1912.6821870999956,
          "count": 50136,
          "self": 0.9964985999811233,
          "children": {
            "env_step": {
              "total": 1911.6856885000145,
              "count": 50136,
              "self": 1633.4743607000073,
              "children": {
                "SubprocessEnvManager._take_step": {
                  "total": 277.7303721999926,
                  "count": 50136,
                  "self": 1.7871896999790806,
                  "children": {
                    "TorchPolicy.evaluate": {
                      "total": 275.9431825000135,
                      "count": 50136,
                      "self": 275.9431825000135
                    }
                  }
                },
                "workers": {
                  "total": 0.48095560001450854,
                  "count": 50136,
                  "self": 0.0,
                  "children": {
                    "worker_root": {
                      "total": 1913.4493875999985,
                      "count": 50136,
                      "is_parallel": true,
                      "self": 340.3863610999763,
                      "children": {
                        "steps_from_proto": {
                          "total": 0.0002394999999999481,
                          "count": 1,
                          "is_parallel": true,
                          "self": 0.00011400000000016952,
                          "children": {
                            "_process_rank_one_or_two_observation": {
                              "total": 0.00012549999999977857,
                              "count": 2,
                              "is_parallel": true,
                              "self": 0.00012549999999977857
                            }
                          }
                        },
                        "UnityEnvironment.step": {
                          "total": 1573.0627870000224,
                          "count": 50136,
                          "is_parallel": true,
                          "self": 3.4146254000422687,
                          "children": {
                            "UnityEnvironment._generate_step_input": {
                              "total": 4.22944119999039,
                              "count": 50136,
                              "is_parallel": true,
                              "self": 4.22944119999039
                            },
                            "communicator.exchange": {
                              "total": 1555.4927993000078,
                              "count": 50136,
                              "is_parallel": true,
                              "self": 1555.4927993000078
                            },
                            "steps_from_proto": {
                              "total": 9.925921099981919,
                              "count": 50136,
                              "is_parallel": true,
                              "self": 4.999919899978398,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 4.926001200003521,
                                  "count": 100272,
                                  "is_parallel": true,
                                  "self": 4.926001200003521
                                }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        },
        "trainer_threads": {
          "total": 2.2099999796409975e-05,
          "count": 1,
          "self": 2.2099999796409975e-05,
          "children": {
            "thread_root": {
              "total": 0.0,
              "count": 0,
              "is_parallel": true,
              "self": 0.0,
              "children": {
                "trainer_advance": {
                  "total": 1912.6062513000165,
                  "count": 97830,
                  "is_parallel": true,
                  "self": 3.4296286000389955,
                  "children": {
                    "process_trajectory": {
                      "total": 1551.7061777999797,
                      "count": 97830,
                      "is_parallel": true,
                      "self": 1551.7061777999797
                    },
                    "_update_policy": {
                      "total": 357.47044489999786,
                      "count": 151,
                      "is_parallel": true,
                      "self": 175.59495200000754,
                      "children": {
                        "TorchPPOOptimizer.update": {
                          "total": 181.87549289999032,
                          "count": 10320,
                          "is_parallel": true,
                          "self": 181.87549289999032
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        },
        "TrainerController._save_models": {
          "total": 0.09639579999998205,
          "count": 1,
          "self": 0.004710700000032375,
          "children": {
            "RLTrainer._checkpoint": {
              "total": 0.09168509999994967,
              "count": 1,
              "self": 0.09168509999994967
            }
          }
        }
      }
    }
  }
}