{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 0.790346086025238,
"min": 0.7343851327896118,
"max": 1.096280813217163,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 31613.84375,
"min": 29387.15625,
"max": 43931.8515625,
"count": 200
},
"Agent.Environment.LessonNumber.pattern.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.pattern.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Step.mean": {
"value": 7999096.0,
"min": 39096.0,
"max": 7999096.0,
"count": 200
},
"Agent.Step.sum": {
"value": 7999096.0,
"min": 39096.0,
"max": 7999096.0,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.780200004577637,
"min": 2.4280426502227783,
"max": 9.820754051208496,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 234.72479248046875,
"min": 55.84497833251953,
"max": 235.69810485839844,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.04866604715246625,
"min": 0.04545393447545986,
"max": 0.05493653173690536,
"count": 200
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.14599814145739876,
"min": 0.10218151656833166,
"max": 0.1648095952107161,
"count": 200
},
"Agent.Losses.ValueLoss.mean": {
"value": 0.003906352770591473,
"min": 0.002974676047400978,
"max": 3.8673377021526294,
"count": 200
},
"Agent.Losses.ValueLoss.sum": {
"value": 0.011719058311774418,
"min": 0.008924028142202934,
"max": 10.25289195190583,
"count": 200
},
"Agent.Policy.LearningRate.mean": {
"value": 8.856997047999973e-07,
"min": 8.856997047999973e-07,
"max": 0.00029907840030719997,
"count": 200
},
"Agent.Policy.LearningRate.sum": {
"value": 2.657099114399992e-06,
"min": 2.657099114399992e-06,
"max": 0.0008936568021144,
"count": 200
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10029520000000001,
"min": 0.10029520000000001,
"max": 0.1996928,
"count": 200
},
"Agent.Policy.Epsilon.sum": {
"value": 0.30088560000000003,
"min": 0.30088560000000003,
"max": 0.5978856,
"count": 200
},
"Agent.Policy.Beta.mean": {
"value": 2.4730479999999958e-05,
"min": 2.4730479999999958e-05,
"max": 0.004984670720000001,
"count": 200
},
"Agent.Policy.Beta.sum": {
"value": 7.419143999999988e-05,
"min": 7.419143999999988e-05,
"max": 0.014894491440000001,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 4999.0,
"min": 4999.0,
"max": 4999.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 39992.0,
"min": 39992.0,
"max": 39992.0,
"count": 200
},
"Agent.WindFarmControl.IndividualPerformance.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.WindFarmControl.IndividualPerformance.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.WindFarmControl.AvoidDamageReward.mean": {
"value": 4875.396667480469,
"min": 1677.5268249511719,
"max": 4897.457214355469,
"count": 200
},
"Agent.WindFarmControl.AvoidDamageReward.sum": {
"value": 39003.17333984375,
"min": 13420.214599609375,
"max": 39179.65771484375,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 4857.6708984375,
"min": 1789.2040557861328,
"max": 4891.219200134277,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 38861.3671875,
"min": 14313.632446289062,
"max": 39129.75360107422,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 4857.6708984375,
"min": 1789.2040557861328,
"max": 4891.219200134277,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 38861.3671875,
"min": 14313.632446289062,
"max": 39129.75360107422,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715269261",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/WindFarmControl_pattern_6_task_1_run_id_1_train.yaml --run-id=WindFarmControl/train/WindFarmControl_pattern_6_task_1_run_id_1_train",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.2",
"end_time_seconds": "1715273193"
},
"total": 3932.1076965,
"count": 1,
"self": 0.2694571000001815,
"children": {
"run_training.setup": {
"total": 0.057912399999999975,
"count": 1,
"self": 0.057912399999999975
},
"TrainerController.start_learning": {
"total": 3931.780327,
"count": 1,
"self": 11.143726699908257,
"children": {
"TrainerController._reset_env": {
"total": 1.6015077999999998,
"count": 1,
"self": 1.6015077999999998
},
"TrainerController.advance": {
"total": 3919.008560400092,
"count": 1002053,
"self": 10.426466300008997,
"children": {
"env_step": {
"total": 3908.582094100083,
"count": 1002053,
"self": 1664.7135692999927,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2237.3401259001093,
"count": 1002053,
"self": 28.281610000073215,
"children": {
"TorchPolicy.evaluate": {
"total": 2209.058515900036,
"count": 1002053,
"self": 2209.058515900036
}
}
},
"workers": {
"total": 6.528398899980708,
"count": 1002053,
"self": 0.0,
"children": {
"worker_root": {
"total": 3916.478589399937,
"count": 1002053,
"is_parallel": true,
"self": 2935.2456467999436,
"children": {
"steps_from_proto": {
"total": 0.00024140000000005823,
"count": 1,
"is_parallel": true,
"self": 0.00011190000000005362,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0001295000000000046,
"count": 2,
"is_parallel": true,
"self": 0.0001295000000000046
}
}
},
"UnityEnvironment.step": {
"total": 981.2327011999935,
"count": 1002053,
"is_parallel": true,
"self": 52.88822529990375,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.6870290998966,
"count": 1002053,
"is_parallel": true,
"self": 86.6870290998966
},
"communicator.exchange": {
"total": 696.799883800111,
"count": 1002053,
"is_parallel": true,
"self": 696.799883800111
},
"steps_from_proto": {
"total": 144.85756300008217,
"count": 1002053,
"is_parallel": true,
"self": 81.40976620002975,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.447796800052416,
"count": 2004106,
"is_parallel": true,
"self": 63.447796800052416
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.189999986512703e-05,
"count": 1,
"self": 2.189999986512703e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3929.2400280000875,
"count": 110669,
"is_parallel": true,
"self": 4.825245700135383,
"children": {
"process_trajectory": {
"total": 1952.6322472999582,
"count": 110669,
"is_parallel": true,
"self": 1952.1611276999586,
"children": {
"RLTrainer._checkpoint": {
"total": 0.471119599999696,
"count": 16,
"is_parallel": true,
"self": 0.471119599999696
}
}
},
"_update_policy": {
"total": 1971.7825349999937,
"count": 600,
"is_parallel": true,
"self": 580.4958276999798,
"children": {
"TorchPPOOptimizer.update": {
"total": 1391.2867073000139,
"count": 93600,
"is_parallel": true,
"self": 1391.2867073000139
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.026510199999847828,
"count": 1,
"self": 0.005493799999840121,
"children": {
"RLTrainer._checkpoint": {
"total": 0.021016400000007707,
"count": 1,
"self": 0.021016400000007707
}
}
}
}
}
}
}