{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.0432231426239014,
"min": 0.8955050706863403,
"max": 1.095898985862732,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 41770.65234375,
"min": 35848.859375,
"max": 43897.328125,
"count": 200
},
"Agent.Environment.LessonNumber.pattern.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.pattern.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Step.mean": {
"value": 7999096.0,
"min": 39096.0,
"max": 7999096.0,
"count": 200
},
"Agent.Step.sum": {
"value": 7999096.0,
"min": 39096.0,
"max": 7999096.0,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.18587875366211,
"min": 0.5215618014335632,
"max": 9.54175090789795,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 220.46109008789062,
"min": 12.51748275756836,
"max": 229.00201416015625,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.049005920570163564,
"min": 0.044099667442972695,
"max": 0.053326188027096495,
"count": 200
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.1470177617104907,
"min": 0.10106326845076788,
"max": 0.1599785640812895,
"count": 200
},
"Agent.Losses.ValueLoss.mean": {
"value": 0.2060797198029745,
"min": 0.06279220564188324,
"max": 1.518491332186386,
"count": 200
},
"Agent.Losses.ValueLoss.sum": {
"value": 0.6182391594089235,
"min": 0.18837661692564975,
"max": 3.036982664372772,
"count": 200
},
"Agent.Policy.LearningRate.mean": {
"value": 8.856997047999973e-07,
"min": 8.856997047999973e-07,
"max": 0.00029907840030719997,
"count": 200
},
"Agent.Policy.LearningRate.sum": {
"value": 2.657099114399992e-06,
"min": 2.657099114399992e-06,
"max": 0.0008936568021144,
"count": 200
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10029520000000001,
"min": 0.10029520000000001,
"max": 0.1996928,
"count": 200
},
"Agent.Policy.Epsilon.sum": {
"value": 0.30088560000000003,
"min": 0.30088560000000003,
"max": 0.5978856,
"count": 200
},
"Agent.Policy.Beta.mean": {
"value": 2.4730479999999958e-05,
"min": 2.4730479999999958e-05,
"max": 0.004984670720000001,
"count": 200
},
"Agent.Policy.Beta.sum": {
"value": 7.419143999999988e-05,
"min": 7.419143999999988e-05,
"max": 0.014894491440000001,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 4999.0,
"min": 4999.0,
"max": 4999.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 39992.0,
"min": 39992.0,
"max": 39992.0,
"count": 200
},
"Agent.WindFarmControl.IndividualPerformance.mean": {
"value": 4558.020690917969,
"min": 446.97447872161865,
"max": 4685.875671386719,
"count": 200
},
"Agent.WindFarmControl.IndividualPerformance.sum": {
"value": 36464.16552734375,
"min": 3575.795829772949,
"max": 37487.00537109375,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 4554.759002685547,
"min": 470.43567184761196,
"max": 4676.411651611328,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 36438.072021484375,
"min": 3763.4853747808957,
"max": 37411.293212890625,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 4554.759002685547,
"min": 470.43567184761196,
"max": 4676.411651611328,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 36438.072021484375,
"min": 3763.4853747808957,
"max": 37411.293212890625,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715328384",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/WindFarmControl_pattern_5_task_0_run_id_2_train.yaml --run-id=WindFarmControl/train/WindFarmControl_pattern_5_task_0_run_id_2_train",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.2",
"end_time_seconds": "1715331990"
},
"total": 3606.0257179,
"count": 1,
"self": 0.16022790000033638,
"children": {
"run_training.setup": {
"total": 0.04490939999999999,
"count": 1,
"self": 0.04490939999999999
},
"TrainerController.start_learning": {
"total": 3605.8205805999996,
"count": 1,
"self": 10.519716600265838,
"children": {
"TrainerController._reset_env": {
"total": 1.6028524,
"count": 1,
"self": 1.6028524
},
"TrainerController.advance": {
"total": 3593.672235099734,
"count": 1002054,
"self": 9.436604299632563,
"children": {
"env_step": {
"total": 3584.2356308001013,
"count": 1002054,
"self": 1487.7461318999108,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2090.800368300095,
"count": 1002054,
"self": 25.431422900004236,
"children": {
"TorchPolicy.evaluate": {
"total": 2065.368945400091,
"count": 1002054,
"self": 2065.368945400091
}
}
},
"workers": {
"total": 5.68913060009508,
"count": 1002054,
"self": 0.0,
"children": {
"worker_root": {
"total": 3592.593155399929,
"count": 1002054,
"is_parallel": true,
"self": 2738.5719981999655,
"children": {
"steps_from_proto": {
"total": 0.000260000000000038,
"count": 1,
"is_parallel": true,
"self": 0.00012149999999999661,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00013850000000004137,
"count": 2,
"is_parallel": true,
"self": 0.00013850000000004137
}
}
},
"UnityEnvironment.step": {
"total": 854.0208971999637,
"count": 1002054,
"is_parallel": true,
"self": 42.11394360005306,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 62.361521299834585,
"count": 1002054,
"is_parallel": true,
"self": 62.361521299834585
},
"communicator.exchange": {
"total": 631.7332501999549,
"count": 1002054,
"is_parallel": true,
"self": 631.7332501999549
},
"steps_from_proto": {
"total": 117.81218210012103,
"count": 1002054,
"is_parallel": true,
"self": 65.76215430011845,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.05002780000258,
"count": 2004108,
"is_parallel": true,
"self": 52.05002780000258
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.8700000055105193e-05,
"count": 1,
"self": 1.8700000055105193e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3603.3099547000866,
"count": 106489,
"is_parallel": true,
"self": 4.505542100119783,
"children": {
"process_trajectory": {
"total": 1857.001655799972,
"count": 106489,
"is_parallel": true,
"self": 1856.591344499972,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4103113000001031,
"count": 16,
"is_parallel": true,
"self": 0.4103113000001031
}
}
},
"_update_policy": {
"total": 1741.8027567999948,
"count": 600,
"is_parallel": true,
"self": 572.0102799999847,
"children": {
"TorchPPOOptimizer.update": {
"total": 1169.79247680001,
"count": 93600,
"is_parallel": true,
"self": 1169.79247680001
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.02575780000006489,
"count": 1,
"self": 0.005545900000015536,
"children": {
"RLTrainer._checkpoint": {
"total": 0.020211900000049354,
"count": 1,
"self": 0.020211900000049354
}
}
}
}
}
}
}