{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.411449670791626,
"min": 1.411449670791626,
"max": 1.4296773672103882,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69711.5,
"min": 69307.34375,
"max": 77428.25,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 98.622,
"min": 84.295025728988,
"max": 397.0866141732283,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49311.0,
"min": 49144.0,
"max": 50430.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999926.0,
"min": 49925.0,
"max": 1999926.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999926.0,
"min": 49925.0,
"max": 1999926.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.440776824951172,
"min": 0.04950188100337982,
"max": 2.4713337421417236,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1220.388427734375,
"min": 6.237236976623535,
"max": 1450.6728515625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7567028418779373,
"min": 1.7678349919262386,
"max": 4.010196814139007,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1878.3514209389687,
"min": 222.74720898270607,
"max": 2353.985529899597,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7567028418779373,
"min": 1.7678349919262386,
"max": 4.010196814139007,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1878.3514209389687,
"min": 222.74720898270607,
"max": 2353.985529899597,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01602369905061399,
"min": 0.012144613888813183,
"max": 0.02134164810219469,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04807109715184197,
"min": 0.024289227777626366,
"max": 0.05485199139317653,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05048041683104303,
"min": 0.023012347674618166,
"max": 0.06142609547823667,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1514412504931291,
"min": 0.04602469534923633,
"max": 0.18394419203201928,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1783989405666666e-06,
"min": 3.1783989405666666e-06,
"max": 0.0002953038015654,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.5351968217e-06,
"min": 9.5351968217e-06,
"max": 0.0008441457186181001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10105943333333334,
"min": 0.10105943333333334,
"max": 0.19843460000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3031783,
"min": 0.20728610000000003,
"max": 0.5813818999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.286572333333335e-05,
"min": 6.286572333333335e-05,
"max": 0.00492188654,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018859717000000004,
"min": 0.00018859717000000004,
"max": 0.01407095681,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688562297",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688565115"
},
"total": 2817.869219956,
"count": 1,
"self": 0.4891443279998384,
"children": {
"run_training.setup": {
"total": 0.04692462900004557,
"count": 1,
"self": 0.04692462900004557
},
"TrainerController.start_learning": {
"total": 2817.3331509990003,
"count": 1,
"self": 5.5868130540807215,
"children": {
"TrainerController._reset_env": {
"total": 4.694052495999983,
"count": 1,
"self": 4.694052495999983
},
"TrainerController.advance": {
"total": 2806.9235694839194,
"count": 232089,
"self": 5.874194109677774,
"children": {
"env_step": {
"total": 2199.4110540472043,
"count": 232089,
"self": 1852.08444269541,
"children": {
"SubprocessEnvManager._take_step": {
"total": 343.7108871658516,
"count": 232089,
"self": 20.045282802783333,
"children": {
"TorchPolicy.evaluate": {
"total": 323.66560436306827,
"count": 222966,
"self": 323.66560436306827
}
}
},
"workers": {
"total": 3.6157241859427245,
"count": 232089,
"self": 0.0,
"children": {
"worker_root": {
"total": 2808.077209137037,
"count": 232089,
"is_parallel": true,
"self": 1297.0733768840173,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009569579999606503,
"count": 1,
"is_parallel": true,
"self": 0.00026919699996597046,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006877609999946799,
"count": 2,
"is_parallel": true,
"self": 0.0006877609999946799
}
}
},
"UnityEnvironment.step": {
"total": 0.03167150600000923,
"count": 1,
"is_parallel": true,
"self": 0.00037225100004434353,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002735969999889676,
"count": 1,
"is_parallel": true,
"self": 0.0002735969999889676
},
"communicator.exchange": {
"total": 0.030216411999958837,
"count": 1,
"is_parallel": true,
"self": 0.030216411999958837
},
"steps_from_proto": {
"total": 0.0008092460000170831,
"count": 1,
"is_parallel": true,
"self": 0.000216485000009925,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005927610000071581,
"count": 2,
"is_parallel": true,
"self": 0.0005927610000071581
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1511.0038322530195,
"count": 232088,
"is_parallel": true,
"self": 46.40152693611935,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 87.57158550595761,
"count": 232088,
"is_parallel": true,
"self": 87.57158550595761
},
"communicator.exchange": {
"total": 1264.0207770169727,
"count": 232088,
"is_parallel": true,
"self": 1264.0207770169727
},
"steps_from_proto": {
"total": 113.00994279396991,
"count": 232088,
"is_parallel": true,
"self": 37.886122977199875,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.12381981677004,
"count": 464176,
"is_parallel": true,
"self": 75.12381981677004
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 601.6383213270375,
"count": 232089,
"self": 8.863617601106512,
"children": {
"process_trajectory": {
"total": 151.15275472093123,
"count": 232089,
"self": 149.7966501569311,
"children": {
"RLTrainer._checkpoint": {
"total": 1.356104564000134,
"count": 10,
"self": 1.356104564000134
}
}
},
"_update_policy": {
"total": 441.6219490049998,
"count": 97,
"self": 375.8173287990081,
"children": {
"TorchPPOOptimizer.update": {
"total": 65.80462020599168,
"count": 2910,
"self": 65.80462020599168
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0490002750884742e-06,
"count": 1,
"self": 1.0490002750884742e-06
},
"TrainerController._save_models": {
"total": 0.128714915999808,
"count": 1,
"self": 0.0020443280000108643,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12667058799979714,
"count": 1,
"self": 0.12667058799979714
}
}
}
}
}
}
}