{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5091946125030518,
"min": 1.325435996055603,
"max": 3.295685291290283,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31439.54296875,
"min": 21926.326171875,
"max": 110975.4609375,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 70.8955223880597,
"min": 37.26923076923077,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19000.0,
"min": 7992.0,
"max": 31188.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1801.7418215807775,
"min": 1198.190099908711,
"max": 1835.7299469282439,
"count": 4958
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 241433.4040918242,
"min": 2396.380199817422,
"max": 432303.4689743812,
"count": 4958
},
"SoccerTwos.Step.mean": {
"value": 49999967.0,
"min": 9502.0,
"max": 49999967.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999967.0,
"min": 9502.0,
"max": 49999967.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.021310919895768166,
"min": -0.1324259340763092,
"max": 0.19452635943889618,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.876974105834961,
"min": -22.916593551635742,
"max": 26.819599151611328,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02532254531979561,
"min": -0.1327137053012848,
"max": 0.19241538643836975,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.418543577194214,
"min": -22.944385528564453,
"max": 26.660112380981445,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.09117629616348832,
"min": -0.5,
"max": 0.4508639945834875,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -12.308799982070923,
"min": -68.18639993667603,
"max": 70.21480011940002,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.09117629616348832,
"min": -0.5,
"max": 0.4508639945834875,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -12.308799982070923,
"min": -68.18639993667603,
"max": 70.21480011940002,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01989641071219618,
"min": 0.010296740662306547,
"max": 0.026408996262277166,
"count": 2424
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01989641071219618,
"min": 0.010296740662306547,
"max": 0.026408996262277166,
"count": 2424
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10399795373280843,
"min": 5.052829351370747e-07,
"max": 0.13746680021286012,
"count": 2424
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10399795373280843,
"min": 5.052829351370747e-07,
"max": 0.13746680021286012,
"count": 2424
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10489270761609078,
"min": 3.9180880359405513e-07,
"max": 0.13886504471302033,
"count": 2424
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10489270761609078,
"min": 3.9180880359405513e-07,
"max": 0.13886504471302033,
"count": 2424
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2424
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2424
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2424
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2424
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2424
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2424
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679394437",
"python_version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]",
"command_line_arguments": "/home/madd/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --force --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1679445552"
},
"total": 51115.75790369604,
"count": 1,
"self": 0.27135065908078104,
"children": {
"run_training.setup": {
"total": 0.006677009048871696,
"count": 1,
"self": 0.006677009048871696
},
"TrainerController.start_learning": {
"total": 51115.47987602791,
"count": 1,
"self": 44.699953548726626,
"children": {
"TrainerController._reset_env": {
"total": 5.279510647407733,
"count": 250,
"self": 5.279510647407733
},
"TrainerController.advance": {
"total": 51065.3623340216,
"count": 3454035,
"self": 42.94749108084943,
"children": {
"env_step": {
"total": 37613.22994301212,
"count": 3454035,
"self": 29650.399021285702,
"children": {
"SubprocessEnvManager._take_step": {
"total": 7932.426932244911,
"count": 3454035,
"self": 265.4437365505146,
"children": {
"TorchPolicy.evaluate": {
"total": 7666.983195694396,
"count": 6279544,
"self": 7666.983195694396
}
}
},
"workers": {
"total": 30.40398948150687,
"count": 3454035,
"self": 0.0,
"children": {
"worker_root": {
"total": 51055.4050460998,
"count": 3454035,
"is_parallel": true,
"self": 26617.727656310075,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0014895820058882236,
"count": 2,
"is_parallel": true,
"self": 0.00045397202484309673,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001035609981045127,
"count": 8,
"is_parallel": true,
"self": 0.001035609981045127
}
}
},
"UnityEnvironment.step": {
"total": 0.016421745996922255,
"count": 1,
"is_parallel": true,
"self": 0.00036188610829412937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00026072398759424686,
"count": 1,
"is_parallel": true,
"self": 0.00026072398759424686
},
"communicator.exchange": {
"total": 0.014791199006140232,
"count": 1,
"is_parallel": true,
"self": 0.014791199006140232
},
"steps_from_proto": {
"total": 0.0010079368948936462,
"count": 2,
"is_parallel": true,
"self": 0.00021583097986876965,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007921059150248766,
"count": 8,
"is_parallel": true,
"self": 0.0007921059150248766
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 24437.424309372902,
"count": 3454034,
"is_parallel": true,
"self": 1201.1986244178843,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 807.0662587000988,
"count": 3454034,
"is_parallel": true,
"self": 807.0662587000988
},
"communicator.exchange": {
"total": 19068.473930052714,
"count": 3454034,
"is_parallel": true,
"self": 19068.473930052714
},
"steps_from_proto": {
"total": 3360.6854962022044,
"count": 6908068,
"is_parallel": true,
"self": 682.2011010588612,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2678.484395143343,
"count": 27632272,
"is_parallel": true,
"self": 2678.484395143343
}
}
}
}
},
"steps_from_proto": {
"total": 0.25308041682001203,
"count": 498,
"is_parallel": true,
"self": 0.0524178285850212,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.20066258823499084,
"count": 1992,
"is_parallel": true,
"self": 0.20066258823499084
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 13409.18489992863,
"count": 3454035,
"self": 306.95177966193296,
"children": {
"process_trajectory": {
"total": 4909.24327490665,
"count": 3454035,
"self": 4895.40603415505,
"children": {
"RLTrainer._checkpoint": {
"total": 13.837240751599893,
"count": 100,
"self": 13.837240751599893
}
}
},
"_update_policy": {
"total": 8192.989845360047,
"count": 2424,
"self": 5312.4901024515275,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2880.4997429085197,
"count": 72729,
"self": 2880.4997429085197
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.891787052154541e-07,
"count": 1,
"self": 6.891787052154541e-07
},
"TrainerController._save_models": {
"total": 0.13807712099514902,
"count": 1,
"self": 0.0012613199651241302,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1368158010300249,
"count": 1,
"self": 0.1368158010300249
}
}
}
}
}
}
}