{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4308271408081055,
"min": 1.3409537076950073,
"max": 3.295689344406128,
"count": 4999
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 28708.115234375,
"min": 19419.453125,
"max": 156500.25,
"count": 4999
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 61.525,
"min": 40.983193277310924,
"max": 999.0,
"count": 4999
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19688.0,
"min": 9184.0,
"max": 35300.0,
"count": 4999
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1581.858768135467,
"min": 1188.4916925601074,
"max": 1698.4761226117432,
"count": 4992
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 253097.40290167474,
"min": 2380.279313538583,
"max": 367086.8878069242,
"count": 4992
},
"SoccerTwos.Step.mean": {
"value": 49999995.0,
"min": 9104.0,
"max": 49999995.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999995.0,
"min": 9104.0,
"max": 49999995.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.010109087452292442,
"min": -0.16198593378067017,
"max": 0.1480364352464676,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.6174540519714355,
"min": -26.565692901611328,
"max": 23.532058715820312,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.010827122256159782,
"min": -0.16508963704109192,
"max": 0.14846718311309814,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.7323395013809204,
"min": -27.0747013092041,
"max": 24.650981903076172,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.12688500061631203,
"min": -0.6478533327579499,
"max": 0.39966545104980467,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -20.301600098609924,
"min": -64.50240015983582,
"max": 56.001999855041504,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.12688500061631203,
"min": -0.6478533327579499,
"max": 0.39966545104980467,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -20.301600098609924,
"min": -64.50240015983582,
"max": 56.001999855041504,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017218954709824173,
"min": 0.00956947815817936,
"max": 0.026000298119227713,
"count": 2422
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017218954709824173,
"min": 0.00956947815817936,
"max": 0.026000298119227713,
"count": 2422
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10388810609777768,
"min": 0.0007105947634651481,
"max": 0.12171690414349239,
"count": 2422
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10388810609777768,
"min": 0.0007105947634651481,
"max": 0.12171690414349239,
"count": 2422
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10512812460462252,
"min": 0.0007158667203232956,
"max": 0.12356023316582045,
"count": 2422
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10512812460462252,
"min": 0.0007158667203232956,
"max": 0.12356023316582045,
"count": 2422
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2422
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2422
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 2422
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 2422
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2422
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2422
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676316305",
"python_version": "3.10.6 (main, Nov 14 2022, 16:10:14) [GCC 11.3.0]",
"command_line_arguments": "/home/deep-rl/.local/bin/mlagents-learn config/poca/SoccerTwos.yaml --env training-envs-executables/SoccerTwos.x86_64 --run-id SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1676363500"
},
"total": 47194.998105576,
"count": 1,
"self": 0.2186989499896299,
"children": {
"run_training.setup": {
"total": 0.010428734007291496,
"count": 1,
"self": 0.010428734007291496
},
"TrainerController.start_learning": {
"total": 47194.768977892,
"count": 1,
"self": 38.20051181523013,
"children": {
"TrainerController._reset_env": {
"total": 4.155645973834908,
"count": 250,
"self": 4.155645973834908
},
"TrainerController.advance": {
"total": 47152.316890533926,
"count": 3428432,
"self": 38.966143722238485,
"children": {
"env_step": {
"total": 33675.42588105894,
"count": 3428432,
"self": 25718.717259774945,
"children": {
"SubprocessEnvManager._take_step": {
"total": 7934.048436769139,
"count": 3428432,
"self": 254.51408410366275,
"children": {
"TorchPolicy.evaluate": {
"total": 7679.534352665476,
"count": 6283042,
"self": 7679.534352665476
}
}
},
"workers": {
"total": 22.660184514854336,
"count": 3428432,
"self": 0.0,
"children": {
"worker_root": {
"total": 47143.67764265783,
"count": 3428432,
"is_parallel": true,
"self": 26053.4131465526,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001226332999067381,
"count": 2,
"is_parallel": true,
"self": 0.000302325002849102,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000924007996218279,
"count": 8,
"is_parallel": true,
"self": 0.000924007996218279
}
}
},
"UnityEnvironment.step": {
"total": 0.015415244997711852,
"count": 1,
"is_parallel": true,
"self": 0.00031738801044411957,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002728039980866015,
"count": 1,
"is_parallel": true,
"self": 0.0002728039980866015
},
"communicator.exchange": {
"total": 0.013663667981745675,
"count": 1,
"is_parallel": true,
"self": 0.013663667981745675
},
"steps_from_proto": {
"total": 0.001161385007435456,
"count": 2,
"is_parallel": true,
"self": 0.00024142200709320605,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009199630003422499,
"count": 8,
"is_parallel": true,
"self": 0.0009199630003422499
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 21090.02816724684,
"count": 3428431,
"is_parallel": true,
"self": 1070.6337631969946,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 759.2072407228698,
"count": 3428431,
"is_parallel": true,
"self": 759.2072407228698
},
"communicator.exchange": {
"total": 16120.65393558715,
"count": 3428431,
"is_parallel": true,
"self": 16120.65393558715
},
"steps_from_proto": {
"total": 3139.5332277398265,
"count": 6856862,
"is_parallel": true,
"self": 625.9189817667357,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2513.614245973091,
"count": 27427448,
"is_parallel": true,
"self": 2513.614245973091
}
}
}
}
},
"steps_from_proto": {
"total": 0.23632885838742368,
"count": 498,
"is_parallel": true,
"self": 0.04753050676663406,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.18879835162078962,
"count": 1992,
"is_parallel": true,
"self": 0.18879835162078962
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 13437.92486575275,
"count": 3428432,
"self": 290.6448201589519,
"children": {
"process_trajectory": {
"total": 4714.613510220719,
"count": 3428432,
"self": 4705.077789016708,
"children": {
"RLTrainer._checkpoint": {
"total": 9.53572120401077,
"count": 100,
"self": 9.53572120401077
}
}
},
"_update_policy": {
"total": 8432.666535373079,
"count": 2422,
"self": 5494.97190105819,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2937.6946343148884,
"count": 72678,
"self": 2937.6946343148884
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.80010237172246e-07,
"count": 1,
"self": 5.80010237172246e-07
},
"TrainerController._save_models": {
"total": 0.09592898900154978,
"count": 1,
"self": 0.0013139630027581006,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09461502599879168,
"count": 1,
"self": 0.09461502599879168
}
}
}
}
}
}
}