{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.4068665504455566,
"min": 2.4068665504455566,
"max": 2.875962257385254,
"count": 5
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 24489.8671875,
"min": 24489.8671875,
"max": 29642.54296875,
"count": 5
},
"SnowballTarget.Step.mean": {
"value": 49936.0,
"min": 9952.0,
"max": 49936.0,
"count": 5
},
"SnowballTarget.Step.sum": {
"value": 49936.0,
"min": 9952.0,
"max": 49936.0,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 4.115147113800049,
"min": 0.40806296467781067,
"max": 4.115147113800049,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 798.3385009765625,
"min": 79.16421508789062,
"max": 798.3385009765625,
"count": 5
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 5
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 5
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07005781891510142,
"min": 0.059381619710870394,
"max": 0.07005781891510142,
"count": 5
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2802312756604057,
"min": 0.23752647884348158,
"max": 0.32589076537588685,
"count": 5
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.24973559459927036,
"min": 0.12096577631788986,
"max": 0.2633334485339183,
"count": 5
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9989423783970814,
"min": 0.48386310527155946,
"max": 1.3166672426695916,
"count": 5
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.9928090024000003e-05,
"min": 2.9928090024000003e-05,
"max": 0.00026752801082399996,
"count": 5
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00011971236009600001,
"min": 0.00011971236009600001,
"max": 0.0010701120432959998,
"count": 5
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10997600000000002,
"min": 0.10997600000000002,
"max": 0.18917599999999998,
"count": 5
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4399040000000001,
"min": 0.4399040000000001,
"max": 0.8468800000000001,
"count": 5
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0005078024,
"min": 0.0005078024,
"max": 0.0044598824,
"count": 5
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0020312096,
"min": 0.0020312096,
"max": 0.0178395296,
"count": 5
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 10.454545454545455,
"min": 3.590909090909091,
"max": 10.454545454545455,
"count": 5
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 460.0,
"min": 158.0,
"max": 492.0,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 10.454545454545455,
"min": 3.590909090909091,
"max": 10.454545454545455,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 460.0,
"min": 158.0,
"max": 492.0,
"count": 5
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1721349030",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1721349155"
},
"total": 125.03761097100005,
"count": 1,
"self": 0.4328856900001483,
"children": {
"run_training.setup": {
"total": 0.07854101299994909,
"count": 1,
"self": 0.07854101299994909
},
"TrainerController.start_learning": {
"total": 124.52618426799995,
"count": 1,
"self": 0.2107322000002796,
"children": {
"TrainerController._reset_env": {
"total": 2.8790511250000463,
"count": 1,
"self": 2.8790511250000463
},
"TrainerController.advance": {
"total": 121.32789357099955,
"count": 4612,
"self": 0.07269133900013003,
"children": {
"env_step": {
"total": 121.25520223199942,
"count": 4612,
"self": 80.00339783399943,
"children": {
"SubprocessEnvManager._take_step": {
"total": 41.17986371700124,
"count": 4612,
"self": 0.3768554020044803,
"children": {
"TorchPolicy.evaluate": {
"total": 40.80300831499676,
"count": 4612,
"self": 40.80300831499676
}
}
},
"workers": {
"total": 0.0719406809987504,
"count": 4612,
"self": 0.0,
"children": {
"worker_root": {
"total": 124.11262741299686,
"count": 4612,
"is_parallel": true,
"self": 64.39389351000148,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008103145999939443,
"count": 1,
"is_parallel": true,
"self": 0.005966528999806542,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002136617000132901,
"count": 10,
"is_parallel": true,
"self": 0.002136617000132901
}
}
},
"UnityEnvironment.step": {
"total": 0.037300041999969835,
"count": 1,
"is_parallel": true,
"self": 0.0006181939999123642,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039423100008662004,
"count": 1,
"is_parallel": true,
"self": 0.00039423100008662004
},
"communicator.exchange": {
"total": 0.03443223599992962,
"count": 1,
"is_parallel": true,
"self": 0.03443223599992962
},
"steps_from_proto": {
"total": 0.001855381000041234,
"count": 1,
"is_parallel": true,
"self": 0.00037131000010504067,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014840709999361934,
"count": 10,
"is_parallel": true,
"self": 0.0014840709999361934
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 59.71873390299538,
"count": 4611,
"is_parallel": true,
"self": 2.5856044029912937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.3710803910099685,
"count": 4611,
"is_parallel": true,
"self": 1.3710803910099685
},
"communicator.exchange": {
"total": 46.994615585998986,
"count": 4611,
"is_parallel": true,
"self": 46.994615585998986
},
"steps_from_proto": {
"total": 8.767433522995134,
"count": 4611,
"is_parallel": true,
"self": 1.6769655100181353,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7.090468012976999,
"count": 46110,
"is_parallel": true,
"self": 7.090468012976999
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0004267460001301515,
"count": 1,
"self": 0.0004267460001301515,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 119.86053349498218,
"count": 178609,
"is_parallel": true,
"self": 3.868341434943545,
"children": {
"process_trajectory": {
"total": 66.71842147003838,
"count": 178609,
"is_parallel": true,
"self": 66.14001820203828,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5784032680001019,
"count": 1,
"is_parallel": true,
"self": 0.5784032680001019
}
}
},
"_update_policy": {
"total": 49.273770590000254,
"count": 22,
"is_parallel": true,
"self": 16.691510459997744,
"children": {
"TorchPPOOptimizer.update": {
"total": 32.58226013000251,
"count": 1119,
"is_parallel": true,
"self": 32.58226013000251
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.10808062599994628,
"count": 1,
"self": 0.0009791790000690526,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10710144699987723,
"count": 1,
"self": 0.10710144699987723
}
}
}
}
}
}
}
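
A minimal sketch of how this file could be inspected, assuming it is saved locally (ML-Agents conventionally writes it under results/<run-id>/run_logs/timers.json, but the TIMERS_PATH below and the walk() helper are illustrative assumptions, not part of the logged output). Each entry in "gauges" holds the latest value plus the min/max observed across "count" summary writes, and the profiling section is a tree of timed blocks nested under "children".

import json

# Assumed location; point this at wherever the JSON above is saved.
TIMERS_PATH = "run_logs/timers.json"

with open(TIMERS_PATH) as f:
    timers = json.load(f)

# Each gauge records the latest value plus the min/max seen across `count` summary writes.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# The profiling data is a tree: each block has total seconds, a call count,
# and nested blocks under "children". Walk it to see where wall-clock time went.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)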