{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7036930322647095,
"min": 0.6965148448944092,
"max": 2.865513324737549,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6804.0078125,
"min": 6804.0078125,
"max": 29345.72265625,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.576396942138672,
"min": 0.23241914808750153,
"max": 13.756016731262207,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2783.161376953125,
"min": 45.08931350708008,
"max": 2819.9833984375,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06795177943371097,
"min": 0.062100456428500664,
"max": 0.0790830295090749,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3397588971685549,
"min": 0.24840182571400266,
"max": 0.36652226188116227,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15939813504616418,
"min": 0.11868818383435628,
"max": 0.2781624938927445,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7969906752308209,
"min": 0.4747527353374251,
"max": 1.3908124694637225,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000028e-06,
"min": 3.0528989824000028e-06,
"max": 0.00029675280108239997,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5264494912000015e-05,
"min": 1.5264494912000015e-05,
"max": 0.001454064015312,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.0242359312,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.145454545454545,
"min": 3.5,
"max": 27.145454545454545,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1493.0,
"min": 154.0,
"max": 1493.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.145454545454545,
"min": 3.5,
"max": 27.145454545454545,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1493.0,
"min": 154.0,
"max": 1493.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673977222",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673978262"
},
"total": 1040.5268769680001,
"count": 1,
"self": 0.4359356220002155,
"children": {
"run_training.setup": {
"total": 0.1077166559998659,
"count": 1,
"self": 0.1077166559998659
},
"TrainerController.start_learning": {
"total": 1039.98322469,
"count": 1,
"self": 1.2501699180215837,
"children": {
"TrainerController._reset_env": {
"total": 6.186515630000031,
"count": 1,
"self": 6.186515630000031
},
"TrainerController.advance": {
"total": 1032.4272220109783,
"count": 45479,
"self": 0.6490676989956228,
"children": {
"env_step": {
"total": 1031.7781543119827,
"count": 45479,
"self": 668.297482972926,
"children": {
"SubprocessEnvManager._take_step": {
"total": 362.84822139705784,
"count": 45479,
"self": 3.2607984840528843,
"children": {
"TorchPolicy.evaluate": {
"total": 359.58742291300496,
"count": 45479,
"self": 80.95873871299318,
"children": {
"TorchPolicy.sample_actions": {
"total": 278.6286842000118,
"count": 45479,
"self": 278.6286842000118
}
}
}
}
},
"workers": {
"total": 0.632449941998857,
"count": 45479,
"self": 0.0,
"children": {
"worker_root": {
"total": 1037.0784407149927,
"count": 45479,
"is_parallel": true,
"self": 498.58295759703105,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002002569000069343,
"count": 1,
"is_parallel": true,
"self": 0.000679618000958726,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001322950999110617,
"count": 10,
"is_parallel": true,
"self": 0.001322950999110617
}
}
},
"UnityEnvironment.step": {
"total": 0.031075796999630256,
"count": 1,
"is_parallel": true,
"self": 0.0003762799997275579,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037553500033027376,
"count": 1,
"is_parallel": true,
"self": 0.00037553500033027376
},
"communicator.exchange": {
"total": 0.02886006099970473,
"count": 1,
"is_parallel": true,
"self": 0.02886006099970473
},
"steps_from_proto": {
"total": 0.001463920999867696,
"count": 1,
"is_parallel": true,
"self": 0.0004166099997746642,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001047311000093032,
"count": 10,
"is_parallel": true,
"self": 0.001047311000093032
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 538.4954831179616,
"count": 45478,
"is_parallel": true,
"self": 20.548498388015105,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.07262519103142,
"count": 45478,
"is_parallel": true,
"self": 12.07262519103142
},
"communicator.exchange": {
"total": 423.9461759119622,
"count": 45478,
"is_parallel": true,
"self": 423.9461759119622
},
"steps_from_proto": {
"total": 81.92818362695289,
"count": 45478,
"is_parallel": true,
"self": 16.231376463048946,
"children": {
"_process_rank_one_or_two_observation": {
"total": 65.69680716390394,
"count": 454780,
"is_parallel": true,
"self": 65.69680716390394
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.812400018432527e-05,
"count": 1,
"self": 3.812400018432527e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1025.1477156789588,
"count": 837269,
"is_parallel": true,
"self": 22.223550913907275,
"children": {
"process_trajectory": {
"total": 583.556921280051,
"count": 837269,
"is_parallel": true,
"self": 581.5426234150514,
"children": {
"RLTrainer._checkpoint": {
"total": 2.014297864999662,
"count": 10,
"is_parallel": true,
"self": 2.014297864999662
}
}
},
"_update_policy": {
"total": 419.3672434850005,
"count": 227,
"is_parallel": true,
"self": 108.7628012249811,
"children": {
"TorchPPOOptimizer.update": {
"total": 310.60444226001937,
"count": 11574,
"is_parallel": true,
"self": 310.60444226001937
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11927900699993188,
"count": 1,
"self": 0.0008293249998132524,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11844968200011863,
"count": 1,
"self": 0.11844968200011863
}
}
}
}
}
}
}