{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4099323749542236, "min": 1.4099323749542236, "max": 1.4292129278182983, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 70465.6015625, "min": 68761.2109375, "max": 78220.96875, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 106.57815845824412, "min": 79.27608346709471, "max": 388.9922480620155, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49772.0, "min": 48842.0, "max": 50180.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999685.0, "min": 49559.0, "max": 1999685.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999685.0, "min": 49559.0, "max": 1999685.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.3646159172058105, "min": 0.16908565163612366, "max": 2.5027546882629395, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1104.275634765625, "min": 21.642963409423828, "max": 1534.2764892578125, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.508184065783049, "min": 1.740510475821793, "max": 4.06889255579353, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 1638.321958720684, "min": 222.7853409051895, "max": 2451.994974732399, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.508184065783049, "min": 1.740510475821793, "max": 4.06889255579353, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 1638.321958720684, "min": 222.7853409051895, "max": 2451.994974732399, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.017148488525279188, "min": 0.012953496576099295, "max": 0.0188888559564172, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.05144546557583757, "min": 0.027860676694156913, "max": 0.056653323959714424, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.048627642914652824, "min": 0.020235149438182512, "max": 0.06401841708769401, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.14588292874395847, "min": 0.040470298876365024, "max": 0.17957337213059266, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.301048899683335e-06, "min": 3.301048899683335e-06, "max": 0.00029532360155879996, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 9.903146699050004e-06, "min": 9.903146699050004e-06, "max": 0.0008438938687020498, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10110031666666668, "min": 0.10110031666666668, "max": 0.19844119999999998, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30330095, "min": 0.20743480000000003, "max": 0.5812979500000001, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.490580166666671e-05, "min": 6.490580166666671e-05, "max": 0.004922215880000001, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00019471740500000016, "min": 0.00019471740500000016, "max": 0.014066767705000002, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1717871660", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.3.0+cu121", "numpy_version": "1.23.5", "end_time_seconds": 
"1717874127" }, "total": 2467.1859702919996, "count": 1, "self": 0.9024579649999396, "children": { "run_training.setup": { "total": 0.05731574700001829, "count": 1, "self": 0.05731574700001829 }, "TrainerController.start_learning": { "total": 2466.22619658, "count": 1, "self": 4.513253234962576, "children": { "TrainerController._reset_env": { "total": 3.2170997239999792, "count": 1, "self": 3.2170997239999792 }, "TrainerController.advance": { "total": 2458.3203436150375, "count": 232254, "self": 4.677470596011972, "children": { "env_step": { "total": 1947.4997554789986, "count": 232254, "self": 1610.7824727869297, "children": { "SubprocessEnvManager._take_step": { "total": 333.83700998903737, "count": 232254, "self": 16.474374835026197, "children": { "TorchPolicy.evaluate": { "total": 317.3626351540112, "count": 223062, "self": 317.3626351540112 } } }, "workers": { "total": 2.8802727030316078, "count": 232254, "self": 0.0, "children": { "worker_root": { "total": 2458.908264199999, "count": 232254, "is_parallel": true, "self": 1161.4968691510924, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0009303710000381216, "count": 1, "is_parallel": true, "self": 0.00025078900006292315, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006795819999751984, "count": 2, "is_parallel": true, "self": 0.0006795819999751984 } } }, "UnityEnvironment.step": { "total": 0.031158795000010286, "count": 1, "is_parallel": true, "self": 0.0004092179999588552, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00021067900001980888, "count": 1, "is_parallel": true, "self": 0.00021067900001980888 }, "communicator.exchange": { "total": 0.02973556599999938, "count": 1, "is_parallel": true, "self": 0.02973556599999938 }, "steps_from_proto": { "total": 0.0008033320000322419, "count": 1, "is_parallel": true, "self": 0.0002114600000027167, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005918720000295252, "count": 2, "is_parallel": true, "self": 0.0005918720000295252 } } } } } } }, "UnityEnvironment.step": { "total": 1297.4113950489066, "count": 232253, "is_parallel": true, "self": 39.04324303785643, "children": { "UnityEnvironment._generate_step_input": { "total": 81.8764548500211, "count": 232253, "is_parallel": true, "self": 81.8764548500211 }, "communicator.exchange": { "total": 1085.5442322429508, "count": 232253, "is_parallel": true, "self": 1085.5442322429508 }, "steps_from_proto": { "total": 90.94746491807808, "count": 232253, "is_parallel": true, "self": 32.353186955133594, "children": { "_process_rank_one_or_two_observation": { "total": 58.59427796294449, "count": 464506, "is_parallel": true, "self": 58.59427796294449 } } } } } } } } } } }, "trainer_advance": { "total": 506.1431175400268, "count": 232254, "self": 6.820673124952975, "children": { "process_trajectory": { "total": 155.55651035407698, "count": 232254, "self": 154.12471583107668, "children": { "RLTrainer._checkpoint": { "total": 1.431794523000292, "count": 10, "self": 1.431794523000292 } } }, "_update_policy": { "total": 343.76593406099687, "count": 97, "self": 279.0288975690018, "children": { "TorchPPOOptimizer.update": { "total": 64.73703649199507, "count": 2910, "self": 64.73703649199507 } } } } } } }, "trainer_threads": { "total": 1.535999672341859e-06, "count": 1, "self": 1.535999672341859e-06 }, "TrainerController._save_models": { "total": 0.17549847000009322, "count": 1, "self": 
0.002983672000027582, "children": { "RLTrainer._checkpoint": { "total": 0.17251479800006564, "count": 1, "self": 0.17251479800006564 } } } } } } }