ckandemir committed
Commit d4af99f
1 Parent(s): 4d9f0a1
Huggy.onnx CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:efdc6b51f893b488171565b96fe93759ac6931a42c0c944ef30c36b2766f24e4
- size 2271327
+ oid sha256:121c0eef7a9ee930652647558a17c482306762b99d9534ae106123b0de9f2602
+ size 2273984
Huggy/Huggy-1199914.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd1e8521811b5010810472fc3478be4b3bf993f56edcb954c9b429813ecb3bf1
+ size 2273984
Huggy/Huggy-1199914.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6de7a444bc5a4295e5c79f4ba18a48fcd79fbd6f5195caeb42dcbc9ce9b4da4f
+ size 13509473
Huggy/Huggy-1399897.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72ed037a4877470b8312fbfaf6798924d447d3d67aecd21f10f85db64a170870
+ size 2273984
Huggy/Huggy-1399897.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f68441094686bf50281a94bc79074cdc3615f615cacac246de418f3a65ddcd5
+ size 13509473
Huggy/Huggy-1599960.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:935c9e76ae2c5b340de7a17213182b96d7065d0df488ab1c7a4dd09879018718
+ size 2273984
Huggy/Huggy-1599960.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e3f2d8458c36ce888e744c5b899fb81cbad07f9aa6d0cb24e7a1f78ed77fefc
+ size 13509473
Huggy/Huggy-1799940.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acbc454eb66cbf7fbcc98c8f454ddc7a7055548f207ccd0e56db90b1ece52aa8
+ size 2273984
Huggy/Huggy-1799940.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c81ce70a8cdb73a40fe311a28a252f34203fa529b63f1c66444d33aa3e2ced5
+ size 13509473
Huggy/Huggy-199718.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1158ba45297ab0c981d5fe2647dd88053621d3a8b90835832df371d06eda3ebf
+ size 2273984
Huggy/Huggy-199718.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:420361e59ebff5a4dd54f0ff36dd6e7a5377885dd9b5c07def01c6ba843c4f70
+ size 13509388
Huggy/Huggy-1999930.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccc134acf94133f323d704a46009967bf77598baabe7c66e402380f2bc3ba7f1
+ size 2273984
Huggy/Huggy-1999930.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:926e6fc7c93cc1f379945dccce6c90d7cde78716df0a79ea5358e44bdb950bcb
+ size 13509473
Huggy/Huggy-2000044.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:121c0eef7a9ee930652647558a17c482306762b99d9534ae106123b0de9f2602
+ size 2273984
Huggy/Huggy-2000044.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0f7f17627fb6aa2438c88f5d3ec6bc0e4dfe7386e47029b812ab50f6f658af8
+ size 13509473
Huggy/Huggy-399887.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89f0e2bd72e8b5df3a874ea725083b390bab5ee100a4000c896d289a4bbd146b
+ size 2273984
Huggy/Huggy-399887.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1621a27277984ab4c4fcfc7dc74852b67d1baf8711757c2a9ad02fb8a2b9320
+ size 13509388
Huggy/Huggy-599944.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc823dfc331432af568aa650f13163d35d6beab0d83c665b8826751ddf7fffd0
+ size 2273984
Huggy/Huggy-599944.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30dee610e607d7a894f82f28ee13c9d85e5126e174eb9de9e4279080968cb344
+ size 13509388
Huggy/Huggy-799960.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3aae415e94d279f6f90cc38404c583ea0e2d7151a0ed2720c27460f12de0ed1
+ size 2273984
Huggy/Huggy-799960.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2436b5e2c496c9d20dfb213700854d5e0be3d887dc578fd9a15749032b92e12a
+ size 13509388
Huggy/Huggy-999589.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67030b4730dfbc53f4feb0b49f59eb4c7765c95cee173d1fc2bbc1f2d9d38b9f
+ size 2273984
Huggy/Huggy-999589.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5ebe232cd717d69bd5c4064d8411f483fa6f7a25c4e1e017631b1b5bdf55dde
+ size 13509388
Huggy/checkpoint.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:13f0f5ee994ee6c046a1409edf82ca51a9df6c248cacecc3e3c318ff7f87fe76
- size 13503717
+ oid sha256:ff746944dcbe434a1dd66342b1afc5fb99f42e11c00231fa16876701bf0c1833
+ size 13509218
Huggy/events.out.tfevents.1724096948.5b4814d5e465.3292.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b368c2c157321818fc1ad39e43cddb1ad3fe8f1f3e5c95f9aba29f4a24cb1df8
+ size 389153
config.json CHANGED
@@ -1 +1 @@
- {"default_settings": null, "behaviors": {"Huggy": {"trainer_type": "ppo", "hyperparameters": {"batch_size": 2048, "buffer_size": 20480, "learning_rate": 0.0003, "beta": 0.005, "epsilon": 0.2, "lambd": 0.95, "num_epoch": 3, "shared_critic": false, "learning_rate_schedule": "linear", "beta_schedule": "linear", "epsilon_schedule": "linear"}, "checkpoint_interval": 200000, "network_settings": {"normalize": true, "hidden_units": 512, "num_layers": 3, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "reward_signals": {"extrinsic": {"gamma": 0.995, "strength": 1.0, "network_settings": {"normalize": false, "hidden_units": 128, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}}}, "init_path": null, "keep_checkpoints": 15, "even_checkpoints": false, "max_steps": 2000000, "time_horizon": 1000, "summary_freq": 50000, "threaded": false, "self_play": null, "behavioral_cloning": null}}, "env_settings": {"env_path": "./trained-envs-executables/linux/Huggy/Huggy", "env_args": null, "base_port": 5005, "num_envs": 1, "num_areas": 1, "seed": -1, "max_lifetime_restarts": 10, "restarts_rate_limit_n": 1, "restarts_rate_limit_period_s": 60}, "engine_settings": {"width": 84, "height": 84, "quality_level": 5, "time_scale": 20, "target_frame_rate": -1, "capture_frame_rate": 60, "no_graphics": true}, "environment_parameters": null, "checkpoint_settings": {"run_id": "Huggy", "initialize_from": null, "load_model": false, "resume": false, "force": false, "train_model": false, "inference": false, "results_dir": "results"}, "torch_settings": {"device": null}, "debug": false}
+ {"default_settings": null, "behaviors": {"Huggy": {"trainer_type": "ppo", "hyperparameters": {"batch_size": 2048, "buffer_size": 20480, "learning_rate": 0.0003, "beta": 0.005, "epsilon": 0.2, "lambd": 0.95, "num_epoch": 3, "shared_critic": false, "learning_rate_schedule": "linear", "beta_schedule": "linear", "epsilon_schedule": "linear"}, "checkpoint_interval": 200000, "network_settings": {"normalize": true, "hidden_units": 512, "num_layers": 3, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "reward_signals": {"extrinsic": {"gamma": 0.995, "strength": 1.0, "network_settings": {"normalize": false, "hidden_units": 128, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}}}, "init_path": null, "keep_checkpoints": 15, "even_checkpoints": false, "max_steps": 2000000, "time_horizon": 1000, "summary_freq": 50000, "threaded": false, "self_play": null, "behavioral_cloning": null}}, "env_settings": {"env_path": "./trained-envs-executables/linux/Huggy/Huggy", "env_args": null, "base_port": 5005, "num_envs": 1, "num_areas": 1, "timeout_wait": 60, "seed": -1, "max_lifetime_restarts": 10, "restarts_rate_limit_n": 1, "restarts_rate_limit_period_s": 60}, "engine_settings": {"width": 84, "height": 84, "quality_level": 5, "time_scale": 20, "target_frame_rate": -1, "capture_frame_rate": 60, "no_graphics": true, "no_graphics_monitor": false}, "environment_parameters": null, "checkpoint_settings": {"run_id": "Huggy", "initialize_from": null, "load_model": false, "resume": false, "force": false, "train_model": false, "inference": false, "results_dir": "results"}, "torch_settings": {"device": null}, "debug": false}
configuration.yaml CHANGED
@@ -50,6 +50,7 @@ env_settings:
  base_port: 5005
  num_envs: 1
  num_areas: 1
+ timeout_wait: 60
  seed: -1
  max_lifetime_restarts: 10
  restarts_rate_limit_n: 1
@@ -62,6 +63,7 @@ engine_settings:
  target_frame_rate: -1
  capture_frame_rate: 60
  no_graphics: true
+ no_graphics_monitor: false
  environment_parameters: null
  checkpoint_settings:
  run_id: Huggy
run_logs/Player-0.log CHANGED
@@ -1,12 +1,12 @@
- Mono path[0] = '/content/trained-envs-executables/linux/Huggy/Huggy_Data/Managed'
- Mono config path = '/content/trained-envs-executables/linux/Huggy/Huggy_Data/MonoBleedingEdge/etc'
+ Mono path[0] = '/content/ml-agents/trained-envs-executables/linux/Huggy/Huggy_Data/Managed'
+ Mono config path = '/content/ml-agents/trained-envs-executables/linux/Huggy/Huggy_Data/MonoBleedingEdge/etc'
  Preloaded 'lib_burst_generated.so'
  Preloaded 'libgrpc_csharp_ext.x64.so'
  PlayerPrefs - Creating folder: /root/.config/unity3d/Hugging Face
  PlayerPrefs - Creating folder: /root/.config/unity3d/Hugging Face/Huggy
  Unable to load player prefs
  Initialize engine version: 2021.3.14f1 (eee1884e7226)
- [Subsystems] Discovering subsystems at path /content/trained-envs-executables/linux/Huggy/Huggy_Data/UnitySubsystems
+ [Subsystems] Discovering subsystems at path /content/ml-agents/trained-envs-executables/linux/Huggy/Huggy_Data/UnitySubsystems
  Forcing GfxDevice: Null
  GfxDevice: creating device client; threaded=0; jobified=0
  NullGfxDevice:
@@ -34,7 +34,7 @@ ALSA lib pcm.c:2664:(snd_pcm_open_noupdate) Unknown PCM default
  FMOD failed to initialize the output device.: "Error initializing output device. " (60)
  FMOD initialized on nosound output
  Begin MonoManager ReloadAssembly
- - Completed reload, in 0.127 seconds
+ - Completed reload, in 0.092 seconds
  ERROR: Shader Hidden/Universal Render Pipeline/Blit shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Hidden/Universal Render Pipeline/CopyDepth shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Hidden/Universal Render Pipeline/ScreenSpaceShadows shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
@@ -78,14 +78,14 @@ ERROR: Shader Universal Render Pipeline/Lit shader is not supported on this GPU
  WARNING: Shader Unsupported: 'Universal Render Pipeline/Lit' - All subshaders removed
  WARNING: Shader Did you use #pragma only_renderers and omit this platform?
  WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
- UnloadTime: 0.925064 ms
+ UnloadTime: 0.875426 ms
  requesting resize 84 x 84
  Setting up 1 worker threads for Enlighten.
  Memory Statistics:
  [ALLOC_TEMP_TLS] TLS Allocator
  StackAllocators :
  [ALLOC_TEMP_MAIN]
- Peak usage frame count: [4.0 KB-8.0 KB]: 26755 frames, [2.0 MB-4.0 MB]: 1 frames
+ Peak usage frame count: [4.0 KB-8.0 KB]: 26744 frames, [2.0 MB-4.0 MB]: 1 frames
  Initial Block Size 4.0 MB
  Current Block Size 4.0 MB
  Peak Allocated Bytes 3.6 MB
@@ -93,7 +93,7 @@ Memory Statistics:
  [ALLOC_TEMP_Loading.AsyncRead]
  Initial Block Size 64.0 KB
  Current Block Size 64.0 KB
- Peak Allocated Bytes 128 B
+ Peak Allocated Bytes 184 B
  Overflow Count 0
  [ALLOC_TEMP_Loading.PreloadManager]
  Initial Block Size 256.0 KB
@@ -201,22 +201,22 @@ Memory Statistics:
  Peak Allocated Bytes 0 B
  Overflow Count 0
  [ALLOC_DEFAULT] Dual Thread Allocator
- Peak main deferred allocation count 193
+ Peak main deferred allocation count 288
  [ALLOC_BUCKET]
  Large Block size 4.0 MB
  Used Block count 1
  Peak Allocated bytes 1.4 MB
  [ALLOC_DEFAULT_MAIN]
- Peak usage frame count: [16.0 MB-32.0 MB]: 26756 frames
+ Peak usage frame count: [16.0 MB-32.0 MB]: 26745 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
- Peak Allocated memory 23.5 MB
+ Peak Allocated memory 23.6 MB
  Peak Large allocation bytes 16.0 MB
  [ALLOC_DEFAULT_THREAD]
- Peak usage frame count: [2.0 MB-4.0 MB]: 26756 frames
+ Peak usage frame count: [2.0 MB-4.0 MB]: 26745 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
- Peak Allocated memory 2.6 MB
+ Peak Allocated memory 2.5 MB
  Peak Large allocation bytes 0 B
  [ALLOC_TEMP_JOB_1_FRAME]
  Initial Block Size 2.0 MB
@@ -245,13 +245,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.4 MB
  [ALLOC_GFX_MAIN]
- Peak usage frame count: [32.0 KB-64.0 KB]: 26755 frames, [64.0 KB-128.0 KB]: 1 frames
+ Peak usage frame count: [32.0 KB-64.0 KB]: 26744 frames, [64.0 KB-128.0 KB]: 1 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
  Peak Allocated memory 65.6 KB
  Peak Large allocation bytes 0 B
  [ALLOC_GFX_THREAD]
- Peak usage frame count: [64.0 KB-128.0 KB]: 26756 frames
+ Peak usage frame count: [64.0 KB-128.0 KB]: 26745 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
  Peak Allocated memory 81.8 KB
@@ -263,13 +263,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.4 MB
  [ALLOC_CACHEOBJECTS_MAIN]
- Peak usage frame count: [1.0 MB-2.0 MB]: 26755 frames, [16.0 MB-32.0 MB]: 1 frames
+ Peak usage frame count: [1.0 MB-2.0 MB]: 26744 frames, [16.0 MB-32.0 MB]: 1 frames
  Requested Block Size 4.0 MB
  Peak Block count 2
  Peak Allocated memory 30.6 MB
  Peak Large allocation bytes 24.9 MB
  [ALLOC_CACHEOBJECTS_THREAD]
- Peak usage frame count: [0.5 MB-1.0 MB]: 26755 frames, [2.0 MB-4.0 MB]: 1 frames
+ Peak usage frame count: [0.5 MB-1.0 MB]: 26744 frames, [2.0 MB-4.0 MB]: 1 frames
  Requested Block Size 4.0 MB
  Peak Block count 1
  Peak Allocated memory 2.6 MB
@@ -281,13 +281,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.4 MB
  [ALLOC_TYPETREE_MAIN]
- Peak usage frame count: [0-1.0 KB]: 26756 frames
+ Peak usage frame count: [0-1.0 KB]: 26745 frames
  Requested Block Size 2.0 MB
  Peak Block count 1
  Peak Allocated memory 1.0 KB
  Peak Large allocation bytes 0 B
  [ALLOC_TYPETREE_THREAD]
- Peak usage frame count: [4.0 KB-8.0 KB]: 26756 frames
+ Peak usage frame count: [4.0 KB-8.0 KB]: 26745 frames
  Requested Block Size 2.0 MB
  Peak Block count 1
  Peak Allocated memory 7.3 KB
run_logs/timers.json CHANGED
@@ -2,135 +2,135 @@
2
  "name": "root",
3
  "gauges": {
4
  "Huggy.Policy.Entropy.mean": {
5
- "value": 1.399520754814148,
6
- "min": 1.399520754814148,
7
- "max": 1.4258242845535278,
8
  "count": 40
9
  },
10
  "Huggy.Policy.Entropy.sum": {
11
- "value": 69728.3203125,
12
- "min": 68652.3359375,
13
- "max": 76006.3359375,
14
  "count": 40
15
  },
16
  "Huggy.Environment.EpisodeLength.mean": {
17
- "value": 82.01326699834162,
18
- "min": 74.90375939849623,
19
- "max": 386.6046511627907,
20
  "count": 40
21
  },
22
  "Huggy.Environment.EpisodeLength.sum": {
23
- "value": 49454.0,
24
- "min": 48911.0,
25
- "max": 49950.0,
26
  "count": 40
27
  },
28
  "Huggy.Step.mean": {
29
- "value": 1999999.0,
30
- "min": 49648.0,
31
- "max": 1999999.0,
32
  "count": 40
33
  },
34
  "Huggy.Step.sum": {
35
- "value": 1999999.0,
36
- "min": 49648.0,
37
- "max": 1999999.0,
38
  "count": 40
39
  },
40
  "Huggy.Policy.ExtrinsicValueEstimate.mean": {
41
- "value": 2.4849090576171875,
42
- "min": 0.04706431180238724,
43
- "max": 2.4997665882110596,
44
  "count": 40
45
  },
46
  "Huggy.Policy.ExtrinsicValueEstimate.sum": {
47
- "value": 1498.400146484375,
48
- "min": 6.024231910705566,
49
- "max": 1628.8299560546875,
50
  "count": 40
51
  },
52
  "Huggy.Environment.CumulativeReward.mean": {
53
- "value": 3.843274138954346,
54
- "min": 1.7075987379066646,
55
- "max": 4.0451268718494635,
56
  "count": 40
57
  },
58
  "Huggy.Environment.CumulativeReward.sum": {
59
- "value": 2317.4943057894707,
60
- "min": 218.57263845205307,
61
- "max": 2610.9078951478004,
62
  "count": 40
63
  },
64
  "Huggy.Policy.ExtrinsicReward.mean": {
65
- "value": 3.843274138954346,
66
- "min": 1.7075987379066646,
67
- "max": 4.0451268718494635,
68
  "count": 40
69
  },
70
  "Huggy.Policy.ExtrinsicReward.sum": {
71
- "value": 2317.4943057894707,
72
- "min": 218.57263845205307,
73
- "max": 2610.9078951478004,
74
  "count": 40
75
  },
76
  "Huggy.Losses.PolicyLoss.mean": {
77
- "value": 0.016582932571570078,
78
- "min": 0.012780692598425958,
79
- "max": 0.020540416541674253,
80
  "count": 40
81
  },
82
  "Huggy.Losses.PolicyLoss.sum": {
83
- "value": 0.04974879771471023,
84
- "min": 0.025561385196851916,
85
- "max": 0.060113032577404135,
86
  "count": 40
87
  },
88
  "Huggy.Losses.ValueLoss.mean": {
89
- "value": 0.05856529598434767,
90
- "min": 0.02291902263338367,
91
- "max": 0.06584875180075567,
92
  "count": 40
93
  },
94
  "Huggy.Losses.ValueLoss.sum": {
95
- "value": 0.175695887953043,
96
- "min": 0.04583804526676734,
97
- "max": 0.19096862437824408,
98
  "count": 40
99
  },
100
  "Huggy.Policy.LearningRate.mean": {
101
- "value": 3.7665987444999953e-06,
102
- "min": 3.7665987444999953e-06,
103
- "max": 0.00029536237654587496,
104
  "count": 40
105
  },
106
  "Huggy.Policy.LearningRate.sum": {
107
- "value": 1.1299796233499986e-05,
108
- "min": 1.1299796233499986e-05,
109
- "max": 0.0008441991186003,
110
  "count": 40
111
  },
112
  "Huggy.Policy.Epsilon.mean": {
113
- "value": 0.1012555,
114
- "min": 0.1012555,
115
- "max": 0.198454125,
116
  "count": 40
117
  },
118
  "Huggy.Policy.Epsilon.sum": {
119
- "value": 0.3037665,
120
- "min": 0.20768055000000002,
121
- "max": 0.5813997,
122
  "count": 40
123
  },
124
  "Huggy.Policy.Beta.mean": {
125
- "value": 7.264944999999991e-05,
126
- "min": 7.264944999999991e-05,
127
- "max": 0.004922860837500001,
128
  "count": 40
129
  },
130
  "Huggy.Policy.Beta.sum": {
131
- "value": 0.00021794834999999972,
132
- "min": 0.00021794834999999972,
133
- "max": 0.014071845030000004,
134
  "count": 40
135
  },
136
  "Huggy.IsTraining.mean": {
@@ -148,67 +148,67 @@
148
  },
149
  "metadata": {
150
  "timer_format_version": "0.1.0",
151
- "start_time_seconds": "1691653849",
152
- "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
153
- "command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
154
- "mlagents_version": "0.31.0.dev0",
155
- "mlagents_envs_version": "0.31.0.dev0",
156
  "communication_protocol_version": "1.5.0",
157
- "pytorch_version": "1.11.0+cu102",
158
- "numpy_version": "1.21.2",
159
- "end_time_seconds": "1691656279"
160
  },
161
- "total": 2429.5885550139997,
162
  "count": 1,
163
- "self": 0.8204254199999923,
164
  "children": {
165
  "run_training.setup": {
166
- "total": 0.045301838000000316,
167
  "count": 1,
168
- "self": 0.045301838000000316
169
  },
170
  "TrainerController.start_learning": {
171
- "total": 2428.722827756,
172
  "count": 1,
173
- "self": 4.3954734199055565,
174
  "children": {
175
  "TrainerController._reset_env": {
176
- "total": 6.380078089999984,
177
  "count": 1,
178
- "self": 6.380078089999984
179
  },
180
  "TrainerController.advance": {
181
- "total": 2417.7480508180943,
182
- "count": 233104,
183
- "self": 4.503064929876018,
184
  "children": {
185
  "env_step": {
186
- "total": 1863.9432559561217,
187
- "count": 233104,
188
- "self": 1572.8232210782141,
189
  "children": {
190
  "SubprocessEnvManager._take_step": {
191
- "total": 288.27201703596086,
192
- "count": 233104,
193
- "self": 16.16587983398108,
194
  "children": {
195
  "TorchPolicy.evaluate": {
196
- "total": 272.1061372019798,
197
- "count": 222963,
198
- "self": 272.1061372019798
199
  }
200
  }
201
  },
202
  "workers": {
203
- "total": 2.848017841946671,
204
- "count": 233104,
205
  "self": 0.0,
206
  "children": {
207
  "worker_root": {
208
- "total": 2421.261487121018,
209
- "count": 233104,
210
  "is_parallel": true,
211
- "self": 1135.1816269730416,
212
  "children": {
213
  "run_training.setup": {
214
  "total": 0.0,
@@ -217,48 +217,48 @@
217
  "self": 0.0,
218
  "children": {
219
  "steps_from_proto": {
220
- "total": 0.0010205099999893719,
221
  "count": 1,
222
  "is_parallel": true,
223
- "self": 0.0003043350000098144,
224
  "children": {
225
  "_process_rank_one_or_two_observation": {
226
- "total": 0.0007161749999795575,
227
  "count": 2,
228
  "is_parallel": true,
229
- "self": 0.0007161749999795575
230
  }
231
  }
232
  },
233
  "UnityEnvironment.step": {
234
- "total": 0.030062470999951074,
235
  "count": 1,
236
  "is_parallel": true,
237
- "self": 0.00030608799988840474,
238
  "children": {
239
  "UnityEnvironment._generate_step_input": {
240
- "total": 0.00023839800007863232,
241
  "count": 1,
242
  "is_parallel": true,
243
- "self": 0.00023839800007863232
244
  },
245
  "communicator.exchange": {
246
- "total": 0.02870000699999764,
247
  "count": 1,
248
  "is_parallel": true,
249
- "self": 0.02870000699999764
250
  },
251
  "steps_from_proto": {
252
- "total": 0.0008179779999863968,
253
  "count": 1,
254
  "is_parallel": true,
255
- "self": 0.00024687400014045124,
256
  "children": {
257
  "_process_rank_one_or_two_observation": {
258
- "total": 0.0005711039998459455,
259
  "count": 2,
260
  "is_parallel": true,
261
- "self": 0.0005711039998459455
262
  }
263
  }
264
  }
@@ -267,34 +267,34 @@
267
  }
268
  },
269
  "UnityEnvironment.step": {
270
- "total": 1286.0798601479764,
271
- "count": 233103,
272
  "is_parallel": true,
273
- "self": 39.2085500309704,
274
  "children": {
275
  "UnityEnvironment._generate_step_input": {
276
- "total": 81.43275011199626,
277
- "count": 233103,
278
  "is_parallel": true,
279
- "self": 81.43275011199626
280
  },
281
  "communicator.exchange": {
282
- "total": 1067.8115230330177,
283
- "count": 233103,
284
  "is_parallel": true,
285
- "self": 1067.8115230330177
286
  },
287
  "steps_from_proto": {
288
- "total": 97.6270369719922,
289
- "count": 233103,
290
  "is_parallel": true,
291
- "self": 34.972608737865926,
292
  "children": {
293
  "_process_rank_one_or_two_observation": {
294
- "total": 62.65442823412627,
295
- "count": 466206,
296
  "is_parallel": true,
297
- "self": 62.65442823412627
298
  }
299
  }
300
  }
@@ -307,31 +307,31 @@
307
  }
308
  },
309
  "trainer_advance": {
310
- "total": 549.3017299320964,
311
- "count": 233104,
312
- "self": 6.484701505072394,
313
  "children": {
314
  "process_trajectory": {
315
- "total": 141.7644283080224,
316
- "count": 233104,
317
- "self": 140.43714554602298,
318
  "children": {
319
  "RLTrainer._checkpoint": {
320
- "total": 1.3272827619994132,
321
  "count": 10,
322
- "self": 1.3272827619994132
323
  }
324
  }
325
  },
326
  "_update_policy": {
327
- "total": 401.05260011900157,
328
  "count": 97,
329
- "self": 340.9283634850033,
330
  "children": {
331
  "TorchPPOOptimizer.update": {
332
- "total": 60.124236633998294,
333
  "count": 2910,
334
- "self": 60.124236633998294
335
  }
336
  }
337
  }
@@ -340,19 +340,19 @@
340
  }
341
  },
342
  "trainer_threads": {
343
- "total": 1.424999936716631e-06,
344
  "count": 1,
345
- "self": 1.424999936716631e-06
346
  },
347
  "TrainerController._save_models": {
348
- "total": 0.1992240029999266,
349
  "count": 1,
350
- "self": 0.0028830139999627136,
351
  "children": {
352
  "RLTrainer._checkpoint": {
353
- "total": 0.19634098899996388,
354
  "count": 1,
355
- "self": 0.19634098899996388
356
  }
357
  }
358
  }
 
2
  "name": "root",
3
  "gauges": {
4
  "Huggy.Policy.Entropy.mean": {
5
+ "value": 1.3986119031906128,
6
+ "min": 1.3986119031906128,
7
+ "max": 1.4272193908691406,
8
  "count": 40
9
  },
10
  "Huggy.Policy.Entropy.sum": {
11
+ "value": 69909.6171875,
12
+ "min": 68980.515625,
13
+ "max": 76948.8203125,
14
  "count": 40
15
  },
16
  "Huggy.Environment.EpisodeLength.mean": {
17
+ "value": 74.85887708649469,
18
+ "min": 71.59071117561683,
19
+ "max": 407.0731707317073,
20
  "count": 40
21
  },
22
  "Huggy.Environment.EpisodeLength.sum": {
23
+ "value": 49332.0,
24
+ "min": 48797.0,
25
+ "max": 50070.0,
26
  "count": 40
27
  },
28
  "Huggy.Step.mean": {
29
+ "value": 1999930.0,
30
+ "min": 49782.0,
31
+ "max": 1999930.0,
32
  "count": 40
33
  },
34
  "Huggy.Step.sum": {
35
+ "value": 1999930.0,
36
+ "min": 49782.0,
37
+ "max": 1999930.0,
38
  "count": 40
39
  },
40
  "Huggy.Policy.ExtrinsicValueEstimate.mean": {
41
+ "value": 2.4748716354370117,
42
+ "min": -0.009717012755572796,
43
+ "max": 2.5046610832214355,
44
  "count": 40
45
  },
46
  "Huggy.Policy.ExtrinsicValueEstimate.sum": {
47
+ "value": 1630.9404296875,
48
+ "min": -1.1854755878448486,
49
+ "max": 1703.673828125,
50
  "count": 40
51
  },
52
  "Huggy.Environment.CumulativeReward.mean": {
53
+ "value": 3.863484154883574,
54
+ "min": 1.8489029527199072,
55
+ "max": 4.017217723283911,
56
  "count": 40
57
  },
58
  "Huggy.Environment.CumulativeReward.sum": {
59
+ "value": 2546.0360580682755,
60
+ "min": 225.5661602318287,
61
+ "max": 2615.5263051986694,
62
  "count": 40
63
  },
64
  "Huggy.Policy.ExtrinsicReward.mean": {
65
+ "value": 3.863484154883574,
66
+ "min": 1.8489029527199072,
67
+ "max": 4.017217723283911,
68
  "count": 40
69
  },
70
  "Huggy.Policy.ExtrinsicReward.sum": {
71
+ "value": 2546.0360580682755,
72
+ "min": 225.5661602318287,
73
+ "max": 2615.5263051986694,
74
  "count": 40
75
  },
76
  "Huggy.Losses.PolicyLoss.mean": {
77
+ "value": 0.015541410738822177,
78
+ "min": 0.012846298208589561,
79
+ "max": 0.020392909957445228,
80
  "count": 40
81
  },
82
  "Huggy.Losses.PolicyLoss.sum": {
83
+ "value": 0.046624232216466534,
84
+ "min": 0.02715820744560915,
85
+ "max": 0.057562461139961364,
86
  "count": 40
87
  },
88
  "Huggy.Losses.ValueLoss.mean": {
89
+ "value": 0.05791126021908389,
90
+ "min": 0.024869270933171116,
91
+ "max": 0.06369256221999725,
92
  "count": 40
93
  },
94
  "Huggy.Losses.ValueLoss.sum": {
95
+ "value": 0.17373378065725167,
96
+ "min": 0.04973854186634223,
97
+ "max": 0.18548102130492528,
98
  "count": 40
99
  },
100
  "Huggy.Policy.LearningRate.mean": {
101
+ "value": 3.926398691233338e-06,
102
+ "min": 3.926398691233338e-06,
103
+ "max": 0.000295387726537425,
104
  "count": 40
105
  },
106
  "Huggy.Policy.LearningRate.sum": {
107
+ "value": 1.1779196073700013e-05,
108
+ "min": 1.1779196073700013e-05,
109
+ "max": 0.00084447466850845,
110
  "count": 40
111
  },
112
  "Huggy.Policy.Epsilon.mean": {
113
+ "value": 0.10130876666666665,
114
+ "min": 0.10130876666666665,
115
+ "max": 0.19846257499999997,
116
  "count": 40
117
  },
118
  "Huggy.Policy.Epsilon.sum": {
119
+ "value": 0.3039262999999999,
120
+ "min": 0.20775120000000002,
121
+ "max": 0.5814915500000001,
122
  "count": 40
123
  },
124
  "Huggy.Policy.Beta.mean": {
125
+ "value": 7.530745666666678e-05,
126
+ "min": 7.530745666666678e-05,
127
+ "max": 0.0049232824925,
128
  "count": 40
129
  },
130
  "Huggy.Policy.Beta.sum": {
131
+ "value": 0.00022592237000000032,
132
+ "min": 0.00022592237000000032,
133
+ "max": 0.014076428344999996,
134
  "count": 40
135
  },
136
  "Huggy.IsTraining.mean": {
 
148
  },
149
  "metadata": {
150
  "timer_format_version": "0.1.0",
151
+ "start_time_seconds": "1724096947",
152
+ "python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
153
+ "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
154
+ "mlagents_version": "1.1.0.dev0",
155
+ "mlagents_envs_version": "1.1.0.dev0",
156
  "communication_protocol_version": "1.5.0",
157
+ "pytorch_version": "2.3.1+cu121",
158
+ "numpy_version": "1.23.5",
159
+ "end_time_seconds": "1724099356"
160
  },
161
+ "total": 2408.986247542,
162
  "count": 1,
163
+ "self": 0.4427949030000491,
164
  "children": {
165
  "run_training.setup": {
166
+ "total": 0.058424556999966626,
167
  "count": 1,
168
+ "self": 0.058424556999966626
169
  },
170
  "TrainerController.start_learning": {
171
+ "total": 2408.485028082,
172
  "count": 1,
173
+ "self": 4.405687960121213,
174
  "children": {
175
  "TrainerController._reset_env": {
176
+ "total": 2.704635742999926,
177
  "count": 1,
178
+ "self": 2.704635742999926
179
  },
180
  "TrainerController.advance": {
181
+ "total": 2401.264632557879,
182
+ "count": 232843,
183
+ "self": 5.003428151972912,
184
  "children": {
185
  "env_step": {
186
+ "total": 1898.0806372418672,
187
+ "count": 232843,
188
+ "self": 1563.6740324176942,
189
  "children": {
190
  "SubprocessEnvManager._take_step": {
191
+ "total": 331.52968991307523,
192
+ "count": 232843,
193
+ "self": 17.26057462911956,
194
  "children": {
195
  "TorchPolicy.evaluate": {
196
+ "total": 314.26911528395567,
197
+ "count": 222868,
198
+ "self": 314.26911528395567
199
  }
200
  }
201
  },
202
  "workers": {
203
+ "total": 2.8769149110977423,
204
+ "count": 232843,
205
  "self": 0.0,
206
  "children": {
207
  "worker_root": {
208
+ "total": 2401.222506307028,
209
+ "count": 232843,
210
  "is_parallel": true,
211
+ "self": 1140.0579356769776,
212
  "children": {
213
  "run_training.setup": {
214
  "total": 0.0,
 
217
  "self": 0.0,
218
  "children": {
219
  "steps_from_proto": {
220
+ "total": 0.0008576300000413539,
221
  "count": 1,
222
  "is_parallel": true,
223
+ "self": 0.00023306299999603652,
224
  "children": {
225
  "_process_rank_one_or_two_observation": {
226
+ "total": 0.0006245670000453174,
227
  "count": 2,
228
  "is_parallel": true,
229
+ "self": 0.0006245670000453174
230
  }
231
  }
232
  },
233
  "UnityEnvironment.step": {
234
+ "total": 0.028529405999961455,
235
  "count": 1,
236
  "is_parallel": true,
237
+ "self": 0.000368550999951367,
238
  "children": {
239
  "UnityEnvironment._generate_step_input": {
240
+ "total": 0.00023145199997998134,
241
  "count": 1,
242
  "is_parallel": true,
243
+ "self": 0.00023145199997998134
244
  },
245
  "communicator.exchange": {
246
+ "total": 0.027188260999992053,
247
  "count": 1,
248
  "is_parallel": true,
249
+ "self": 0.027188260999992053
250
  },
251
  "steps_from_proto": {
252
+ "total": 0.0007411420000380531,
253
  "count": 1,
254
  "is_parallel": true,
255
+ "self": 0.00020010400010050944,
256
  "children": {
257
  "_process_rank_one_or_two_observation": {
258
+ "total": 0.0005410379999375436,
259
  "count": 2,
260
  "is_parallel": true,
261
+ "self": 0.0005410379999375436
262
  }
263
  }
264
  }
 
267
  }
268
  },
269
  "UnityEnvironment.step": {
270
+ "total": 1261.1645706300505,
271
+ "count": 232842,
272
  "is_parallel": true,
273
+ "self": 38.45945179221826,
274
  "children": {
275
  "UnityEnvironment._generate_step_input": {
276
+ "total": 79.20543157889131,
277
+ "count": 232842,
278
  "is_parallel": true,
279
+ "self": 79.20543157889131
280
  },
281
  "communicator.exchange": {
282
+ "total": 1054.0489979159247,
283
+ "count": 232842,
284
  "is_parallel": true,
285
+ "self": 1054.0489979159247
286
  },
287
  "steps_from_proto": {
288
+ "total": 89.45068934301605,
289
+ "count": 232842,
290
  "is_parallel": true,
291
+ "self": 31.83942659090951,
292
  "children": {
293
  "_process_rank_one_or_two_observation": {
294
+ "total": 57.61126275210654,
295
+ "count": 465684,
296
  "is_parallel": true,
297
+ "self": 57.61126275210654
298
  }
299
  }
300
  }
 
307
  }
308
  },
309
  "trainer_advance": {
310
+ "total": 498.18056716403885,
311
+ "count": 232843,
312
+ "self": 6.787140146057482,
313
  "children": {
314
  "process_trajectory": {
315
+ "total": 158.68227981397922,
316
+ "count": 232843,
317
+ "self": 157.42939257697913,
318
  "children": {
319
  "RLTrainer._checkpoint": {
320
+ "total": 1.2528872370000954,
321
  "count": 10,
322
+ "self": 1.2528872370000954
323
  }
324
  }
325
  },
326
  "_update_policy": {
327
+ "total": 332.71114720400215,
328
  "count": 97,
329
+ "self": 268.575997863001,
330
  "children": {
331
  "TorchPPOOptimizer.update": {
332
+ "total": 64.13514934100112,
333
  "count": 2910,
334
+ "self": 64.13514934100112
335
  }
336
  }
337
  }
 
340
  }
341
  },
342
  "trainer_threads": {
343
+ "total": 8.810002327663824e-07,
344
  "count": 1,
345
+ "self": 8.810002327663824e-07
346
  },
347
  "TrainerController._save_models": {
348
+ "total": 0.11007093999978679,
349
  "count": 1,
350
+ "self": 0.002672168999652058,
351
  "children": {
352
  "RLTrainer._checkpoint": {
353
+ "total": 0.10739877100013473,
354
  "count": 1,
355
+ "self": 0.10739877100013473
356
  }
357
  }
358
  }
run_logs/training_status.json CHANGED
@@ -2,118 +2,118 @@
2
  "Huggy": {
3
  "checkpoints": [
4
  {
5
- "steps": 199668,
6
- "file_path": "results/Huggy/Huggy/Huggy-199668.onnx",
7
- "reward": 3.6473708591963114,
8
- "creation_time": 1691654093.408428,
9
  "auxillary_file_paths": [
10
- "results/Huggy/Huggy/Huggy-199668.pt"
11
  ]
12
  },
13
  {
14
- "steps": 399992,
15
- "file_path": "results/Huggy/Huggy/Huggy-399992.onnx",
16
- "reward": 3.4205776333808897,
17
- "creation_time": 1691654334.0444267,
18
  "auxillary_file_paths": [
19
- "results/Huggy/Huggy/Huggy-399992.pt"
20
  ]
21
  },
22
  {
23
- "steps": 599887,
24
- "file_path": "results/Huggy/Huggy/Huggy-599887.onnx",
25
- "reward": 3.8949327258502735,
26
- "creation_time": 1691654576.8509789,
27
  "auxillary_file_paths": [
28
- "results/Huggy/Huggy/Huggy-599887.pt"
29
  ]
30
  },
31
  {
32
- "steps": 799956,
33
- "file_path": "results/Huggy/Huggy/Huggy-799956.onnx",
34
- "reward": 3.8836050683801826,
35
- "creation_time": 1691654814.3381343,
36
  "auxillary_file_paths": [
37
- "results/Huggy/Huggy/Huggy-799956.pt"
38
  ]
39
  },
40
  {
41
- "steps": 999960,
42
- "file_path": "results/Huggy/Huggy/Huggy-999960.onnx",
43
- "reward": 3.7619761100551425,
44
- "creation_time": 1691655058.0659974,
45
  "auxillary_file_paths": [
46
- "results/Huggy/Huggy/Huggy-999960.pt"
47
  ]
48
  },
49
  {
50
- "steps": 1199953,
51
- "file_path": "results/Huggy/Huggy/Huggy-1199953.onnx",
52
- "reward": 4.09340716963229,
53
- "creation_time": 1691655303.2689927,
54
  "auxillary_file_paths": [
55
- "results/Huggy/Huggy/Huggy-1199953.pt"
56
  ]
57
  },
58
  {
59
- "steps": 1399935,
60
- "file_path": "results/Huggy/Huggy/Huggy-1399935.onnx",
61
- "reward": 3.808826977556402,
62
- "creation_time": 1691655548.5993836,
63
  "auxillary_file_paths": [
64
- "results/Huggy/Huggy/Huggy-1399935.pt"
65
  ]
66
  },
67
  {
68
- "steps": 1599992,
69
- "file_path": "results/Huggy/Huggy/Huggy-1599992.onnx",
70
- "reward": 3.6366643091807,
71
- "creation_time": 1691655788.8909352,
72
  "auxillary_file_paths": [
73
- "results/Huggy/Huggy/Huggy-1599992.pt"
74
  ]
75
  },
76
  {
77
- "steps": 1799989,
78
- "file_path": "results/Huggy/Huggy/Huggy-1799989.onnx",
79
- "reward": 3.718426309766308,
80
- "creation_time": 1691656032.9758444,
81
  "auxillary_file_paths": [
82
- "results/Huggy/Huggy/Huggy-1799989.pt"
83
  ]
84
  },
85
  {
86
- "steps": 1999999,
87
- "file_path": "results/Huggy/Huggy/Huggy-1999999.onnx",
88
- "reward": 4.064502081700733,
89
- "creation_time": 1691656278.1556702,
90
  "auxillary_file_paths": [
91
- "results/Huggy/Huggy/Huggy-1999999.pt"
92
  ]
93
  },
94
  {
95
- "steps": 2000118,
96
- "file_path": "results/Huggy/Huggy/Huggy-2000118.onnx",
97
- "reward": 4.091174681981404,
98
- "creation_time": 1691656278.3632476,
99
  "auxillary_file_paths": [
100
- "results/Huggy/Huggy/Huggy-2000118.pt"
101
  ]
102
  }
103
  ],
104
  "final_checkpoint": {
105
- "steps": 2000118,
106
  "file_path": "results/Huggy/Huggy.onnx",
107
- "reward": 4.091174681981404,
108
- "creation_time": 1691656278.3632476,
109
  "auxillary_file_paths": [
110
- "results/Huggy/Huggy/Huggy-2000118.pt"
111
  ]
112
  }
113
  },
114
  "metadata": {
115
  "stats_format_version": "0.3.0",
116
- "mlagents_version": "0.31.0.dev0",
117
- "torch_version": "1.11.0+cu102"
118
  }
119
  }
 
2
  "Huggy": {
3
  "checkpoints": [
4
  {
5
+ "steps": 199718,
6
+ "file_path": "results/Huggy/Huggy/Huggy-199718.onnx",
7
+ "reward": 3.3635429367423058,
8
+ "creation_time": 1724097183.0497184,
9
  "auxillary_file_paths": [
10
+ "results/Huggy/Huggy/Huggy-199718.pt"
11
  ]
12
  },
13
  {
14
+ "steps": 399887,
15
+ "file_path": "results/Huggy/Huggy/Huggy-399887.onnx",
16
+ "reward": 3.530230677733987,
17
+ "creation_time": 1724097417.904084,
18
  "auxillary_file_paths": [
19
+ "results/Huggy/Huggy/Huggy-399887.pt"
20
  ]
21
  },
22
  {
23
+ "steps": 599944,
24
+ "file_path": "results/Huggy/Huggy/Huggy-599944.onnx",
25
+ "reward": 3.839178647994995,
26
+ "creation_time": 1724097656.887657,
27
  "auxillary_file_paths": [
28
+ "results/Huggy/Huggy/Huggy-599944.pt"
29
  ]
30
  },
31
  {
32
+ "steps": 799960,
33
+ "file_path": "results/Huggy/Huggy/Huggy-799960.onnx",
34
+ "reward": 3.6240010769529776,
35
+ "creation_time": 1724097893.412152,
36
  "auxillary_file_paths": [
37
+ "results/Huggy/Huggy/Huggy-799960.pt"
38
  ]
39
  },
40
  {
41
+ "steps": 999589,
42
+ "file_path": "results/Huggy/Huggy/Huggy-999589.onnx",
43
+ "reward": 3.9100323442150566,
44
+ "creation_time": 1724098136.3660781,
45
  "auxillary_file_paths": [
46
+ "results/Huggy/Huggy/Huggy-999589.pt"
47
  ]
48
  },
49
  {
50
+ "steps": 1199914,
51
+ "file_path": "results/Huggy/Huggy/Huggy-1199914.onnx",
52
+ "reward": 3.88228909755021,
53
+ "creation_time": 1724098383.616469,
54
  "auxillary_file_paths": [
55
+ "results/Huggy/Huggy/Huggy-1199914.pt"
56
  ]
57
  },
58
  {
59
+ "steps": 1399897,
60
+ "file_path": "results/Huggy/Huggy/Huggy-1399897.onnx",
61
+ "reward": 4.881224036216736,
62
+ "creation_time": 1724098627.2696922,
63
  "auxillary_file_paths": [
64
+ "results/Huggy/Huggy/Huggy-1399897.pt"
65
  ]
66
  },
67
  {
68
+ "steps": 1599960,
69
+ "file_path": "results/Huggy/Huggy/Huggy-1599960.onnx",
70
+ "reward": 3.9023103459314865,
71
+ "creation_time": 1724098867.4479458,
72
  "auxillary_file_paths": [
73
+ "results/Huggy/Huggy/Huggy-1599960.pt"
74
  ]
75
  },
76
  {
77
+ "steps": 1799940,
78
+ "file_path": "results/Huggy/Huggy/Huggy-1799940.onnx",
79
+ "reward": 4.0003840247510185,
80
+ "creation_time": 1724099110.7495985,
81
  "auxillary_file_paths": [
82
+ "results/Huggy/Huggy/Huggy-1799940.pt"
83
  ]
84
  },
85
  {
86
+ "steps": 1999930,
87
+ "file_path": "results/Huggy/Huggy/Huggy-1999930.onnx",
88
+ "reward": 3.8655002066067286,
89
+ "creation_time": 1724099355.771384,
90
  "auxillary_file_paths": [
91
+ "results/Huggy/Huggy/Huggy-1999930.pt"
92
  ]
93
  },
94
  {
95
+ "steps": 2000044,
96
+ "file_path": "results/Huggy/Huggy/Huggy-2000044.onnx",
97
+ "reward": 3.89092278984231,
98
+ "creation_time": 1724099355.886748,
99
  "auxillary_file_paths": [
100
+ "results/Huggy/Huggy/Huggy-2000044.pt"
101
  ]
102
  }
103
  ],
104
  "final_checkpoint": {
105
+ "steps": 2000044,
106
  "file_path": "results/Huggy/Huggy.onnx",
107
+ "reward": 3.89092278984231,
108
+ "creation_time": 1724099355.886748,
109
  "auxillary_file_paths": [
110
+ "results/Huggy/Huggy/Huggy-2000044.pt"
111
  ]
112
  }
113
  },
114
  "metadata": {
115
  "stats_format_version": "0.3.0",
116
+ "mlagents_version": "1.1.0.dev0",
117
+ "torch_version": "2.3.1+cu121"
118
  }
119
  }