aliberts committed
Commit 1c5ebc4 · verified · 1 Parent(s): 5cc3bae

Upload folder using huggingface_hub

Files changed (3):
  1. config.json +27 -3
  2. model.safetensors +2 -2
  3. train_config.json +65 -22
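
The commit message suggests the folder was pushed with huggingface_hub's upload_folder. A minimal sketch of such a push is below, assuming a hypothetical local path and repo_id (the target repo is not named in this diff):

from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="path/to/pretrained_model",    # hypothetical local checkpoint folder
    repo_id="<user>/<model-repo>",             # placeholder; not taken from this commit
    commit_message="Upload folder using huggingface_hub",
)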
config.json CHANGED
@@ -1,12 +1,28 @@
 {
-  "chunk_size": 100,
+  "chunk_size": 20,
   "dim_feedforward": 3200,
-  "dim_model": 512,
+  "dim_model": 64,
   "dropout": 0.1,
   "feedforward_activation": "relu",
+  "input_features": {
+    "observation.images.top": {
+      "shape": [
+        3,
+        480,
+        640
+      ],
+      "type": "VISUAL"
+    },
+    "observation.state": {
+      "shape": [
+        14
+      ],
+      "type": "STATE"
+    }
+  },
   "kl_weight": 10.0,
   "latent_dim": 32,
-  "n_action_steps": 100,
+  "n_action_steps": 20,
   "n_decoder_layers": 1,
   "n_encoder_layers": 4,
   "n_heads": 8,
@@ -20,6 +36,14 @@
   "optimizer_lr": 1e-05,
   "optimizer_lr_backbone": 1e-05,
   "optimizer_weight_decay": 0.0001,
+  "output_features": {
+    "action": {
+      "shape": [
+        14
+      ],
+      "type": "ACTION"
+    }
+  },
   "pre_norm": false,
   "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
   "replace_final_stride_with_dilation": false,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:81db69111d49e02f0adec79c9217d76c9b3e1d3df11574b76c33790aea07b18c
-size 206766560
+oid sha256:39628cc4c2accfc30bed32ed52c244e2a608f6d10d2122d69cf0f4f328e431b0
+size 60492680
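
The LFS pointer records only the blob's sha256 and byte size; the checkpoint shrank from roughly 207 MB to roughly 60 MB, consistent with dim_model dropping from 512 to 64. A sketch for checking a downloaded file against the pointer, assuming a placeholder repo_id:

import hashlib
import os
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(repo_id="<user>/<model-repo>", filename="model.safetensors")  # placeholder repo
with open(weights_path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print(digest)                         # expected: 39628cc4c2accfc30bed32ed52c244e2a608f6d10d2122d69cf0f4f328e431b0
print(os.path.getsize(weights_path))  # expected: 60492680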
train_config.json CHANGED
@@ -1,9 +1,11 @@
 {
   "dataset": {
     "repo_id": "lerobot/aloha_sim_transfer_cube_human",
-    "episodes": null,
+    "episodes": [
+      0
+    ],
     "image_transforms": {
-      "enable": false,
+      "enable": true,
       "max_num_transforms": 3,
       "random_order": false,
       "tfs": {
@@ -66,16 +68,37 @@
   "env": {
     "type": "aloha",
     "n_envs": null,
-    "task": "AlohaTransferCube-v0",
+    "task": "AlohaInsertion-v0",
     "fps": 50,
-    "feature_types": {
-      "agent_pos": "STATE",
-      "pixels": {
-        "top": "VISUAL"
+    "features": {
+      "action": {
+        "type": "ACTION",
+        "shape": [
+          14
+        ]
+      },
+      "agent_pos": {
+        "type": "STATE",
+        "shape": [
+          14
+        ]
       },
-      "action": "ACTION"
+      "pixels/top": {
+        "type": "VISUAL",
+        "shape": [
+          480,
+          640,
+          3
+        ]
+      }
     },
-    "episode_length": 400,
+    "features_map": {
+      "action": "action",
+      "agent_pos": "observation.state",
+      "top": "observation.image.top",
+      "pixels/top": "observation.images.top"
+    },
+    "episode_length": 5,
     "obs_type": "pixels_agent_pos",
     "render_mode": "rgb_array"
   },
@@ -87,13 +110,37 @@
       "STATE": "MEAN_STD",
       "ACTION": "MEAN_STD"
     },
-    "chunk_size": 100,
-    "n_action_steps": 100,
+    "input_features": {
+      "observation.images.top": {
+        "type": "VISUAL",
+        "shape": [
+          3,
+          480,
+          640
+        ]
+      },
+      "observation.state": {
+        "type": "STATE",
+        "shape": [
+          14
+        ]
+      }
+    },
+    "output_features": {
+      "action": {
+        "type": "ACTION",
+        "shape": [
+          14
+        ]
+      }
+    },
+    "chunk_size": 20,
+    "n_action_steps": 20,
     "vision_backbone": "resnet18",
     "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
     "replace_final_stride_with_dilation": false,
     "pre_norm": false,
-    "dim_model": 512,
+    "dim_model": 64,
     "n_heads": 8,
     "dim_feedforward": 3200,
     "feedforward_activation": "relu",
@@ -112,17 +159,17 @@
   "output_dir": "outputs/train/act_aloha_sim_transfer_cube_human",
   "job_name": "aloha_act",
   "resume": false,
-  "device": "mps",
+  "device": "cpu",
   "use_amp": false,
   "seed": 1000,
   "num_workers": 4,
-  "batch_size": 8,
-  "eval_freq": 10,
+  "batch_size": 2,
+  "eval_freq": 20000,
   "log_freq": 1,
   "save_checkpoint": true,
-  "save_freq": 5,
+  "save_freq": 2,
   "offline": {
-    "steps": 10
+    "steps": 4
   },
   "online": {
     "steps": 0,
@@ -147,13 +194,9 @@
     "weight_decay": 0.0001,
     "grad_clip_norm": 10.0
   },
-  "scheduler": {
-    "type": "none",
-    "num_warmup_steps": 0
-  },
+  "scheduler": null,
   "eval": {
     "n_episodes": 1,
-    "episode_length": null,
     "batch_size": 1,
     "use_async_envs": false
   },
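
The train_config.json changes read like a short smoke-test setup (a single dataset episode, CPU device, batch size 2, 4 offline steps). A sketch for pulling the file and checking the dataset section; the repo_id is a placeholder and only the nesting visible in the diff above is assumed:

import json
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="<user>/<model-repo>", filename="train_config.json")  # placeholder repo
with open(path) as f:
    train_cfg = json.load(f)

print(train_cfg["dataset"]["episodes"])                    # [0]
print(train_cfg["dataset"]["image_transforms"]["enable"])  # True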