lleticiasilvaa committed
Commit 3b81b54
1 Parent(s): 6333ba6

Training in progress, step 1071, checkpoint

checkpoint-1071/adapter_config.json CHANGED
@@ -24,12 +24,12 @@
   "revision": null,
   "target_modules": [
     "up_proj",
-    "down_proj",
     "v_proj",
     "q_proj",
     "k_proj",
+    "o_proj",
     "gate_proj",
-    "o_proj"
+    "down_proj"
   ],
   "task_type": null,
   "use_dora": false,
checkpoint-1071/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab5551299479ba911ecd5411343414b7492d9c3f8906f23f91ed7c13f47f8e92
+oid sha256:6032c9bfd3dd3cfbd399f6df89e6745b953a0cce46e3ac8f37f69ea4dd08c949
 size 400616360
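adapter_model.safetensors is stored as a Git LFS pointer, so the diff only swaps the sha256 oid while the size stays at 400616360 bytes. A minimal sketch for checking a locally downloaded copy of the file against the new oid; the local path is an assumption about where the file was fetched:

# Minimal sketch: verify a downloaded checkpoint file against its LFS pointer oid.
# The local path below is hypothetical; adjust it to wherever the file was downloaded.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "6032c9bfd3dd3cfbd399f6df89e6745b953a0cce46e3ac8f37f69ea4dd08c949"  # new oid from the diff
actual = sha256_of("checkpoint-1071/adapter_model.safetensors")
print("match" if actual == expected else "mismatch")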
checkpoint-1071/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:48a08f8a4cce2dcd93e566a4154fff311744329e01ebc7648885f66c8d4603ea
+oid sha256:a53b37eb7ae0024a6122f993df44e0b804cc99c0d1d097f4b3cd413395d27c0a
 size 203945684
checkpoint-1071/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1e977bc2b30023a922fb3707d1a2e138d9823618c90421fc6115a8c8b6143f91
+oid sha256:a70ea75b4dbc4e7ea7d24e89c83e9fa40a49068c80c4665bd494e08830072168
 size 14308
checkpoint-1071/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0e5aa052a9cf776ef4705a882d4a844e05e8bf7d99da3c7d5257cacbf49be4a3
+oid sha256:8a08e80ee90076e5bbfa1d9e624782bc839ca768f4eac3252d59d8cf20555f20
 size 1064
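optimizer.pt, scheduler.pt and rng_state.pth carry the optimizer, learning-rate-scheduler and RNG state that transformers' Trainer restores when a run is resumed. A minimal sketch of resuming from this checkpoint, assuming the run uses Trainer; the model, dataset and output directory are hypothetical placeholders that do not appear in this diff:

# Minimal sketch, assuming the run uses transformers.Trainer. The base model,
# dataset and most TrainingArguments are hypothetical; resume_from_checkpoint
# restores the optimizer.pt, scheduler.pt and rng_state.pth saved in this commit.
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="out",               # hypothetical output dir holding checkpoint-1071
    per_device_train_batch_size=1,  # matches "train_batch_size": 1 in trainer_state.json
)
trainer = Trainer(model=model, args=args, train_dataset=train_ds)  # model/train_ds assumed defined upstream
trainer.train(resume_from_checkpoint="out/checkpoint-1071")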
checkpoint-1071/trainer_state.json CHANGED
@@ -10,62 +10,62 @@
   "log_history": [
     {
       "epoch": 0.2333994631812347,
-      "grad_norm": 5.120560169219971,
-      "learning_rate": 8.987402315498223e-05,
-      "loss": 0.4681,
+      "grad_norm": 3.4083237648010254,
+      "learning_rate": 8.996514402527381e-05,
+      "loss": 0.2768,
       "step": 250
     },
     {
       "epoch": 0.2333994631812347,
-      "eval_loss": 0.3557259440422058,
-      "eval_runtime": 26.5914,
-      "eval_samples_per_second": 3.272,
-      "eval_steps_per_second": 3.272,
+      "eval_loss": 0.15054132044315338,
+      "eval_runtime": 39.385,
+      "eval_samples_per_second": 2.209,
+      "eval_steps_per_second": 2.209,
       "step": 250
     },
     {
       "epoch": 0.4667989263624694,
-      "grad_norm": 3.340378999710083,
-      "learning_rate": 5.828471682626175e-05,
-      "loss": 0.3022,
+      "grad_norm": 7.858141899108887,
+      "learning_rate": 5.843391598090062e-05,
+      "loss": 0.1092,
       "step": 500
     },
     {
       "epoch": 0.4667989263624694,
-      "eval_loss": 0.3018253445625305,
-      "eval_runtime": 26.6712,
-      "eval_samples_per_second": 3.262,
-      "eval_steps_per_second": 3.262,
+      "eval_loss": 0.11498060077428818,
+      "eval_runtime": 39.2923,
+      "eval_samples_per_second": 2.214,
+      "eval_steps_per_second": 2.214,
       "step": 500
     },
     {
       "epoch": 0.7001983895437041,
-      "grad_norm": 3.158240556716919,
-      "learning_rate": 2.2174321662025427e-05,
-      "loss": 0.2736,
+      "grad_norm": 2.502495765686035,
+      "learning_rate": 2.2300178981718183e-05,
+      "loss": 0.0945,
       "step": 750
     },
     {
       "epoch": 0.7001983895437041,
-      "eval_loss": 0.2686610519886017,
-      "eval_runtime": 26.6022,
-      "eval_samples_per_second": 3.27,
-      "eval_steps_per_second": 3.27,
+      "eval_loss": 0.10474376380443573,
+      "eval_runtime": 39.2665,
+      "eval_samples_per_second": 2.216,
+      "eval_steps_per_second": 2.216,
       "step": 750
     },
     {
       "epoch": 0.9335978527249388,
-      "grad_norm": 3.1761715412139893,
-      "learning_rate": 1.2487983905362933e-06,
-      "loss": 0.2525,
+      "grad_norm": 2.4208877086639404,
+      "learning_rate": 1.2826317302122581e-06,
+      "loss": 0.0862,
       "step": 1000
     },
     {
       "epoch": 0.9335978527249388,
-      "eval_loss": 0.259181410074234,
-      "eval_runtime": 26.6606,
-      "eval_samples_per_second": 3.263,
-      "eval_steps_per_second": 3.263,
+      "eval_loss": 0.10126293450593948,
+      "eval_runtime": 39.3834,
+      "eval_samples_per_second": 2.209,
+      "eval_steps_per_second": 2.209,
       "step": 1000
     }
   ],
@@ -86,7 +86,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.7233286238900224e+16,
+  "total_flos": 8.796557404984934e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null