fpadovani committed
Commit 9ff5857
1 Parent(s): 206f30c

Training in progress, step 8000, checkpoint

checkpoint-8000/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8b0a4e77a2f9966920d9b368c47cd86e6e721982eb4fd7a27e8fe9c940d57ab5
+oid sha256:b5c941ad6670e5d4b3bed275869dd3de4b284bc778a97b7d1bf3fdf8f88775dd
 size 14809584
checkpoint-8000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6942d2e3b7e165da14b7ffeffe0c609d45277d5a3c8c34d5b57a5bbd0eb5c55f
+oid sha256:43ebd0f45b3191565dd4485744390e1c3227eaea129f99be37f1665e5841e0f1
 size 29680378
checkpoint-8000/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b2e3ebda62b905bc9803947a63d1d24d79721dc34cf3de0c4067b7577f108788
+oid sha256:4429ad348d70562c2d1cd7e89b3f0148fba5553942d687f80839a2dc7db2e01a
 size 1192
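
The three files above (model weights, optimizer state, LR scheduler state) are stored as Git LFS pointer files, so only the sha256 oid changes in each diff while the byte size stays the same; the actual blobs live in LFS storage. A minimal sketch for checking a locally downloaded blob against its pointer; the local path is an assumption for illustration, and the expected oid is copied from the new model.safetensors pointer above:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so large checkpoint blobs need not fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "b5c941ad6670e5d4b3bed275869dd3de4b284bc778a97b7d1bf3fdf8f88775dd"
actual = sha256_of("checkpoint-8000/model.safetensors")  # illustrative local path
print("OK" if actual == expected else "MISMATCH: " + actual)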
checkpoint-8000/tokenizer.json CHANGED
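
The diff below drops the tokenizer's fixed 128-token truncation and padding settings; both keys now serialize as null, which usually means fixed-length behavior is applied outside the tokenizer (e.g. by the data collator) or was cleared before saving. A minimal sketch of how that serialization difference arises, assuming the standalone `tokenizers` package; paths are illustrative:

from tokenizers import Tokenizer

tok = Tokenizer.from_file("checkpoint-8000/tokenizer.json")

# The old file carried fixed 128-token settings, roughly equivalent to:
tok.enable_truncation(max_length=128, stride=0, strategy="longest_first")
tok.enable_padding(length=128, pad_id=3, pad_type_id=0, pad_token="[PAD]")

# Clearing them reproduces the new file: both keys serialize as null.
tok.no_truncation()
tok.no_padding()
tok.save("tokenizer.json")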
@@ -1,21 +1,7 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 128,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
-  "padding": {
-    "strategy": {
-      "Fixed": 128
-    },
-    "direction": "Right",
-    "pad_to_multiple_of": null,
-    "pad_id": 3,
-    "pad_type_id": 0,
-    "pad_token": "[PAD]"
-  },
+  "truncation": null,
+  "padding": null,
   "added_tokens": [
     {
       "id": 0,
checkpoint-8000/trainer_state.json CHANGED
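
Compared with the previous version, the diff below shows much higher losses and a max_steps that drops from 552972 to 33773, which suggests this checkpoint comes from a re-run with a different (much smaller) dataset or batch configuration. With num_train_epochs set to 1, the recorded epoch should equal global_step / max_steps (8000 / 33773 ≈ 0.2369, matching the new value). A minimal sketch for inspecting the file; the path is illustrative:

import json

with open("checkpoint-8000/trainer_state.json") as f:
    state = json.load(f)

# Eval-loss trajectory, logged every eval_steps (2000) steps.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']:>5}  eval_loss {entry['eval_loss']:.4f}")

# With num_train_epochs = 1, epoch should equal global_step / max_steps.
expected_epoch = state["global_step"] / state["max_steps"]  # 8000 / 33773 ≈ 0.2369
print(f"epoch recorded {state['epoch']:.6f}, expected {expected_epoch:.6f}")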
@@ -1,7 +1,7 @@
 {
-  "best_metric": 0.16548627614974976,
+  "best_metric": 3.6455252170562744,
   "best_model_checkpoint": "/Users/frapadovani/Desktop/babyLM_controlled/models_trained/convergence_french/random_sentence_french/checkpoint-8000",
-  "epoch": 0.014467278632552824,
+  "epoch": 0.23687561069493382,
   "eval_steps": 2000,
   "global_step": 8000,
   "is_hyper_param_search": false,
@@ -9,68 +9,68 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.003616819658138206,
-      "grad_norm": 0.14744240045547485,
+      "epoch": 0.059218902673733455,
+      "grad_norm": 1.0812722444534302,
       "learning_rate": 0.0001,
-      "loss": 0.7697,
+      "loss": 4.7523,
       "step": 2000
     },
     {
-      "epoch": 0.003616819658138206,
-      "eval_loss": 0.19154727458953857,
-      "eval_runtime": 91.812,
-      "eval_samples_per_second": 397.933,
-      "eval_steps_per_second": 24.877,
+      "epoch": 0.059218902673733455,
+      "eval_loss": 4.0236735343933105,
+      "eval_runtime": 5.173,
+      "eval_samples_per_second": 425.477,
+      "eval_steps_per_second": 26.677,
       "step": 2000
     },
     {
-      "epoch": 0.007233639316276412,
-      "grad_norm": 0.1623348593711853,
+      "epoch": 0.11843780534746691,
+      "grad_norm": 1.2557106018066406,
       "learning_rate": 0.0001,
-      "loss": 0.1884,
+      "loss": 3.8958,
       "step": 4000
     },
     {
-      "epoch": 0.007233639316276412,
-      "eval_loss": 0.17771275341510773,
-      "eval_runtime": 99.0721,
-      "eval_samples_per_second": 368.772,
-      "eval_steps_per_second": 23.054,
+      "epoch": 0.11843780534746691,
+      "eval_loss": 3.8087611198425293,
+      "eval_runtime": 5.2666,
+      "eval_samples_per_second": 417.913,
+      "eval_steps_per_second": 26.203,
       "step": 4000
     },
     {
-      "epoch": 0.010850458974414617,
-      "grad_norm": 0.13657265901565552,
+      "epoch": 0.17765670802120037,
+      "grad_norm": 1.3326870203018188,
       "learning_rate": 0.0001,
-      "loss": 0.178,
+      "loss": 3.7344,
       "step": 6000
     },
     {
-      "epoch": 0.010850458974414617,
-      "eval_loss": 0.16984443366527557,
-      "eval_runtime": 90.3285,
-      "eval_samples_per_second": 404.468,
-      "eval_steps_per_second": 25.285,
+      "epoch": 0.17765670802120037,
+      "eval_loss": 3.704986333847046,
+      "eval_runtime": 5.2277,
+      "eval_samples_per_second": 421.023,
+      "eval_steps_per_second": 26.398,
       "step": 6000
     },
     {
-      "epoch": 0.014467278632552824,
-      "grad_norm": 0.15028506517410278,
+      "epoch": 0.23687561069493382,
+      "grad_norm": 1.38993239402771,
       "learning_rate": 0.0001,
-      "loss": 0.1714,
+      "loss": 3.6379,
       "step": 8000
     },
     {
-      "epoch": 0.014467278632552824,
-      "eval_loss": 0.16548627614974976,
-      "eval_runtime": 90.9787,
-      "eval_samples_per_second": 401.578,
-      "eval_steps_per_second": 25.105,
+      "epoch": 0.23687561069493382,
+      "eval_loss": 3.6455252170562744,
+      "eval_runtime": 5.1485,
+      "eval_samples_per_second": 427.5,
+      "eval_steps_per_second": 26.804,
       "step": 8000
     }
   ],
   "logging_steps": 2000,
-  "max_steps": 552972,
+  "max_steps": 33773,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 2000,
checkpoint-8000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f5e294196a968439ae6368c7b1ef6eb6e2124702ff760349c5ba2ea15623271d
+oid sha256:fce3a8508ad2aa1c0ae1cc54c6fe1747e2312953499cad9c035bc201c305894e
 size 5496
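
training_args.bin is also an LFS pointer; the blob itself is the pickled TrainingArguments the Trainer saves alongside each checkpoint. A minimal sketch for inspecting it locally, assuming torch and transformers are installed and the path is illustrative:

import torch

# weights_only=False may be required on recent torch releases because this is
# a pickled TrainingArguments object, not a plain tensor state dict; only do
# this for checkpoints you trust.
args = torch.load("checkpoint-8000/training_args.bin", weights_only=False)
print(type(args).__name__)
print(args.learning_rate, args.eval_steps, args.save_steps)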