k4black committed
Commit 2dc0150
1 Parent(s): 8b7dc52

Training in progress, step 2800

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3e47913498cc62e3d622e030fa2381e7512d18d5065ac931177b2dc5decc8f98
+ oid sha256:fb078f66955011ea6afcb149ea16849afed6783f2a0810d9c8a62cd9a59686c5
  size 2843230968
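Each of the checkpoint files in this commit is stored through Git LFS, so the diff only touches the small pointer file: the version and size lines are unchanged, while the oid sha256 line moves to the digest of the new step-2800 artifact. As a minimal sketch (the local path is hypothetical and assumes the real file has been fetched with git lfs pull), a downloaded file can be checked against the pointer's digest like this:

import hashlib

def sha256_of_file(path, chunk_size=1 << 20):
    # Stream in 1 MiB chunks so multi-GB checkpoints never need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the new pointer above.
expected = "fb078f66955011ea6afcb149ea16849afed6783f2a0810d9c8a62cd9a59686c5"
assert sha256_of_file("last-checkpoint/optimizer.pt") == expected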
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4ad67941a343ddab4739bbed3aafcbc93cf909ef47fa983f60de65a1f5efea2d
+ oid sha256:202a9040c68109cbb48b8fe2017897cfb1a7a3f10ba5d835b9cae6758ab0fd4f
  size 1421591285
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8df2fbb5eae4ba8d93df3cabc2751ec21112d1099f8e59f83bee856092c76626
+ oid sha256:778f07573f600fa48cbe5b11ab076696f67b51b6a5db2be2df0befcfb3b87a3b
  size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7b5c0c9d70ab7582ef8cee0fb535ed73768bfc895b20793c16cf28f34c9dff83
+ oid sha256:2beb2962f0b6cdb6d771bb320fa65eb0c0fa5a9c4fbb9b329b61e29cdbef62fe
  size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
- "best_metric": 0.7974911320075254,
- "best_model_checkpoint": "/home3/s5431786/nlp-final-project/results/roberta-large-e-snli-classification-nli_explanation-base-b16/checkpoint-400",
- "epoch": 0.011649580615097856,
- "global_step": 400,
+ "best_metric": 0.983181371856266,
+ "best_model_checkpoint": "/home3/s5431786/nlp-final-project/results/roberta-large-e-snli-classification-nli_explanation-base-b16/checkpoint-2800",
+ "epoch": 0.081547064305685,
+ "global_step": 2800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -22,11 +22,107 @@
  "eval_samples_per_second": 764.91,
  "eval_steps_per_second": 47.875,
  "step": 400
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 1.5530964861192002e-06,
+ "loss": 0.3233,
+ "step": 800
+ },
+ {
+ "epoch": 0.02,
+ "eval_accuracy": 0.9642349116033326,
+ "eval_f1": 0.9642718853707861,
+ "eval_loss": 0.14329373836517334,
+ "eval_runtime": 12.933,
+ "eval_samples_per_second": 760.998,
+ "eval_steps_per_second": 47.63,
+ "step": 800
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 2.3296447291788007e-06,
+ "loss": 0.173,
+ "step": 1200
+ },
+ {
+ "epoch": 0.03,
+ "eval_accuracy": 0.9762243446453973,
+ "eval_f1": 0.9761597449883214,
+ "eval_loss": 0.1054357960820198,
+ "eval_runtime": 12.9969,
+ "eval_samples_per_second": 757.258,
+ "eval_steps_per_second": 47.396,
+ "step": 1200
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 3.1061929722384003e-06,
+ "loss": 0.1452,
+ "step": 1600
+ },
+ {
+ "epoch": 0.05,
+ "eval_accuracy": 0.9799837431416378,
+ "eval_f1": 0.9799316333368723,
+ "eval_loss": 0.09648650884628296,
+ "eval_runtime": 13.0065,
+ "eval_samples_per_second": 756.7,
+ "eval_steps_per_second": 47.361,
+ "step": 1600
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 3.882741215298001e-06,
+ "loss": 0.1449,
+ "step": 2000
+ },
+ {
+ "epoch": 0.06,
+ "eval_accuracy": 0.9813046128835603,
+ "eval_f1": 0.9812341372941731,
+ "eval_loss": 0.09331633150577545,
+ "eval_runtime": 12.921,
+ "eval_samples_per_second": 761.706,
+ "eval_steps_per_second": 47.674,
+ "step": 2000
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.659289458357601e-06,
+ "loss": 0.1303,
+ "step": 2400
+ },
+ {
+ "epoch": 0.07,
+ "eval_accuracy": 0.9815078236130867,
+ "eval_f1": 0.9814520761782233,
+ "eval_loss": 0.10890379548072815,
+ "eval_runtime": 12.8683,
+ "eval_samples_per_second": 764.824,
+ "eval_steps_per_second": 47.87,
+ "step": 2400
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 5.435837701417202e-06,
+ "loss": 0.1372,
+ "step": 2800
+ },
+ {
+ "epoch": 0.08,
+ "eval_accuracy": 0.9832351148140622,
+ "eval_f1": 0.983181371856266,
+ "eval_loss": 0.07784133404493332,
+ "eval_runtime": 12.8322,
+ "eval_samples_per_second": 766.977,
+ "eval_steps_per_second": 48.004,
+ "step": 2800
  }
  ],
  "max_steps": 103008,
  "num_train_epochs": 3,
- "total_flos": 792493810750848.0,
+ "total_flos": 5492559907900896.0,
  "trial_name": null,
  "trial_params": null
  }
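The trainer_state.json diff is where the training progress is actually visible: best_metric, best_model_checkpoint, epoch and global_step move from the step-400 values to the step-2800 ones, new loss and evaluation entries for steps 800 through 2800 are appended to log_history, and total_flos is updated. The new best_metric equals the eval_f1 logged at step 2800, which suggests F1 is the metric used to pick the best checkpoint. A small sketch for inspecting the updated state from a local clone (the path is assumed):

import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best metric:", state["best_metric"])

# Print the evaluation entries logged so far.
for entry in state["log_history"]:
    if "eval_accuracy" in entry and "eval_f1" in entry:
        print(f"step {entry['step']:>5}: "
              f"acc={entry['eval_accuracy']:.4f}  f1={entry['eval_f1']:.4f}")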
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4ad67941a343ddab4739bbed3aafcbc93cf909ef47fa983f60de65a1f5efea2d
+ oid sha256:202a9040c68109cbb48b8fe2017897cfb1a7a3f10ba5d835b9cae6758ab0fd4f
  size 1421591285
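The root-level pytorch_model.bin is updated to the same weights as the checkpoint copy, so the current state of the repository can be loaded for inference from a local clone. A minimal sketch, assuming the repo's config.json and tokenizer files sit alongside the weights (they are not part of this commit):

from transformers import AutoModelForSequenceClassification, AutoTokenizer

local_repo = "./"  # path to a local clone of this model repository
model = AutoModelForSequenceClassification.from_pretrained(local_repo)
tokenizer = AutoTokenizer.from_pretrained(local_repo)

Alternatively, training can be resumed from the last-checkpoint directory by passing resume_from_checkpoint="last-checkpoint" to Trainer.train in the original training script.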