supermy committed
Commit 0f3b465
1 Parent(s): 5c94ed8

Update README.md

Files changed (1)
  1. README.md +37 -25
README.md CHANGED
@@ -76,38 +76,50 @@ BPE tokenization: "vocab_size"=30000
  [INFO|trainer.py:1614] 2022-12-02 19:52:59,024 >> Total optimization steps = 84996
  [INFO|trainer.py:1616] 2022-12-02 19:52:59,025 >> Number of trainable parameters = 124439808
  
+ [INFO|trainer.py:1608] 2022-12-03 21:44:00,182 >> ***** Running training *****
+ [INFO|trainer.py:1609] 2022-12-03 21:44:00,182 >> Num examples = 9443
+ [INFO|trainer.py:1610] 2022-12-03 21:44:00,182 >> Num Epochs = 216
+ [INFO|trainer.py:1611] 2022-12-03 21:44:00,182 >> Instantaneous batch size per device = 12
+ [INFO|trainer.py:1612] 2022-12-03 21:44:00,182 >> Total train batch size (w. parallel, distributed & accumulation) = 12
+ [INFO|trainer.py:1613] 2022-12-03 21:44:00,182 >> Gradient Accumulation steps = 1
+ [INFO|trainer.py:1614] 2022-12-03 21:44:00,182 >> Total optimization steps = 169992
+ [INFO|trainer.py:1616] 2022-12-03 21:44:00,183 >> Number of trainable parameters = 124439808
+ [INFO|trainer.py:1637] 2022-12-03 21:44:00,184 >> Continuing training from checkpoint, will skip to saved global_step
+ [INFO|trainer.py:1638] 2022-12-03 21:44:00,184 >> Continuing training from epoch 107
+ [INFO|trainer.py:1639] 2022-12-03 21:44:00,184 >> Continuing training from global step 84500
+
  {'loss': 8.0431, 'learning_rate': 4.970998635229893e-05, 'epoch': 0.64}
  {'loss': 7.4867, 'learning_rate': 4.94158548637583e-05, 'epoch': 1.27}
  {'loss': 7.322, 'learning_rate': 4.912172337521766e-05, 'epoch': 1.91}
  ......
+ {'loss': 3.901, 'learning_rate': 2.5010882865076008e-05, 'epoch': 108.01}
+ {'loss': 3.8959, 'learning_rate': 2.4863817120805686e-05, 'epoch': 108.64}
  ......
- ......
- {'loss': 3.8686, 'learning_rate': 9.035719327968376e-07, 'epoch': 106.1}
- {'loss': 3.8685, 'learning_rate': 6.094404442562004e-07, 'epoch': 106.73}
- {'loss': 3.8678, 'learning_rate': 3.1530895571556306e-07, 'epoch': 107.37}
-
- {'train_runtime': 71919.9835, 'train_samples_per_second': 14.18, 'train_steps_per_second': 1.182, 'train_loss': 4.661963973798675, 'epoch': 108.0}
+ {'loss': 3.1625, 'learning_rate': 4.6090404254317857e-07, 'epoch': 214.1}
+ {'loss': 3.1592, 'learning_rate': 3.1413242976140055e-07, 'epoch': 214.74}
+ {'loss': 3.1625, 'learning_rate': 1.6706668549108195e-07, 'epoch': 215.37}
+ {'train_runtime': 72271.9602, 'train_samples_per_second': 28.222, 'train_steps_per_second': 2.352, 'train_loss': 1.7180436183842016, 'epoch': 216.0}
  ***** train metrics *****
- epoch = 108.0
- train_loss = 4.662
- train_runtime = 19:58:39.98
+ epoch = 216.0
+ train_loss = 1.718
+ train_runtime = 20:04:31.96
  train_samples = 9443
- train_samples_per_second = 14.18
- train_steps_per_second = 1.182
- 12/03/2022 15:51:42 - INFO - __main__ - *** Evaluate ***
- [INFO|trainer.py:2929] 2022-12-03 15:51:42,270 >> ***** Running Evaluation *****
- [INFO|trainer.py:2931] 2022-12-03 15:51:42,270 >> Num examples = 283
- [INFO|trainer.py:2934] 2022-12-03 15:51:42,270 >> Batch size = 12
- 100%|██████████| 24/24 [00:07<00:00, 3.17it/s]
- [INFO|modelcard.py:449] 2022-12-03 15:51:52,077 >> Dropping the following result as it does not have all the necessary fields:
- {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}, 'metrics': [{'name': 'Accuracy', 'type': 'accuracy', 'value': 0.2100502721055507}]}
+ train_samples_per_second = 28.222
+ train_steps_per_second = 2.352
+ 12/04/2022 17:48:35 - INFO - __main__ - *** Evaluate ***
+ [INFO|trainer.py:2929] 2022-12-04 17:48:35,460 >> ***** Running Evaluation *****
+ [INFO|trainer.py:2931] 2022-12-04 17:48:35,460 >> Num examples = 283
+ [INFO|trainer.py:2934] 2022-12-04 17:48:35,460 >> Batch size = 12
+ 100%|██████████| 24/24 [00:07<00:00, 3.20it/s]
+ [INFO|modelcard.py:449] 2022-12-04 17:48:45,840 >> Dropping the following result as it does not have all the necessary fields:
+ {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}, 'metrics': [{'name': 'Accuracy', 'type': 'accuracy', 'value': 0.20171768789804512}]}
  ***** eval metrics *****
- epoch = 108.0
- eval_accuracy = 0.2101
- eval_loss = 6.889
- eval_runtime = 0:00:07.90
+ epoch = 216.0
+ eval_accuracy = 0.2017
+ eval_loss = 7.4687
+ eval_runtime = 0:00:07.92
  eval_samples = 283
- eval_samples_per_second = 35.79
- eval_steps_per_second = 3.035
- perplexity = 981.4321
+ eval_samples_per_second = 35.695
+ eval_steps_per_second = 3.027
+ perplexity = 1752.2686
  ```
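
For reference, the step counts in these logs follow directly from the dataset and batch size. A quick check of the arithmetic, assuming a single device with no gradient accumulation (as the log itself reports):

```python
import math

# Values taken from the training log above.
num_examples = 9443  # "Num examples"
batch_size = 12      # "Total train batch size"

steps_per_epoch = math.ceil(num_examples / batch_size)  # 787 optimizer updates per epoch

print(steps_per_epoch * 108)  # 84996  -> "Total optimization steps" of the first run
print(steps_per_epoch * 216)  # 169992 -> "Total optimization steps" after raising epochs to 216
```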
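The "Continuing training from checkpoint" lines show that this commit extends the earlier 108-epoch run to 216 epochs by resuming from global step 84500 rather than retraining from scratch. Below is a minimal sketch of that mechanism via the transformers Trainer API; the paths, argument values, and `train_dataset` are hypothetical stand-ins, since the actual run used the run_clm.py example script:

```python
from transformers import AutoModelForCausalLM, Trainer, TrainingArguments

# Hypothetical setup for illustration; train_dataset stands in for the
# tokenized corpus (9443 examples in this run) prepared elsewhere.
model = AutoModelForCausalLM.from_pretrained("gpt2")

args = TrainingArguments(
    output_dir="output",
    num_train_epochs=216,            # raised from the first run's 108
    per_device_train_batch_size=12,
    learning_rate=5e-5,
)

trainer = Trainer(model=model, args=args, train_dataset=train_dataset)

# Trainer restores the model/optimizer/scheduler state saved in the
# checkpoint and skips forward to its global step (84500 in the log).
trainer.train(resume_from_checkpoint="output/checkpoint-84500")
```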
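The reported perplexity is simply the exponential of the evaluation loss (the run_clm.py example computes `perplexity = math.exp(eval_loss)`), which is why it rose from 981 to 1752 as eval_loss climbed from 6.889 to 7.4687:

```python
import math

# perplexity = exp(eval_loss)
print(math.exp(6.889))   # ~981.43  -> first run   (eval_loss = 6.889)
print(math.exp(7.4687))  # ~1752.27 -> resumed run (eval_loss = 7.4687)
```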