jtatman committed on
Commit
497f3e1
1 Parent(s): 85b8956

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +46 -2
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  base_model: EleutherAI/pythia-160m-deduped
3
- library_name: peft
4
  license: apache-2.0
5
  tags:
6
  - axolotl
@@ -9,6 +9,12 @@ tags:
9
  model-index:
10
  - name: pythia-160m-storytelling
11
  results: []
 
 
 
 
 
 
12
  ---
13
 
14
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -150,4 +156,42 @@ The following hyperparameters were used during training:
150
  - Transformers 4.41.2
151
  - Pytorch 2.3.0+cu121
152
  - Datasets 2.19.1
153
- - Tokenizers 0.19.1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  base_model: EleutherAI/pythia-160m-deduped
3
+ library_name: transformers
4
  license: apache-2.0
5
  tags:
6
  - axolotl
 
9
  model-index:
10
  - name: pythia-160m-storytelling
11
  results: []
12
+ datasets:
13
+ - jtatman/storywriting_combined_instruct
14
+ metrics:
15
+ - accuracy
16
+ - bleu
17
+ - rouge
18
  ---
19
 
20
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
156
  - Transformers 4.41.2
157
  - Pytorch 2.3.0+cu121
158
  - Datasets 2.19.1
159
+ - Tokenizers 0.19.1
160
+
161
+ ### Metrics
162
+
163
+ "Open LLM Leaderboard": {
164
+ "exact_match,flexible-extract": 0.022,
165
+ "exact_match_stderr,flexible-extract": 0.006566447781940106,
166
+ "acc_norm,none": 0.318,
167
+ "acc_norm_stderr,none": 0.014487919091408506,
168
+ "acc,none": 0.2664044125478186,
169
+ "acc_stderr,none": 0.003623534644130716,
170
+ "bleu_diff,none": -0.6500479549286462,
171
+ "bleu_diff_stderr,none": 0.6420841882903697,
172
+ "rougeL_diff,none": -0.7765084899781842,
173
+ "rougeL_diff_stderr,none": 1.0033586571635116,
174
+ "exact_match,strict-match": 0.006,
175
+ "exact_match_stderr,strict-match": 0.003457152557758373,
176
+ "rouge2_acc,none": 0.192,
177
+ "rouge2_acc_stderr,none": 0.017632180454360994,
178
+ "rouge1_acc,none": 0.37,
179
+ "rouge1_acc_stderr,none": 0.02161328916516578,
180
+ "bleu_acc,none": 0.436,
181
+ "bleu_acc_stderr,none": 0.0221989546414768,
182
+ "rouge1_diff,none": -1.5563905118333812,
183
+ "rouge1_diff_stderr,none": 1.022327995054994,
184
+ "rouge2_diff,none": -3.3177627227020277,
185
+ "rouge2_diff_stderr,none": 0.9477297777821475,
186
+ "bleu_max,none": 15.229235419512532,
187
+ "bleu_max_stderr,none": 0.6713582602539528,
188
+ "rouge2_max,none": 16.487324929036955,
189
+ "rouge2_max_stderr,none": 1.0171593586088354,
190
+ "rouge1_max,none": 36.3549677399668,
191
+ "rouge1_max_stderr,none": 0.9461627463383844,
192
+ "rougeL_max,none": 33.87976960164143,
193
+ "rougeL_max_stderr,none": 0.9366539036852334,
194
+ "rougeL_acc,none": 0.386,
195
+ "rougeL_acc_stderr,none": 0.021793529219281158,
196
+ "alias": "Open LLM Leaderboard"
197
+ }