Upload folder using huggingface_hub
- all_results.json +34 -0
- config.json +130 -0
- eval-predictions.csv +117 -0
- eval_instances_normalized_attn_scores.pt +3 -0
- eval_results.json +12 -0
- performance_results.json +4 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- spiece.model +3 -0
- test-predictions-1723595815.csv +0 -0
- test-predictions-1723597053.csv +0 -0
- test-predictions-1723597416.csv +0 -0
- test-predictions-1723597418.csv +117 -0
- test_instances_normalized_attn_scores.pt +3 -0
- test_results.json +12 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- train_results.json +14 -0
- trainer_state.json +0 -0
- training_args.bin +3 -0
all_results.json
ADDED
@@ -0,0 +1,34 @@
+{
+    "epoch": 200.0,
+    "eval_accuracy": 78.44827586206897,
+    "eval_average_metrics": 79.95936897716001,
+    "eval_classification_report": "{\"0\": {\"precision\": 0.8333333333333334, \"recall\": 0.6756756756756757, \"f1-score\": 0.746268656716418, \"support\": 37.0}, \"1\": {\"precision\": 0.65, \"recall\": 0.8387096774193549, \"f1-score\": 0.7323943661971831, \"support\": 31.0}, \"2\": {\"precision\": 0.9090909090909091, \"recall\": 0.8333333333333334, \"f1-score\": 0.8695652173913043, \"support\": 12.0}, \"3\": {\"precision\": 1.0, \"recall\": 0.875, \"f1-score\": 0.9333333333333333, \"support\": 8.0}, \"4\": {\"precision\": 0.7, \"recall\": 0.7777777777777778, \"f1-score\": 0.7368421052631577, \"support\": 9.0}, \"5\": {\"precision\": 1.0, \"recall\": 0.8888888888888888, \"f1-score\": 0.9411764705882353, \"support\": 9.0}, \"6\": {\"precision\": 0.7777777777777778, \"recall\": 0.7777777777777778, \"f1-score\": 0.7777777777777778, \"support\": 9.0}, \"7\": {\"precision\": 1.0, \"recall\": 1.0, \"f1-score\": 1.0, \"support\": 1.0}, \"micro avg\": {\"precision\": 0.7844827586206896, \"recall\": 0.7844827586206896, \"f1-score\": 0.7844827586206896, \"support\": 116.0}, \"macro avg\": {\"precision\": 0.8587752525252526, \"recall\": 0.833395391359101, \"f1-score\": 0.8421697409084262, \"support\": 116.0}, \"weighted avg\": {\"precision\": 0.8033829676071055, \"recall\": 0.7844827586206896, \"f1-score\": 0.7872395009365947, \"support\": 116.0}, \"samples avg\": {\"precision\": 0.7844827586206896, \"recall\": 0.7844827586206896, \"f1-score\": 0.7844827586206896, \"support\": 116.0}}",
+    "eval_f1_macro": 84.21697409084263,
+    "eval_f1_micro": 78.44827586206897,
+    "eval_f1_weighted": 78.72395009365947,
+    "eval_loss": 0.6754371523857117,
+    "eval_runtime": 1.5884,
+    "eval_samples_per_second": 73.029,
+    "init_mem_cpu_alloc_delta": -89677824,
+    "init_mem_cpu_peaked_delta": 89681920,
+    "init_mem_gpu_alloc_delta": 891528192,
+    "init_mem_gpu_peaked_delta": 0,
+    "peak_memory": 5.395798828125,
+    "test_accuracy": 78.44827586206897,
+    "test_average_metrics": 79.95936897716001,
+    "test_classification_report": "{\"0\": {\"precision\": 0.8333333333333334, \"recall\": 0.6756756756756757, \"f1-score\": 0.746268656716418, \"support\": 37.0}, \"1\": {\"precision\": 0.65, \"recall\": 0.8387096774193549, \"f1-score\": 0.7323943661971831, \"support\": 31.0}, \"2\": {\"precision\": 0.9090909090909091, \"recall\": 0.8333333333333334, \"f1-score\": 0.8695652173913043, \"support\": 12.0}, \"3\": {\"precision\": 1.0, \"recall\": 0.875, \"f1-score\": 0.9333333333333333, \"support\": 8.0}, \"4\": {\"precision\": 0.7, \"recall\": 0.7777777777777778, \"f1-score\": 0.7368421052631577, \"support\": 9.0}, \"5\": {\"precision\": 1.0, \"recall\": 0.8888888888888888, \"f1-score\": 0.9411764705882353, \"support\": 9.0}, \"6\": {\"precision\": 0.7777777777777778, \"recall\": 0.7777777777777778, \"f1-score\": 0.7777777777777778, \"support\": 9.0}, \"7\": {\"precision\": 1.0, \"recall\": 1.0, \"f1-score\": 1.0, \"support\": 1.0}, \"micro avg\": {\"precision\": 0.7844827586206896, \"recall\": 0.7844827586206896, \"f1-score\": 0.7844827586206896, \"support\": 116.0}, \"macro avg\": {\"precision\": 0.8587752525252526, \"recall\": 0.833395391359101, \"f1-score\": 0.8421697409084262, \"support\": 116.0}, \"weighted avg\": {\"precision\": 0.8033829676071055, \"recall\": 0.7844827586206896, \"f1-score\": 0.7872395009365947, \"support\": 116.0}, \"samples avg\": {\"precision\": 0.7844827586206896, \"recall\": 0.7844827586206896, \"f1-score\": 0.7844827586206896, \"support\": 116.0}}",
+    "test_f1_macro": 84.21697409084263,
+    "test_f1_micro": 78.44827586206897,
+    "test_f1_weighted": 78.72395009365947,
+    "test_loss": 0.6754371523857117,
+    "test_runtime": 1.6141,
+    "test_samples_per_second": 71.868,
+    "total_time in minutes ": 47.965,
+    "train_mem_cpu_alloc_delta": 29777920,
+    "train_mem_cpu_peaked_delta": 310181888,
+    "train_mem_gpu_alloc_delta": 40215552,
+    "train_mem_gpu_peaked_delta": 4718911488,
+    "train_runtime": 2875.9944,
+    "train_samples": 488,
+    "train_samples_per_second": 1.113
+}
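Note: the `eval_classification_report` / `test_classification_report` values above are themselves JSON encoded as strings inside `all_results.json`, so reading them back takes a second decoding pass. A minimal sketch (only the file name comes from this upload; the rest is illustrative):

```python
import json

# Load the aggregated results written by the training run.
with open("all_results.json") as f:
    results = json.load(f)

# The per-class report is a JSON string embedded in the JSON file,
# so it has to be decoded a second time.
report = json.loads(results["eval_classification_report"])

print(report["macro avg"]["f1-score"])      # ~0.8422
print(report["weighted avg"]["precision"])  # ~0.8034
```

The decoded macro-average f1-score (~0.842) matches `eval_f1_macro` up to the x100 scaling used for the top-level metrics.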
config.json
ADDED
@@ -0,0 +1,130 @@
+{
+  "_name_or_path": "t5-base",
+  "architectures": [
+    "T5ForConditionalGeneration"
+  ],
+  "attn_method": "linear",
+  "attn_prefix_tuning": false,
+  "class_weights": null,
+  "d_ff": 3072,
+  "d_kv": 64,
+  "d_model": 768,
+  "decoder_start_token_id": 0,
+  "dropout_rate": 0.1,
+  "eos_token_id": 1,
+  "feed_forward_proj": "relu",
+  "fix_attention": false,
+  "gradient_checkpointing": false,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2",
+    "3": "LABEL_3",
+    "4": "LABEL_4",
+    "5": "LABEL_5",
+    "6": "LABEL_6",
+    "7": "LABEL_7"
+  },
+  "ignore_target": false,
+  "init_prefix_method": "random",
+  "initializer_factor": 1.0,
+  "is_contrastive": false,
+  "is_encoder_decoder": true,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2,
+    "LABEL_3": 3,
+    "LABEL_4": 4,
+    "LABEL_5": 5,
+    "LABEL_6": 6,
+    "LABEL_7": 7
+  },
+  "label_based": false,
+  "label_token_ids": [
+    [
+      16,
+      10454,
+      2493,
+      1
+    ],
+    [
+      1921,
+      1
+    ],
+    [
+      1690,
+      1
+    ],
+    [
+      21530,
+      1
+    ],
+    [
+      11746,
+      1
+    ],
+    [
+      11122,
+      1
+    ],
+    [
+      822,
+      1
+    ],
+    [
+      30430,
+      1
+    ]
+  ],
+  "layer_norm_epsilon": 1e-06,
+  "learned_temperature": false,
+  "max_length": 12,
+  "model_type": "t5",
+  "n_positions": 512,
+  "normalize_prefixes": false,
+  "num_decoder_layers": 12,
+  "num_heads": 12,
+  "num_layers": 12,
+  "num_target": 1,
+  "output_past": true,
+  "pad_token_id": 0,
+  "prefix_num": 1,
+  "prefix_tuning": false,
+  "relative_attention_num_buckets": 32,
+  "shared_attn": false,
+  "task_specific_params": {
+    "summarization": {
+      "early_stopping": true,
+      "length_penalty": 2.0,
+      "max_length": 200,
+      "min_length": 30,
+      "no_repeat_ngram_size": 3,
+      "num_beams": 4,
+      "prefix": "summarize: "
+    },
+    "translation_en_to_de": {
+      "early_stopping": true,
+      "max_length": 300,
+      "num_beams": 4,
+      "prefix": "translate English to German: "
+    },
+    "translation_en_to_fr": {
+      "early_stopping": true,
+      "max_length": 300,
+      "num_beams": 4,
+      "prefix": "translate English to French: "
+    },
+    "translation_en_to_ro": {
+      "early_stopping": true,
+      "max_length": 300,
+      "num_beams": 4,
+      "prefix": "translate English to Romanian: "
+    }
+  },
+  "temperature": 2000,
+  "train_task_adapters": true,
+  "transformers_version": "4.6.0",
+  "use_cache": true,
+  "vocab_size": 32100
+}
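This is the stock `t5-base` configuration plus custom keys (`attn_method`, `prefix_tuning`, `train_task_adapters`, `label_token_ids`, ...) added by the adapter-tuning codebase that produced this checkpoint, so loading the full model probably requires that codebase. The configuration itself can still be inspected with vanilla `transformers`; a hedged sketch, with a placeholder repo id:

```python
from transformers import AutoConfig

# Placeholder repo id -- substitute the actual model repo on the Hub.
config = AutoConfig.from_pretrained("user/repo")

print(config.model_type)           # "t5"
print(config.num_layers)           # 12
# Keys unknown to T5Config are kept as plain attributes on the config object:
print(config.train_task_adapters)  # True
print(config.attn_method)          # "linear"
```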
eval-predictions.csv
ADDED
@@ -0,0 +1,117 @@
+id,text
+2,negative
+5,Tammy
+9,False
+16,negative
+21,0:59 it gives me relif?
+24,False
+45,not_equivalent
+47,False
+57,Ddx: Ddx: Ddx
+59,False
+62,False
+64,: please show us the evidence I asked for:
+72,semen
+91,True
+95,False
+100,False
+106,"""Reality"""
+108,False
+109,entailment
+115,False
+124,True
+129,a boater took a boat class: apparently
+130,positive
+139,False
+144,1st Comment
+152,False
+159,canada is under some of the strictest guidelines lol
+160,True
+174,True
+178,False
+186,negative
+189,a friend of mine had a collapsed lung
+193,ederly and obese
+194,True
+199,True
+209,: good initiative: good initiative: good initiative:
+213,what exactly did they expect.
+214,False
+216,True
+226,False
+230,No they haven't.
+246,False
+255,"""How Dare You!"""
+257,negative
+258,True
+259,@hara gopal
+263,classification: PPC 2019 Mad Max.
+268,positive
+270,negative
+287,positive
+288,False
+289,O U C H??
+305,True
+313,instagram account.
+326,
+327,False
+329,
+331,False
+333,True
+336,classification: Climate change is a hoax!
+342,Sarah
+343,False
+348,positive
+358,positive
+360,dalal
+379,: Lots of respect sir????????????
+404,No Friday Im booked all day
+415,@Emerson Rocky
+436,False
+444,True
+448,True
+449,True
+462,not_equivalent
+466,False
+479,????
+482,True
+494,True
+498,False
+501,True
+512,negative
+523,negative
+530,True
+548,"Australians all let us rejoice, we've"
+549,classification: very goiot.
+550,Can use passport instead of classification: Can use passport instead
+554,False
+560,a tyrant...
+565,@Shivendra Diwakar my father was
+567,False
+572,Bruh they treat us like kids
+587,False
+589,False
+592,.
+600,116.Priyanshu Kumar
+611,not_equivalent
+612,classification: underlying illness
+614,False
+616,False
+617,False
+619,False
+623,:
+627,False
+632,True
+633,weather
+639,False
+641,"""""media sources"""
+643,.
+645,False
+651,@Anders Santana it worked and I finally
+652,SCP-049
+653,False
+655,@The Pioneering Pixel
+671,Shawn
+673,-
+677,KGB low paid worker
+682,False
eval_instances_normalized_attn_scores.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50764592c09d3053c8ca12d3bd7108d14b2d3b3e93191cb4e40cfdac94418f81
+size 1048
eval_results.json
ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 200.0,
+    "eval_accuracy": 78.44827586206897,
+    "eval_average_metrics": 79.95936897716001,
+    "eval_classification_report": "{\"0\": {\"precision\": 0.8333333333333334, \"recall\": 0.6756756756756757, \"f1-score\": 0.746268656716418, \"support\": 37.0}, \"1\": {\"precision\": 0.65, \"recall\": 0.8387096774193549, \"f1-score\": 0.7323943661971831, \"support\": 31.0}, \"2\": {\"precision\": 0.9090909090909091, \"recall\": 0.8333333333333334, \"f1-score\": 0.8695652173913043, \"support\": 12.0}, \"3\": {\"precision\": 1.0, \"recall\": 0.875, \"f1-score\": 0.9333333333333333, \"support\": 8.0}, \"4\": {\"precision\": 0.7, \"recall\": 0.7777777777777778, \"f1-score\": 0.7368421052631577, \"support\": 9.0}, \"5\": {\"precision\": 1.0, \"recall\": 0.8888888888888888, \"f1-score\": 0.9411764705882353, \"support\": 9.0}, \"6\": {\"precision\": 0.7777777777777778, \"recall\": 0.7777777777777778, \"f1-score\": 0.7777777777777778, \"support\": 9.0}, \"7\": {\"precision\": 1.0, \"recall\": 1.0, \"f1-score\": 1.0, \"support\": 1.0}, \"micro avg\": {\"precision\": 0.7844827586206896, \"recall\": 0.7844827586206896, \"f1-score\": 0.7844827586206896, \"support\": 116.0}, \"macro avg\": {\"precision\": 0.8587752525252526, \"recall\": 0.833395391359101, \"f1-score\": 0.8421697409084262, \"support\": 116.0}, \"weighted avg\": {\"precision\": 0.8033829676071055, \"recall\": 0.7844827586206896, \"f1-score\": 0.7872395009365947, \"support\": 116.0}, \"samples avg\": {\"precision\": 0.7844827586206896, \"recall\": 0.7844827586206896, \"f1-score\": 0.7844827586206896, \"support\": 116.0}}",
+    "eval_f1_macro": 84.21697409084263,
+    "eval_f1_micro": 78.44827586206897,
+    "eval_f1_weighted": 78.72395009365947,
+    "eval_loss": 0.6754371523857117,
+    "eval_runtime": 1.5884,
+    "eval_samples_per_second": 73.029
+}
performance_results.json
ADDED
@@ -0,0 +1,4 @@
+{
+    "peak_memory": 5.395798828125,
+    "total_time in minutes ": 47.965
+}
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea68f4d9e96b554aca884b5f7a4a93f4ed12a50fabdee4d9c30af049aca6082e
+size 898974606
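`pytorch_model.bin` is tracked with Git LFS, so only the pointer above (sha256 and size, roughly 899 MB) is committed; the Hub resolves it to the real weights on download. A sketch with a placeholder repo id:

```python
from huggingface_hub import hf_hub_download

# Placeholder repo id -- resolves the LFS pointer and downloads the actual file.
weights_path = hf_hub_download(repo_id="user/repo", filename="pytorch_model.bin")
print(weights_path)  # local cache path of the ~899 MB checkpoint
```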
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
spiece.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
+size 791656
test-predictions-1723595815.csv
ADDED
The diff for this file is too large to render.
test-predictions-1723597053.csv
ADDED
The diff for this file is too large to render.
test-predictions-1723597416.csv
ADDED
The diff for this file is too large to render.
test-predictions-1723597418.csv
ADDED
@@ -0,0 +1,117 @@
+,id,text
+40,2,"challenge, question"
+31,5,informing statement
+91,9,informing statement
+46,16,informing statement
+48,21,informing statement
+1,24,"acceptance, informing statement"
+8,45,question
+36,47,"challenge, informing statement"
+104,57,informing statement
+95,59,request
+19,62,request
+97,64,request
+11,72,question
+0,91,informing statement
+63,95,"challenge, informing statement"
+37,100,"acceptance, challenge"
+74,106,informing statement
+22,108,request
+53,109,request
+111,115,"challenge, informing statement"
+2,124,request
+55,129,informing statement
+64,130,appreciation
+92,139,challenge
+17,144,informing statement
+4,152,"challenge, informing statement"
+70,159,"challenge, question"
+7,160,"challenge, informing statement"
+72,174,"acceptance, informing statement"
+5,178,"challenge, informing statement"
+82,186,informing statement
+99,189,"rejection, informing statement"
+114,193,"challenge, question"
+77,194,informing statement
+67,199,"challenge, informing statement"
+15,209,appreciation
+6,213,question
+100,214,"rejection, challenge"
+103,216,appreciation
+80,226,"challenge, informing statement"
+102,230,rejection
+28,246,challenge
+61,255,"challenge, informing statement"
+115,257,challenge
+101,258,challenge
+113,259,informing statement
+75,263,informing statement
+58,268,appreciation
+10,270,informing statement
+29,287,"informing statement, challenge"
+68,288,challenge
+3,289,"challenge, informing statement"
+34,305,acceptance
+106,313,"question, challenge"
+94,326,informing statement
+98,327,"question, challenge"
+42,329,appreciation
+18,331,informing statement
+85,333,appreciation
+108,336,challenge
+49,342,"challenge, informing statement"
+76,343,informing statement
+56,348,informing statement
+38,358,appreciation
+62,360,"challenge, informing statement"
+32,379,appreciation
+54,404,"rejection, informing statement"
+90,415,"appreciation, informing statement"
+20,436,"challenge, informing statement"
+45,444,"acceptance, informing statement"
+51,448,"challenge, informing statement"
+13,449,"rejection, challenge"
+30,462,"rejection, informing statement"
+25,466,challenge
+23,479,informing statement
+83,482,informing statement
+88,494,"request, informing statement"
+107,498,acceptance
+14,501,informing statement
+66,512,question
+105,523,"challenge, informing statement"
+86,530,"request, informing statement"
+52,548,challenge
+89,549,informing statement
+44,550,request
+21,554,"challenge, question"
+33,560,challenge
+57,565,informing statement
+73,567,apology
+65,572,challenge
+78,587,challenge
+26,589,"challenge, question"
+87,592,"request, informing statement"
+112,600,informing statement
+12,611,question
+79,612,"challenge, request"
+96,614,"challenge, question"
+35,616,"challenge, informing statement"
+93,617,informing statement
+41,619,question
+109,623,"request, challenge"
+60,627,"challenge, informing statement"
+110,632,informing statement
+69,633,informing statement
+71,639,acceptance
+24,641,"rejection, informing statement"
+27,643,"challenge, request"
+39,645,"challenge, informing statement"
+9,651,"appreciation, informing statement"
+84,652,question
+81,653,"acceptance, informing statement"
+47,655,challenge
+50,671,informing statement
+59,673,informing statement
+16,677,challenge
+43,682,"challenge, informing statement"
test_instances_normalized_attn_scores.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cf18366eb389698056f39a6158bd36abae1b39c77767057ccaca0a3c413cd26
+size 1048
test_results.json
ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 200.0,
+    "test_accuracy": 78.44827586206897,
+    "test_average_metrics": 79.95936897716001,
+    "test_classification_report": "{\"0\": {\"precision\": 0.8333333333333334, \"recall\": 0.6756756756756757, \"f1-score\": 0.746268656716418, \"support\": 37.0}, \"1\": {\"precision\": 0.65, \"recall\": 0.8387096774193549, \"f1-score\": 0.7323943661971831, \"support\": 31.0}, \"2\": {\"precision\": 0.9090909090909091, \"recall\": 0.8333333333333334, \"f1-score\": 0.8695652173913043, \"support\": 12.0}, \"3\": {\"precision\": 1.0, \"recall\": 0.875, \"f1-score\": 0.9333333333333333, \"support\": 8.0}, \"4\": {\"precision\": 0.7, \"recall\": 0.7777777777777778, \"f1-score\": 0.7368421052631577, \"support\": 9.0}, \"5\": {\"precision\": 1.0, \"recall\": 0.8888888888888888, \"f1-score\": 0.9411764705882353, \"support\": 9.0}, \"6\": {\"precision\": 0.7777777777777778, \"recall\": 0.7777777777777778, \"f1-score\": 0.7777777777777778, \"support\": 9.0}, \"7\": {\"precision\": 1.0, \"recall\": 1.0, \"f1-score\": 1.0, \"support\": 1.0}, \"micro avg\": {\"precision\": 0.7844827586206896, \"recall\": 0.7844827586206896, \"f1-score\": 0.7844827586206896, \"support\": 116.0}, \"macro avg\": {\"precision\": 0.8587752525252526, \"recall\": 0.833395391359101, \"f1-score\": 0.8421697409084262, \"support\": 116.0}, \"weighted avg\": {\"precision\": 0.8033829676071055, \"recall\": 0.7844827586206896, \"f1-score\": 0.7872395009365947, \"support\": 116.0}, \"samples avg\": {\"precision\": 0.7844827586206896, \"recall\": 0.7844827586206896, \"f1-score\": 0.7844827586206896, \"support\": 116.0}}",
+    "test_f1_macro": 84.21697409084263,
+    "test_f1_micro": 78.44827586206897,
+    "test_f1_weighted": 78.72395009365947,
+    "test_loss": 0.6754371523857117,
+    "test_runtime": 1.6141,
+    "test_samples_per_second": 71.868
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "t5-base"}
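The tokenizer files here (`spiece.model`, `tokenizer.json`, `special_tokens_map.json`, and this `tokenizer_config.json`) describe the standard `t5-base` SentencePiece tokenizer with 100 `<extra_id_*>` sentinel tokens and a 512-token `model_max_length`. A hedged loading sketch, again with a placeholder repo id:

```python
from transformers import AutoTokenizer

# Placeholder repo id -- loads spiece.model plus the special-token maps above.
tokenizer = AutoTokenizer.from_pretrained("user/repo")

print(tokenizer.model_max_length)                # 512
print(tokenizer.eos_token)                       # "</s>"
print(len(tokenizer.additional_special_tokens))  # 100 sentinel tokens
```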
train_results.json
ADDED
@@ -0,0 +1,14 @@
+{
+    "epoch": 200.0,
+    "init_mem_cpu_alloc_delta": -89677824,
+    "init_mem_cpu_peaked_delta": 89681920,
+    "init_mem_gpu_alloc_delta": 891528192,
+    "init_mem_gpu_peaked_delta": 0,
+    "train_mem_cpu_alloc_delta": 29777920,
+    "train_mem_cpu_peaked_delta": 310181888,
+    "train_mem_gpu_alloc_delta": 40215552,
+    "train_mem_gpu_peaked_delta": 4718911488,
+    "train_runtime": 2875.9944,
+    "train_samples": 488,
+    "train_samples_per_second": 1.113
+}
trainer_state.json
ADDED
The diff for this file is too large to render.
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3e59d1ecb3b221e8e78f7b42ab83c6eadd979d587576a0b7832cce8a9040efe
+size 3384