abhik1505040 commited on
Commit
c0bee52
1 Parent(s): e951e42

Initial commit

Browse files
README.md ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - summarization
4
+ - mT5
5
+ language:
6
+ - am
7
+ - ar
8
+ - az
9
+ - bn
10
+ - my
11
+ - zh
12
+ - en
13
+ - fr
14
+ - gu
15
+ - ha
16
+ - hi
17
+ - ig
18
+ - id
19
+ - ja
20
+ - rn
21
+ - ko
22
+ - ky
23
+ - mr
24
+ - ne
25
+ - om
26
+ - ps
27
+ - fa
28
+ - pcm
29
+ - pt
30
+ - pa
31
+ - ru
32
+ - gd
33
+ - sr
34
+ - si
35
+ - so
36
+ - es
37
+ - sw
38
+ - ta
39
+ - te
40
+ - th
41
+ - ti
42
+ - tr
43
+ - uk
44
+ - ur
45
+ - uz
46
+ - vi
47
+ - cy
48
+ - yo
49
+ license:
50
+ - cc-by-nc-sa-4.0
51
+ widget:
52
+ - text: "Videos that say approved vaccines are dangerous and cause autism, cancer or infertility are among those that will be taken down, the company said. The policy includes the termination of accounts of anti-vaccine influencers. Tech giants have been criticised for not doing more to counter false health information on their sites. In July, US President Joe Biden said social media platforms were largely responsible for people's scepticism in getting vaccinated by spreading misinformation, and appealed for them to address the issue. YouTube, which is owned by Google, said 130,000 videos were removed from its platform since last year, when it implemented a ban on content spreading misinformation about Covid vaccines. In a blog post, the company said it had seen false claims about Covid jabs \"spill over into misinformation about vaccines in general\". The new policy covers long-approved vaccines, such as those against measles or hepatitis B. \"We're expanding our medical misinformation policies on YouTube with new guidelines on currently administered vaccines that are approved and confirmed to be safe and effective by local health authorities and the WHO,\" the post said, referring to the World Health Organization."
53
+
54
+ ---
55
+
56
+ # mT5-m2o-english-CrossSum
57
+
58
+ This repository contains the mT5 checkpoint finetuned on all cross-lingual pairs of the [CrossSum](https://huggingface.co/datasets/csebuetnlp/CrossSum) dataset, where the target summary was in `english`, i.e. this model tries to summarize text written in any language in English. For finetuning details and scripts, see the [paper](https://arxiv.org/abs/2112.08804) and the [official repository](https://github.com/csebuetnlp/CrossSum).
59
+
60
+
61
+ ## Using this model in `transformers` (tested on 4.11.0.dev0)
62
+
63
+ ```python
64
+ import re
65
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
66
+
67
+ WHITESPACE_HANDLER = lambda k: re.sub(r'\s+', ' ', re.sub(r'\n+', ' ', k.strip()))
68
+
69
+ article_text = """Videos that say approved vaccines are dangerous and cause autism, cancer or infertility are among those that will be taken down, the company said. The policy includes the termination of accounts of anti-vaccine influencers. Tech giants have been criticised for not doing more to counter false health information on their sites. In July, US President Joe Biden said social media platforms were largely responsible for people's scepticism in getting vaccinated by spreading misinformation, and appealed for them to address the issue. YouTube, which is owned by Google, said 130,000 videos were removed from its platform since last year, when it implemented a ban on content spreading misinformation about Covid vaccines. In a blog post, the company said it had seen false claims about Covid jabs "spill over into misinformation about vaccines in general". The new policy covers long-approved vaccines, such as those against measles or hepatitis B. "We're expanding our medical misinformation policies on YouTube with new guidelines on currently administered vaccines that are approved and confirmed to be safe and effective by local health authorities and the WHO," the post said, referring to the World Health Organization."""
70
+
71
+ model_name = "csebuetnlp/mT5_m2o_english_crossSum"
72
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
73
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
74
+
75
+ input_ids = tokenizer(
76
+ [WHITESPACE_HANDLER(article_text)],
77
+ return_tensors="pt",
78
+ padding="max_length",
79
+ truncation=True,
80
+ max_length=512
81
+ )["input_ids"]
82
+
83
+ output_ids = model.generate(
84
+ input_ids=input_ids,
85
+ max_length=84,
86
+ no_repeat_ngram_size=2,
87
+ num_beams=4
88
+ )[0]
89
+
90
+ summary = tokenizer.decode(
91
+ output_ids,
92
+ skip_special_tokens=True,
93
+ clean_up_tokenization_spaces=False
94
+ )
95
+
96
+ print(summary)
97
+ ```
98
+
99
+
100
+
101
+
102
+ ## Citation
103
+
104
+ If you use this model, please cite the following paper:
105
+ ```
106
+ @inproceedings{hasan-etal-2021-xl,
107
+ title = "{XL}-Sum: Large-Scale Multilingual Abstractive Summarization for 44 Languages",
108
+ author = "Hasan, Tahmid and
109
+ Bhattacharjee, Abhik and
110
+ Islam, Md. Saiful and
111
+ Mubasshir, Kazi and
112
+ Li, Yuan-Fang and
113
+ Kang, Yong-Bin and
114
+ Rahman, M. Sohel and
115
+ Shahriyar, Rifat",
116
+ booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
117
+ month = aug,
118
+ year = "2021",
119
+ address = "Online",
120
+ publisher = "Association for Computational Linguistics",
121
+ url = "https://aclanthology.org/2021.findings-acl.413",
122
+ pages = "4693--4703",
123
+ }
124
+ ```
config.json ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/mt5-base",
3
+ "architectures": [
4
+ "MT5ForConditionalGeneration"
5
+ ],
6
+ "d_ff": 2048,
7
+ "d_kv": 64,
8
+ "d_model": 768,
9
+ "decoder_start_token_id": 250030,
10
+ "dropout_rate": 0.1,
11
+ "eos_token_id": 1,
12
+ "feed_forward_proj": "gated-gelu",
13
+ "initializer_factor": 1.0,
14
+ "is_encoder_decoder": true,
15
+ "layer_norm_epsilon": 1e-06,
16
+ "length_penalty": 0.6,
17
+ "max_length": 84,
18
+ "model_type": "mt5",
19
+ "num_beams": 4,
20
+ "num_decoder_layers": 12,
21
+ "num_heads": 12,
22
+ "num_layers": 12,
23
+ "output_past": true,
24
+ "pad_token_id": 0,
25
+ "relative_attention_num_buckets": 32,
26
+ "task_specific_params": {
27
+ "langid_map": {
28
+ "amharic": [
29
+ 35,
30
+ "\u2581<extra_id_64>"
31
+ ],
32
+ "arabic": [
33
+ 4,
34
+ "\u2581<extra_id_95>"
35
+ ],
36
+ "azerbaijani": [
37
+ 7,
38
+ "\u2581<extra_id_92>"
39
+ ],
40
+ "bengali": [
41
+ 42,
42
+ "\u2581<extra_id_57>"
43
+ ],
44
+ "burmese": [
45
+ 33,
46
+ "\u2581<extra_id_66>"
47
+ ],
48
+ "chinese_simplified": [
49
+ 40,
50
+ "\u2581<extra_id_59>"
51
+ ],
52
+ "chinese_traditional": [
53
+ 44,
54
+ "\u2581<extra_id_55>"
55
+ ],
56
+ "english": [
57
+ 30,
58
+ "\u2581<extra_id_69>"
59
+ ],
60
+ "french": [
61
+ 10,
62
+ "\u2581<extra_id_89>"
63
+ ],
64
+ "gujarati": [
65
+ 27,
66
+ "\u2581<extra_id_72>"
67
+ ],
68
+ "hausa": [
69
+ 43,
70
+ "\u2581<extra_id_56>"
71
+ ],
72
+ "hindi": [
73
+ 21,
74
+ "\u2581<extra_id_78>"
75
+ ],
76
+ "igbo": [
77
+ 9,
78
+ "\u2581<extra_id_90>"
79
+ ],
80
+ "indonesian": [
81
+ 1,
82
+ "\u2581<extra_id_98>"
83
+ ],
84
+ "japanese": [
85
+ 37,
86
+ "\u2581<extra_id_62>"
87
+ ],
88
+ "kirundi": [
89
+ 0,
90
+ "\u2581<extra_id_99>"
91
+ ],
92
+ "korean": [
93
+ 29,
94
+ "\u2581<extra_id_70>"
95
+ ],
96
+ "kyrgyz": [
97
+ 5,
98
+ "\u2581<extra_id_94>"
99
+ ],
100
+ "marathi": [
101
+ 13,
102
+ "\u2581<extra_id_86>"
103
+ ],
104
+ "nepali": [
105
+ 20,
106
+ "\u2581<extra_id_79>"
107
+ ],
108
+ "oromo": [
109
+ 41,
110
+ "\u2581<extra_id_58>"
111
+ ],
112
+ "pashto": [
113
+ 34,
114
+ "\u2581<extra_id_65>"
115
+ ],
116
+ "persian": [
117
+ 23,
118
+ "\u2581<extra_id_76>"
119
+ ],
120
+ "pidgin": [
121
+ 14,
122
+ "\u2581<extra_id_85>"
123
+ ],
124
+ "portuguese": [
125
+ 39,
126
+ "\u2581<extra_id_60>"
127
+ ],
128
+ "punjabi": [
129
+ 17,
130
+ "\u2581<extra_id_82>"
131
+ ],
132
+ "russian": [
133
+ 36,
134
+ "\u2581<extra_id_63>"
135
+ ],
136
+ "scottish_gaelic": [
137
+ 24,
138
+ "\u2581<extra_id_75>"
139
+ ],
140
+ "serbian_cyrillic": [
141
+ 28,
142
+ "\u2581<extra_id_71>"
143
+ ],
144
+ "serbian_latin": [
145
+ 11,
146
+ "\u2581<extra_id_88>"
147
+ ],
148
+ "sinhala": [
149
+ 31,
150
+ "\u2581<extra_id_68>"
151
+ ],
152
+ "somali": [
153
+ 19,
154
+ "\u2581<extra_id_80>"
155
+ ],
156
+ "spanish": [
157
+ 3,
158
+ "\u2581<extra_id_96>"
159
+ ],
160
+ "swahili": [
161
+ 18,
162
+ "\u2581<extra_id_81>"
163
+ ],
164
+ "tamil": [
165
+ 32,
166
+ "\u2581<extra_id_67>"
167
+ ],
168
+ "telugu": [
169
+ 22,
170
+ "\u2581<extra_id_77>"
171
+ ],
172
+ "thai": [
173
+ 6,
174
+ "\u2581<extra_id_93>"
175
+ ],
176
+ "tigrinya": [
177
+ 16,
178
+ "\u2581<extra_id_83>"
179
+ ],
180
+ "turkish": [
181
+ 15,
182
+ "\u2581<extra_id_84>"
183
+ ],
184
+ "ukrainian": [
185
+ 2,
186
+ "\u2581<extra_id_97>"
187
+ ],
188
+ "urdu": [
189
+ 38,
190
+ "\u2581<extra_id_61>"
191
+ ],
192
+ "uzbek": [
193
+ 8,
194
+ "\u2581<extra_id_91>"
195
+ ],
196
+ "vietnamese": [
197
+ 12,
198
+ "\u2581<extra_id_87>"
199
+ ],
200
+ "welsh": [
201
+ 26,
202
+ "\u2581<extra_id_73>"
203
+ ],
204
+ "yoruba": [
205
+ 25,
206
+ "\u2581<extra_id_74>"
207
+ ]
208
+ }
209
+ },
210
+ "tie_word_embeddings": false,
211
+ "tokenizer_class": "T5Tokenizer",
212
+ "transformers_version": "4.10.0.dev0",
213
+ "use_cache": true,
214
+ "vocab_size": 250112
215
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02fbed1f8ddee483a9456cd361264cb630774737364cd601fa2ba2338052598d
3
+ size 2329707353
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
spiece.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
3
+ size 4309802
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 0, "additional_special_tokens": null, "special_tokens_map_file": "/home/patrick/.cache/torch/transformers/685ac0ca8568ec593a48b61b0a3c272beee9bc194a3c7241d15dcadb5f875e53.f76030f3ec1b96a8199b2593390c610e76ca8028ef3d24680000619ffb646276", "tokenizer_file": null, "name_or_path": "google/mt5-base"}