Update README.md
#1
by tiennv - opened
README.md
CHANGED
@@ -17,4 +17,82 @@ configs:
---
# Dataset Card for "vietnamese-mlmcorpus"

## How to build this dataset

Prepare a few helper functions (`split_puntual` for punctuation-based chunking and a tokenizer wrapper) and use the HF `datasets` library for optimized batched processing:
```
def get_tokens(examples):
    '''
    Tokenize samples into lists of token ids
    '''
    return tokenizer(examples)['input_ids']

def truncation(passage, pattern='[.\n]'):
    '''
    Split a passage on the given pattern (dots and newlines)
    '''
    output = re.split(pattern, passage)
    output = [item for item in output if len(item.split()) > 0]

    return output

def split_puntual(example, threshold=512):
    '''
    Split a long document into spans of roughly `threshold` tokens
    '''
    texts = truncation(example)
    tokenized = get_tokens(texts)

    tmp, group = [], []
    count = 0
    for tokens, text in zip(tokenized, texts):
        count += len(tokens)
        if count <= threshold:
            tmp.append(text.strip())
        else:
            if len(tmp) > 0:
                group.append('. '.join(tmp))  # emit the accumulated span
                count = len(tokens)           # restart the count from the current sentence
                tmp = []                      # reset the buffer
            tmp.append(text.strip())
    # flush whatever is left in the buffer after the loop
    if len(tmp) > 0:
        group.append('. '.join(tmp))
    return group

def process(examples):
    '''
    Batched processing: split every document in a batch into ~512-token chunks
    '''
    chunks = []
    for x in examples:
        chunks += split_puntual(x)
    return {'text': chunks}
```
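
As a quick sanity check (a minimal sketch, not part of the original pipeline: it assumes the helpers above, the `google/mt5-small` tokenizer used in the next step, and a made-up three-sentence passage), `split_puntual` can be tried on a short string:

```
import re
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

sample = "Câu thứ nhất. Câu thứ hai.\nCâu thứ ba."
print(split_puntual(sample, threshold=512))
# the whole sample fits under the token threshold, so it comes back as one joined span:
# ['Câu thứ nhất. Câu thứ hai. Câu thứ ba']
```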

Now, run the full pipeline with this code:
```
import re
from datasets import load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

if __name__ == '__main__':
    dataset = load_dataset("ademax/binhvq-news-corpus")
    print("Total original: ", dataset)

    dataset = dataset.map(
        lambda example: process(example['content']),
        num_proc=2, batched=True,
        remove_columns=['content', 'title', 'summary', 'category']
    )

    # keep only chunks with more than 30 words
    dataset = dataset.filter(lambda example: len(example['text'].split(' ')) > 30)
    print("Processing: ", dataset)

    # train_test_split works on a single Dataset, not a DatasetDict,
    # so split the 'train' subset explicitly (assuming the corpus exposes one)
    dataset = dataset['train'].train_test_split(test_size=0.0002)
    dataset.save_to_disk('release')
```
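
To read the release back later (a small sketch; the `release` path and the `train`/`test` split names come from the script above):

```
from datasets import load_from_disk

dataset = load_from_disk('release')
print(dataset)                            # DatasetDict with 'train' and 'test' splits
print(dataset['train'][0]['text'][:100])  # peek at the first chunk
```
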
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)