Datasets:
Tasks:
Token Classification
Modalities:
Text
Formats:
parquet
Sub-tasks:
part-of-speech
Size:
100K - 1M
ArXiv:
License:
File size: 3,364 Bytes
3e010c6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 |
"""TODO(xtreme): Add a description here."""
import csv
import glob
import os
import datasets
_UD_POS_LANG = {
"Afrikaans": 'af',
"Arabic": 'ar',
"Basque": 'eu',
"Bulgarian": 'bg',
"Dutch": 'nl',
"English": 'en',
"Estonian": 'et',
"Finnish": 'fi',
"French": 'fr',
"German": 'de',
"Greek": 'el',
"Hebrew": 'he',
"Hindi": 'hi',
"Hungarian": 'hu',
"Indonesian": 'id',
"Italian": 'it',
"Japanese": 'ja',
"Kazakh": 'kk',
"Korean": 'ko',
"Chinese": 'zh',
"Marathi": 'mr',
"Persian": 'fa',
"Portuguese": 'pt',
"Russian": 'ru',
"Spanish": 'es',
"Tagalog": 'tl',
"Tamil": 'ta',
"Telugu": 'te',
"Thai": 'th',
"Turkish": 'tr',
"Urdu": 'ur',
"Vietnamese": 'vi',
"Yoruba": 'yo',
}
_DATA_URLS = {
"2_5": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz",
"2_7": 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3424/ud-treebanks-v2.7.tgz',
}
def generate_examples(folder: str, lang: str, split: str):
    """Yield ``{"tokens": [...], "pos_tags": [...]}`` sentence examples.

    Scans every treebank directory matching ``UD_{lang}*`` under *folder*
    and parses the ``*{split}.conllu`` files inside it.

    Args:
        folder: Root directory of an extracted ud-treebanks release.
        lang: Full language name as used in UD directory names (e.g. "English").
        split: One of "train", "dev", "test" (matched as a filename suffix).

    Yields:
        One dict per sentence, with parallel ``tokens`` and ``pos_tags`` lists.
    """
    for path in glob.glob(f'{folder}/UD_{lang}*/*{split}.conllu'):
        # Exclude Arabic-NYUAD, which ships no word forms (all "_") for
        # licensing reasons.  The listed languages have no NYUAD-like
        # treebank, so they skip the check.
        if lang in ["Kazakh", "Tagalog", "Thai", "Yoruba"] or "NYUAD" not in path:
            print('read', path)
            # CoNLL-U files are UTF-8 by specification; pin the encoding so
            # parsing does not depend on the platform's locale default.
            with open(path, encoding="utf-8") as file:
                data = csv.reader(file, delimiter="\t", quoting=csv.QUOTE_NONE)
                tokens = []
                pos_tags = []
                for row in data:
                    # A token line has >= 10 tab-separated columns; "_" in the
                    # FORM (row[1]) or UPOS (row[3]) column marks empty /
                    # multiword-range entries, which are skipped.
                    if len(row) >= 10 and row[1] != "_" and row[3] != "_":
                        tokens.append(row[1])
                        pos_tags.append(row[3])
                    # A blank line terminates a sentence.
                    if len(row) == 0 and len(tokens) > 0:
                        yield {"tokens": tokens, "pos_tags": pos_tags}
                        tokens = []
                        pos_tags = []
                # Flush a trailing sentence in case the file does not end
                # with a blank line (previously such a sentence was dropped).
                if tokens:
                    yield {"tokens": tokens, "pos_tags": pos_tags}
def main(path: str = 'ud-treebanks-v2.7/'):
    """Export UD POS-tagging data to per-language parquet files.

    For every language in ``_UD_POS_LANG``, reads the matching treebank(s)
    under *path* via :func:`generate_examples` and writes one parquet file
    per split to ``{path}/parquet/{code}/{split}.parquet``.

    Args:
        path: Root directory of an extracted ud-treebanks release.
            Previously hard-coded; kept as the default so existing behavior
            is unchanged.  If the path names version 2.7, the XTREME-R
            language extensions are added to ``_UD_POS_LANG``.
    """
    features = datasets.Features({
        "tokens": datasets.Sequence(datasets.Value("string")),
        # The 17 universal POS tags (UPOS).
        "pos_tags": datasets.Sequence(datasets.features.ClassLabel(names=[
            "ADJ",
            "ADP",
            "ADV",
            "AUX",
            "CCONJ",
            "DET",
            "INTJ",
            "NOUN",
            "NUM",
            "PART",
            "PRON",
            "PROPN",
            "PUNCT",
            "SCONJ",
            "SYM",
            "VERB",
            "X",
        ])),
    })
    if '2.7' in path:  # xtreme-r
        # XTREME-R extends the POS task with five additional languages.
        _UD_POS_LANG.update({
            'Lithuanian': 'lt', 'Polish': 'pl', 'Ukrainian': 'uk', 'Wolof': 'wo', 'Romanian': 'ro',
        })
    for lang, code in _UD_POS_LANG.items():
        os.makedirs(f'{path}/parquet/{code}/', exist_ok=True)
        # English is the transfer source language, so it keeps all splits;
        # every other language is evaluated zero-shot on test only.
        splits = ['test'] if code != 'en' else ['train', 'dev', 'test']
        for split in splits:
            ds = datasets.Dataset.from_generator(
                generate_examples, features=features, keep_in_memory=True, gen_kwargs=dict(
                    folder=path, lang=lang, split=split
                )
            )
            sp = f'{path}/parquet/{code}/{split}.parquet'
            ds.to_parquet(sp)
            print('save', sp)


if __name__ == '__main__':
    main()
|