import os

import datasets

_CITATION = ''

_DESCRIPTION = """The dataset contains 6273 training samples, 762 validation samples and 749 test samples.
Each sample represents a sentence and includes the following features: sentence ID ('sent_id'),
list of tokens ('tokens'), list of normalised word forms ('norms'), list of lemmas ('lemmas'),
list of MULTEXT-East tags ('xpos_tags'), list of morphological features ('feats'),
and list of UPOS tags ('upos_tags'), which are encoded as class labels.
"""

_HOMEPAGE = ''
_LICENSE = ''

_URL = 'https://huggingface.co/datasets/classla/janes_tag/raw/main/data.zip'
_TRAINING_FILE = 'train_all.conllup'
_DEV_FILE = 'dev_all.conllup'
_TEST_FILE = 'test_all.conllup'
_DATA_DIR = 'data'


class JanesTag(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version('1.0.0')

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name='janes_tag',
            version=VERSION,
            description=''
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                'sent_id': datasets.Value('string'),
                'tokens': datasets.Sequence(datasets.Value('string')),
                'norms': datasets.Sequence(datasets.Value('string')),
                'lemmas': datasets.Sequence(datasets.Value('string')),
                'xpos_tags': datasets.Sequence(datasets.Value('string')),
                'feats': datasets.Sequence(datasets.Value('string')),
                # Note that some class names are space-separated UPOS pairs such as
                # 'SCONJ VERB': a single token in this corpus can carry two UPOS tags
                # (presumably merged or contracted forms).
                'upos_tags': datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            'SCONJ VERB', 'NOUN', 'NOUN NOUN', 'CCONJ SCONJ', 'ADV X', 'ADJ', 'NOUN NUM', 'ADP VERB',
                            'CCONJ', 'SCONJ AUX', 'VERB', 'PRON PRON', 'CCONJ PART', 'ADV ADJ', 'PRON AUX', 'AUX AUX',
                            'VERB ADP', 'DET ADJ', 'ADJ NOUN', 'PART PART', 'ADV AUX', 'NOUN ADV', 'PART CCONJ',
                            'DET NOUN', 'CCONJ CCONJ', 'ADV', 'NUM', 'AUX NUM', 'ADV DET', 'ADV ADV', 'PRON VERB',
                            'ADP PRON', 'DET AUX', 'VERB ADV', 'PROPN PROPN', 'NOUN PROPN', 'ADJ ADP', 'PART AUX',
                            'PROPN NOUN', 'PROPN ADV', 'ADP NOUN', 'NUM ADV', 'NOUN ADJ', 'SCONJ', 'PART NOUN',
                            'ADV NUM', 'VERB PRON', 'PART ADJ', 'AUX', 'ADP NUM', 'PRON', 'ADP ADJ', 'INTJ', 'ADV VERB',
                            'NOUN SYM', 'PART', 'ADV PART', 'DET VERB', 'SCONJ PART', 'ADV SCONJ', 'NOUN CCONJ',
                            'NUM DET', 'ADP X', 'INTJ X', 'NOUN VERB', 'PUNCT', 'ADP', 'ADV CCONJ', 'NOUN DET',
                            'X NOUN', 'DET', 'PROPN X', 'SYM', 'PROPN NUM', 'PART VERB', 'SYM INTJ', 'ADP ADV',
                            'X PROPN', 'X X', 'PROPN', 'ADP DET', 'X', 'AUX ADV', 'NUM NOUN', 'INTJ NOUN', 'AUX PRON',
                            'PART ADV', 'PRON ADP', 'INTJ INTJ', 'VERB NOUN', 'NOUN AUX'
                        ]
                    )
                )
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download and extract the zip archive; the .conllup files sit in its 'data' subdirectory.
        data_dir = os.path.join(dl_manager.download_and_extract(_URL), _DATA_DIR)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'filepath': os.path.join(data_dir, _TRAINING_FILE), 'split': 'train'}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={'filepath': os.path.join(data_dir, _DEV_FILE), 'split': 'dev'}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={'filepath': os.path.join(data_dir, _TEST_FILE), 'split': 'test'}
            ),
        ]
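
    # A sketch of the token-line layout the generator below assumes. This is inferred
    # from the column indices it reads, not from the '# global.columns' header of the
    # files themselves (tab-separated CoNLL-U Plus columns):
    #
    #   ID <TAB> FORM <TAB> NORM <TAB> LEMMA <TAB> UPOS <TAB> XPOS <TAB> FEATS ...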

    def _generate_examples(self, filepath, split):
        with open(filepath, encoding='utf-8') as f:
            sent_id = ''
            tokens = []
            norms = []
            lemmas = []
            xpos_tags = []
            feats = []
            upos_tags = []
            data_id = 0
            for line in f:
                # Skip blank lines and comment lines that carry no token data.
                if line == '\n' or line.startswith('# global.columns') or line.startswith('# text'):
                    continue
                if line.startswith('# sent_id'):
                    # A new sentence begins: emit the buffered sentence, if any, then reset.
                    if tokens:
                        yield data_id, {
                            'sent_id': sent_id,
                            'tokens': tokens,
                            'norms': norms,
                            'lemmas': lemmas,
                            'xpos_tags': xpos_tags,
                            'feats': feats,
                            'upos_tags': upos_tags
                        }
                        tokens = []
                        norms = []
                        lemmas = []
                        xpos_tags = []
                        feats = []
                        upos_tags = []
                        data_id += 1
                    sent_id = line.split(' = ')[1].strip()
                else:
                    splits = line.split('\t')
                    tokens.append(splits[1].strip())
                    norms.append(splits[2].strip())
                    lemmas.append(splits[3].strip())
                    upos_tags.append(splits[4].strip())
                    xpos_tags.append(splits[5].strip())
                    feats.append(splits[6].strip())

            # Flush the final sentence, which no trailing '# sent_id' line triggers.
            if tokens:
                yield data_id, {
                    'sent_id': sent_id,
                    'tokens': tokens,
                    'norms': norms,
                    'lemmas': lemmas,
                    'xpos_tags': xpos_tags,
                    'feats': feats,
                    'upos_tags': upos_tags
                }
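

# A minimal usage sketch, not part of the loading script itself. It assumes this file
# is saved locally as 'janes_tag.py' (a hypothetical path) and that the installed
# `datasets` version still supports script-based loading.
if __name__ == '__main__':
    from datasets import load_dataset

    dataset = load_dataset('janes_tag.py')
    example = dataset['train'][0]
    print(example['sent_id'], example['tokens'])

    # 'upos_tags' values are ClassLabel ids; int2str() maps them back to tag strings.
    upos_feature = dataset['train'].features['upos_tags'].feature
    print([upos_feature.int2str(i) for i in example['upos_tags']])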