import os

import datasets
import pandas as pd

_CITATION = """"""

_DESCRIPTION = """\
This dataset is designed for training models that restore punctuation marks
in the output of an Automatic Speech Recognition (ASR) system for Polish.
"""

_HOMEPAGE = "https://github.com/poleval/2021-punctuation-restoration"

_URL = "https://huggingface.co/datasets/lruczu/2021-punctuation-restoration/resolve/main"

_PATHS = {
    "train": os.path.join(_URL, "train"),
    "test-A": os.path.join(_URL, "test-A"),
}
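
# Each split directory holds two tab-separated files, read in
# _generate_examples below: `in.tsv` with the unpunctuated ASR output
# (text in the second column) and `expected.tsv` with the punctuated
# reference text (text in the first column).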

class PunctuationDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for PunctuationDataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for PunctuationDataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(PunctuationDatasetConfig, self).__init__(**kwargs)

class PunctuationDataset(datasets.GeneratorBasedBuilder):
    """Punctuation restoration dataset for Polish ASR output (PolEval 2021)."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        PunctuationDatasetConfig(
            name="punctuation_dataset",
            version=datasets.Version("1.0.0"),
            description="PunctuationDataset dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text_in": datasets.Value("string"),
                    "text_out": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                'B-.',
                                'B-,',
                                'B--',
                                'B-!',
                                'B-?',
                                'B-:',
                                'B-;',
                                'O',
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": _PATHS["train"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": _PATHS["test-A"]}
            ),
        ]

    def _generate_examples(self, filepath):
        # `in.tsv` holds the unpunctuated ASR output (text in the second column);
        # `expected.tsv` holds the same text with punctuation restored.
        # pandas reads both files directly from the remote URL.
        in_df = pd.read_csv(os.path.join(filepath, "in.tsv"), sep='\t', header=None)
        out_df = pd.read_csv(os.path.join(filepath, "expected.tsv"), sep='\t', header=None)

        for key, ((_, row_in), (_, row_out)) in enumerate(
            zip(in_df.iterrows(), out_df.iterrows()), 1
        ):
            text_in = PunctuationDataset._clean_text(row_in[1])
            text_out = PunctuationDataset._clean_text(row_out[0])

            tokens = []
            tags = []
            # The two texts align token by token: an output token may differ from
            # its input token only by a trailing punctuation mark, which becomes
            # the tag (e.g. "ala ma kota" / "Ala ma kota." -> ['O', 'O', 'B-.']).
            for token_in, token_out in zip(text_in.split(), text_out.split()):
                assert token_in.lower() in token_out.lower()
                tokens.append(token_in)
                if token_in.lower() == token_out.lower():
                    tags.append('O')
                else:
                    tags.append(f'B-{token_out[-1]}')

            yield key, {
                "text_in": text_in,
                "text_out": text_out,
                "tokens": tokens,
                "tags": tags,
            }

    @staticmethod
    def _clean_text(text: str, lower: bool = False) -> str:
        if lower:
            text = text.lower()
        # Remove punctuation that got detached from the preceding token,
        # together with the space in front of it.
        text = text.replace(' -', '')
        text = text.replace(' .', '')
        text = text.replace(' ,', '')
        # Collapse double spaces and trim the ends.
        text = text.replace('  ', ' ')
        text = text.strip()
        return text
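

# Usage sketch (illustrative; not part of the loading script). It assumes this
# file is saved locally as `punctuation_dataset.py` (the filename is an
# assumption) and that the installed `datasets` version still supports loading
# from a script; newer releases may also require `trust_remote_code=True`.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("punctuation_dataset.py")
    example = dataset["train"][0]
    print(example["text_in"])
    print(example["tokens"])
    # `tags` are returned as ClassLabel indices; map them back to label names.
    tag_names = dataset["train"].features["tags"].feature.int2str(example["tags"])
    print(tag_names)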