5roop committed
Commit 036116b
1 Parent(s): c7edc49

Add loading script

Files changed (1)
  1. bertic_data.py +123 -0
bertic_data.py ADDED
@@ -0,0 +1,123 @@
+import datasets
+import gzip
+from typing import List
+
+_URL = "http://nl.ijs.si/nikola/dedup_hbs/"
+_HOMEPAGE = _URL  # no separate homepage is given, so the data host is used below
+_URLS = [
+    # "macocu.hbs.translit.dedup.lines.gz",
+    # "hr_news.translit.dedup.lines.gz",
+    # "srwac.translit.dedup.lines.gz",
+    "riznica.translit.dedup.lines.gz",
+    # "mC4.sr.translit.dedup.lines.gz",
+    # "hrwac.translit.dedup.lines.gz",
+    # "cnrwac.translit.dedup.lines.gz",
+    # "classla-sr.translit.dedup.lines.gz",
+    # "classla-hr.translit.dedup.lines.gz",
+    # "classla-bs.translit.dedup.lines.gz",
+    # "cc100-sr.translit.dedup.lines.gz",
+    # "cc100-hr.translit.dedup.lines.gz",
+    # "bswac.translit.dedup.lines.gz",
+]
+_URLS = [_URL + i for i in _URLS]
+
+
+_DESCRIPTION = """\
+Data used to train the BERTić model and its successors.
+"""
+_CITATION = """
+@inproceedings{ljubesic-lauc-2021-bertic,
+    title = "{BERT}i{\'c} - The Transformer Language Model for {B}osnian, {C}roatian, {M}ontenegrin and {S}erbian",
+    author = "Ljube{\v{s}}i{\'c}, Nikola and
+      Lauc, Davor",
+    editor = "Babych, Bogdan and
+      Kanishcheva, Olga and
+      Nakov, Preslav and
+      Piskorski, Jakub and
+      Pivovarova, Lidia and
+      Starko, Vasyl and
+      Steinberger, Josef and
+      Yangarber, Roman and
+      Marci{\'n}czuk, Micha{\l} and
+      Pollak, Senja and
+      P{\v{r}}ib{\'a}{\v{n}}, Pavel and
+      Robnik-{\v{S}}ikonja, Marko",
+    booktitle = "Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing",
+    month = apr,
+    year = "2021",
+    address = "Kiyv, Ukraine",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2021.bsnlp-1.5",
+    pages = "37--42",
+    abstract = "In this paper we describe a transformer model pre-trained on 8 billion tokens of crawled text from the Croatian, Bosnian, Serbian and Montenegrin web domains. We evaluate the transformer model on the tasks of part-of-speech tagging, named-entity-recognition, geo-location prediction and commonsense causal reasoning, showing improvements on all tasks over state-of-the-art models. For commonsense reasoning evaluation we introduce COPA-HR - a translation of the Choice of Plausible Alternatives (COPA) dataset into Croatian. The BERTi{\'c} model is made available for free usage and further task-specific fine-tuning through HuggingFace.",
+}"""
+
+
+class BerticDataConfig(datasets.BuilderConfig):
+    """BuilderConfig for Bertic data sample."""
+
+    def __init__(self, *args, subsets, **kwargs):
+        """BuilderConfig for BerticData.
+
+        Args:
+            subsets: list of subset names covered by this configuration.
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(BerticDataConfig, self).__init__(**kwargs)
+        self.subsets = subsets
+
+
+class BerticData(datasets.GeneratorBasedBuilder):
+    """Bertic dataset, used for training the Bertic model."""
+
+    VERSION = datasets.Version("1.0.0")
+
+    # This is an example of a dataset with multiple configurations.
+    # If you don't want/need to define several sub-sets in your dataset,
+    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+    # If you need to make complex sub-parts in the dataset with configurable options,
+    # you can create your own builder configuration class to store attributes,
+    # inheriting from BerticDataConfig:
+    # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+    # You will be able to load one or the other configuration in the following list with
+    # data = datasets.load_dataset('my_dataset', 'first_domain')
+    # data = datasets.load_dataset('my_dataset', 'second_domain')
+    BUILDER_CONFIGS = [
+        BerticDataConfig(
+            name="default",
+            subsets=["arxiv", "open-web-math", "algebraic-stack"],
+            version=VERSION,
+            description="All subsets",
+        )
+    ]
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "text": datasets.Value("string"),
+            }
+        )
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        urls_to_download = {i: url for i, url in enumerate(_URLS)}
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+        # Split names must be unique, so all files go to a single TRAIN
+        # split and _generate_examples iterates over the whole list.
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "data_files": [downloaded_files[i] for i in sorted(urls_to_download)]
+                },
+            )
+        ]
+
+    def _generate_examples(self, data_files):
+        key = 0
+        for name in data_files:
+            with gzip.open(name, "rb") as f:
+                for line in f:
+                    yield key, {"text": line.decode("utf-8").strip()}
+                    key += 1
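
Once committed, the script can be loaded through the datasets library. A minimal usage sketch, assuming the hypothetical repository id 5roop/bertic_data (adjust to the actual one) and the loader above as-is; recent versions of datasets also require trust_remote_code for script-based datasets:

    import datasets

    # "default" is the only configuration defined in BUILDER_CONFIGS.
    data = datasets.load_dataset(
        "5roop/bertic_data",  # hypothetical repo id
        "default",
        trust_remote_code=True,
    )

    # Each example is one line of the deduplicated, transliterated corpus.
    print(data["train"][0]["text"])

With only riznica.translit.dedup.lines.gz active in _URLS, this downloads a single gzipped file and yields one example per line.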