Datasets:

SuzanaB committed on
Commit
0bb99db
1 Parent(s): 61356f1
Files changed (3) hide show
  1. .gitattributes +0 -16
  2. data.zip +0 -0
  3. reldi_hr.py +157 -0
.gitattributes DELETED
@@ -1,16 +0,0 @@
1
- *.bin.* filter=lfs diff=lfs merge=lfs -text
2
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.h5 filter=lfs diff=lfs merge=lfs -text
5
- *.tflite filter=lfs diff=lfs merge=lfs -text
6
- *.tar.gz filter=lfs diff=lfs merge=lfs -text
7
- *.ot filter=lfs diff=lfs merge=lfs -text
8
- *.onnx filter=lfs diff=lfs merge=lfs -text
9
- *.arrow filter=lfs diff=lfs merge=lfs -text
10
- *.ftz filter=lfs diff=lfs merge=lfs -text
11
- *.joblib filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.pb filter=lfs diff=lfs merge=lfs -text
15
- *.pt filter=lfs diff=lfs merge=lfs -text
16
- *.pth filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data.zip ADDED
Binary file (801 kB). View file
 
reldi_hr.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the 'License');
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an 'AS IS' BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ import os
18
+
19
+ import datasets
20
+
21
+
22
+ _CITATION = ''
23
+ _DESCRIPTION = """The dataset contains 6339 training samples, 815 validation samples and 785 test samples.
24
+ Each sample represents a sentence and includes the following features: sentence ID ('sent_id'),
25
+ list of tokens ('tokens'), list of lemmas ('lemmas'), list of UPOS tags ('upos_tags'),
26
+ list of Multext-East tags ('xpos_tags), list of morphological features ('feats'),
27
+ and list of IOB tags ('iob_tags'), which are encoded as class labels.
28
+ """
29
+ _HOMEPAGE = ''
30
+ _LICENSE = ''
31
+
32
+ _URL = 'https://huggingface.co/datasets/classla/reldi_hr/raw/main/data.zip'
33
+ _TRAINING_FILE = 'train_ner.conllu'
34
+ _DEV_FILE = 'dev_ner.conllu'
35
+ _TEST_FILE = 'test_ner.conllu'
36
+
37
+
38
class ReldiHr(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Croatian ReLDi corpus.

    Reads three CoNLL-U files (train/dev/test) from a downloaded zip and
    yields one example per sentence with tokens, lemmas, UPOS/XPOS tags,
    morphological features, and NER tags in IOB scheme (encoded as
    ClassLabel values).
    """

    VERSION = datasets.Version('1.0.0')

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name='reldi_hr',
            version=VERSION,
            description=''
        )
    ]

    def _info(self):
        """Declare the per-sentence feature schema; IOB tags are class labels."""
        features = datasets.Features(
            {
                'sent_id': datasets.Value('string'),
                'tokens': datasets.Sequence(datasets.Value('string')),
                'lemmas': datasets.Sequence(datasets.Value('string')),
                'upos_tags': datasets.Sequence(datasets.Value('string')),
                'xpos_tags': datasets.Sequence(datasets.Value('string')),
                'feats': datasets.Sequence(datasets.Value('string')),
                'iob_tags': datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            'I-org',
                            'B-misc',
                            'B-per',
                            'B-deriv-per',
                            'B-org',
                            'B-loc',
                            'I-deriv-per',
                            'I-misc',
                            'I-loc',
                            'I-per',
                            'O'
                        ]
                    )
                )
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive; return one generator per split."""
        data_dir = dl_manager.download_and_extract(_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={
                    'filepath': os.path.join(data_dir, _TRAINING_FILE),
                    'split': 'train'}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={
                    'filepath': os.path.join(data_dir, _DEV_FILE),
                    'split': 'dev'}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={
                    'filepath': os.path.join(data_dir, _TEST_FILE),
                    'split': 'test'}
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, example) pairs parsed from one CoNLL-U split file.

        A sentence starts at each '# sent_id = ...' comment line; token
        lines are tab-separated CoNLL-U columns where column 1 is FORM,
        2 LEMMA, 3 UPOS, 4 XPOS, 5 FEATS and the NER tag is read from
        column index 9 (the MISC field in this corpus).
        """
        with open(filepath, encoding='utf-8') as f:
            sent_id = ''
            tokens = []
            lemmas = []
            upos_tags = []
            xpos_tags = []
            feats = []
            iob_tags = []
            data_id = 0
            for line in f:
                if not line or line == '\n':
                    continue  # blank separator between sentences
                if line.startswith('# sent_id'):
                    # A new sentence begins: flush the accumulated one, if any.
                    if tokens:
                        yield data_id, {
                            'sent_id': sent_id,
                            'tokens': tokens,
                            'lemmas': lemmas,
                            'upos_tags': upos_tags,
                            'xpos_tags': xpos_tags,
                            'feats': feats,
                            'iob_tags': iob_tags
                        }
                        tokens = []
                        lemmas = []
                        upos_tags = []
                        xpos_tags = []
                        feats = []
                        iob_tags = []
                        data_id += 1
                    sent_id = line.split(' = ')[1].strip()
                elif line.startswith('#'):
                    # Other CoNLL-U comments (e.g. '# text = ...') carry no
                    # token columns; the original code raised IndexError on
                    # them, so skip them explicitly.
                    continue
                else:
                    splits = line.split('\t')
                    tokens.append(splits[1].strip())
                    lemmas.append(splits[2].strip())
                    upos_tags.append(splits[3].strip())
                    xpos_tags.append(splits[4].strip())
                    feats.append(splits[5].strip())
                    iob_tags.append(splits[9].strip())

            # Emit the final sentence; guarded so an empty file yields nothing
            # instead of a bogus empty example.
            if tokens:
                yield data_id, {
                    'sent_id': sent_id,
                    'tokens': tokens,
                    'lemmas': lemmas,
                    'upos_tags': upos_tags,
                    'xpos_tags': xpos_tags,
                    'feats': feats,
                    'iob_tags': iob_tags
                }