jimregan committed on
Commit
90cbeb0
•
1 Parent(s): d1f129e

add script

Files changed (1)
  1. lasid.py +326 -0
lasid.py ADDED
@@ -0,0 +1,326 @@
+ # coding=utf-8
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ # Copyright 2021 Phonetics and Speech Laboratory, Trinity College, Dublin
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # Lint as: python3
+
+ import sys
+ from pathlib import Path
+
+ import datasets
+
+ try:
+     import icu
+ except ImportError:
+     sys.exit("ICU not found (hint: pip install pyicu)")
+
+ _DESCRIPTION = """\
+ Linguistic Atlas and Survey of Irish Dialects, volume 1
+ """
+
+ _CITATION = """\
+ @book{wagner1958linguistic,
+   title={Linguistic Atlas and Survey of Irish Dialects: Introduction, 300 maps.},
+   author={Wagner, H.},
+   number={v. 1},
+   year={1958},
+   publisher={Dublin Institute for Advanced Studies}
+ }
+
+ @phdthesis{mckendry1982computer,
+   title={Computer-aided contributions to the study of Irish dialects},
+   author={McKendry, Eugene},
+   year={1982},
+   school={Queen's University Belfast}
+ }
+
+ @article{mckendry1998linguistic,
+   title={The Linguistic Atlas and Survey of Irish Dialects (LASID) and the Computer},
+   author={McKendry, Eugene},
+   journal={Studia Celtica Upsaliensia},
+   volume={2},
+   pages={345--354},
+   year={1998}
+ }
+ """
+
+ _DATA_URL = "https://www3.smo.uhi.ac.uk/oduibhin/oideasra/lasid/lasid.zip"
+
+ LASID_ICU = """
+ \x07 → ᡏ ;
+ \\\t → ᵉ ; # \x09
+ \x0e → ᴵ ;
+ \x11 → ʰ ;
+ \x12 → ⁱ ;
+ \x13 → ᵒ ;
+ \x14 → ᵒ̀ ;
+ \x15 → ʳ ;
+ \x16 → ˢ ;
+ \x17 → ᶴ ;
+ \x18 → ᵗ ;
+ \x19 → ᵘ ;
+ \x1a → ᵘ̯ ;
+ \x1c → ᵛ ;
+ \x1d → ʷ ;
+ \x1e → ᶾ ;
+ \x1f → ᵊ ;
+ \# → ᶠ ; # \x23
+ \$ → α΅ ; # \x24
+ \% → ᡍ ; # \x25
+ \& → ᵞ ; # \x26 ˠ for IPA
+ \' → ’ ; # \x27
+ \: → ː ; # \x3a
+ \< → ⁱ̈ ; # \x3c
+ \= → ⁱ̯ ; # \x3d
+ \? → ʔ ; # \x3f
+ \@ → ʲ ; # \x40
+ E → ᴇ ; # \x45
+ I → ɪ ; # \x49
+ L → ʟ ;
+ N → ɴ ;
+ R → ʀ ;
+ \^ → ᡐ ; # \x5e
+ \_ → ǰ ; # crane, 021 # \x5f
+ \` → ɛ̀̃ ; # limekiln, 078: \x60
+ \| → ⁿ ; # lamb, 055: \x7c
+ \~ → ᵑ ; # dreaming, 078; maybe ⁿ̠ ? # \x7e
+ \x7f → ᴇ̃ ;
+ \x80 → φ ; # ɸ
+ \x81 → ü ;
+ \x83 → ɛ \u0300 ;
+ \x84 → è \u0323 ; # FIXME
+ \\\x85 → è̃ ; # this is �, so it needs to be escaped
+ \x86 → ũ̜ ; # lamb, 038
+ \x87 → u̜ ; # finger-nails, 043
+ \x88 → ʈ ; # looks like t̜ : toothache, 033
+ \x89 → ᵃ ; # eggs, 066
+ \x8a → è ;
+ \x8b → ï ;
+ \x8c → ɔ̜̃ ; # grandmother, 007
+ \x8d → ɔ̜ ;
+ \x8e → ɔ̆ ; # before i go, 078
+ \x8f → õ̜ ; # as cute, 062
+ \x91 → æ ;
+ \x92 → o̜ ;
+ \x93 → ɖ ;
+ \x94 → ö ;
+ \x95 → ɑ̜̃ ;
+ \x96 → û ; # milking, 067
+ \x97 → ɑ \u0323 ; # FIXME (maybe α̩ or ɑ̜ ?)
+ \x98 → v̠ ;
+ \x99 → t̠ ; # toothache, 021
+ \x9a → r̠ ;
+ \x9b → ø ;
+ \x9c → ɴ̠ ; # sick, 034
+ \x9d → ŋ̠ ; # grazing, 002
+ \x9e → n̠ ;
+ \x9f → l̠ ; # plumage, 068
+ \xa4 → k̠ ; # plumage, 068
+ \xa5 → g̠ ;
+ \xa6 → d̠ ; # wedge, 021
+ \xa7 → ŭ ;
+ \xa8 → ö̆ ;
+ \xa9 → ŏ ;
+ \xaa → ĭ ;
+ \xab → ɛ̆ ;
+ \xac → ĕ ;
+ \xad → ò ;
+ \xae → λ ;
+ \xaf → ɑ ; # α in the software
+ \xb0 → ɔ ;
+ \xb1 → ɑ̆ \u0323 ; # FIXME
+ \xb2 → ə ;
+ \xb4 → ᵈ ; # tail, 007
+ \xb6 → ɑ̆ ; # ᾰ in the software
+ \xb7 → ă ;
+ \xb8 → λ \u0323 ; # FIXME
+ \xb9 → ɛ ;
+ \xba → ʃ \u030c ; # calling, 067
+ \xbb → š ;
+ \xbc → ř ;
+ \xbd → ɑ̃ ;
+ \xbe → ẽ ; # tied, 88N
+ \xc1 → ′ ; # superscript prime
+ \xc5 → ᴍ̠ ; # fart, 071
+ \xc6 → ã ; # calf, 046
+ \xc7 → t \u0323 ; # probably t̞
+ \xc8 → λ̯ ; # mane, 067
+ \xc9 → o̯ ; # hare, 088
+ \xca → Ɫ ; # loaf, 001
+ \xcb → ɫ ; # loaf, 003
+ \xcc → m̥ ; # awake, 001
+ \xcd → ʀ̥ ; # thieving, 003
+ \xce → ˈ ;
+ \xcf → ˌ ; # cattle, 040
+ \xd0 → ð ; # boar, 88N
+ \xd1 → s \u0323 ; # FIXME # slime 008
+ \xd2 → r \u0323 ; # FIXME # bulls 067
+ \xd3 → ɪ̆ ; # suit of clothes 039
+ \xd4 → ᴇ̀ ;
+ \xd5 → p \u0323 ; # FIXME # castrating 053
+ \xd7 → ɪ̃ ; # slime, 007
+ \xd8 → ɪ̈ ; # calf 027
+ \xdb → o \u0323 ; # FIXME # cow 028
+ \xdc → ŋ \u0323 ; # FIXME # tied 078
+ \xdd → ö̀ ;
+ \xde → k \u0323 ; # FIXME
+ \xdf → i \u0323 ; # FIXME # sick 069
+ \xe1 → g \u0323 ; # FIXME
+ \xe2 → e \u0323 ; # FIXME
+ \xe3 → d \u0323 ; # FIXME # agut 052
+ \xe4 → õ ; # I shall tie 062
+ \xe5 → b \u0323 ; # FIXME # castrating 071
+ \xe6 → ɑ̃ \u0323 ; #FIXME # barking 049
+ \xe7 → ɑ \u0323 ; # FIXME # slime 008
+ \xe8 → ỹ ;
+ \xea → λ̃ ;
+ \xeb → ü̃ ; # churn-dash, 011
+ \xec → ũ ;
+ \xed → ɔ̃ ; # cow 074
+ \xee → ò̃ ; # barking 055
+ \xef → ′ ;
+ \xf0 → ″ ;
+ \xf1 → ö̀̃ ; # dreaming, 078
+ \xf2 → ö̃ ; # sheep shears 074
+ \xf3 → ï̃ ; # churn-dash, 034
+ \xf4 → ĩ ; # sick 001
+ \xf5 → ɣ̃ ; # tied 075
+ \xf6 → ɛ̃ ; # tied 067
+ \xf7 → n̥ ; # awake, 059
+ \xf8 → r̥ ; # slime 002
+ \xf9 → ʃ ;
+ \xfb → · ; # slime 058
+ \xfa → ɣ ;
+ \xfc → χ ; # limekiln, 080
+ \xfd → ʒ ; # sheep shears 054
+ \xfe → ŋ ;
+ """
+
+ LASID_TITLES_ICU = """
+ \xb5 → Á ;
+ \xd6 → Í ;
+ \x90 → É ;
+ \xe0 → Ó ;
+ \xe9 → Ú ;
+ """
+
+ def transliterator_from_rules(name, rules):
+     fromrules = icu.Transliterator.createFromRules(name, rules)
+     icu.Transliterator.registerInstance(fromrules)
+     return icu.Transliterator.createInstance(name)
+
+ LASID = transliterator_from_rules('lasid_icu', LASID_ICU)
+ TITLES = transliterator_from_rules('lasid_titles', LASID_TITLES_ICU)
+
+ def translit_phon(text):
+     # could have been any 8-bit encoding
+     return LASID.transliterate(text.decode('ISO-8859-1').rstrip())
+
+ def translit_irish(text):
+     return TITLES.transliterate(text.decode('ISO-8859-1').rstrip())
+
+
+ class LasidDataset(datasets.GeneratorBasedBuilder):
+     """Scraper dataset for LASID."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="lasid"),
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "english": datasets.Value("string"),
+                 "irish": datasets.Value("string"),
+                 "map_id": datasets.Value("string"),
+                 "place_ids": datasets.Sequence(datasets.Value("string")),
+                 "transcripts": datasets.Sequence(datasets.Value("string")),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             citation=_CITATION
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_path = dl_manager.download_and_extract(_DATA_URL)
+         infile = f"{dl_path}/mapdata.dat"
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "split": "train",
+                     "data_file": infile
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self, split, data_file
+     ):
+         """ Yields examples as (key, example) tuples. """
+         data = process_lasid(data_file)
+         _id = 1
+         for map in data.keys():
+             item = data[map]
+             place_ids = list(item["data"])
+             transcripts = [item["data"][a] for a in place_ids]
+
+             yield _id, {
+                 "english": item.get("en", ""),
+                 "irish": item.get("ga", ""),
+                 "map_id": item.get("id", ""),
+                 "place_ids": place_ids,
+                 "transcripts": transcripts
+             }
+             _id += 1
+
+
+ def process_lasid(filename):
+     data = {}
+     cur = {}
+     en = ''
+     ga = ''
+     id = ''
+     with open(filename, "rb") as file:
+         for line in file.readlines():
+             if b'{M' in line:
+                 if en and id:
+                     tmp = {}
+                     tmp['en'] = en
+                     tmp['id'] = id
+                     tmp['ga'] = ga
+                     tmp['data'] = cur
+                     data[id] = tmp
+                 text = line.decode('ISO-8859-1').rstrip()
+                 id = text[3:7].strip()
+                 en = text[7:-1].strip()
+                 cur = {}
+             elif b'{F' in line:
+                 raw = translit_irish(line)
+                 ga = raw[3:-1].strip()
+             elif line.decode('ISO-8859-1')[0:1].isnumeric():
+                 pid = line.decode('ISO-8859-1')[0:3]
+                 ptext = translit_phon(line[3:-1])
+                 if ptext[-1] == '*':
+                     ptext = ptext[0:-1]
+                 cur[pid] = ptext.strip()
+     return data
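
Usage sketch (not part of the committed file): once this script is on the Hub it can be loaded with datasets.load_dataset. This is a minimal sketch, assuming the script is published under the repo id "jimregan/lasid" and that the LASID zip is still reachable at the URL above; recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets. Each record is one map/question, with an English and Irish title and parallel lists of point ids and transliterated transcriptions, matching the features declared in _info().

from datasets import load_dataset

# "jimregan/lasid" is the assumed repo id for this script on the Hub.
lasid = load_dataset("jimregan/lasid", split="train")

# Inspect one map: its id, titles, and the per-point transcriptions.
first = lasid[0]
print(first["map_id"], first["english"], first["irish"])
for place, transcript in zip(first["place_ids"], first["transcripts"]):
    print(place, transcript)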