Datasets: abui_wordnet
Languages: Abui
License: cc-by-4.0
holylovenia committed on
Commit 4ab63ae
1 Parent(s): d05732b

Upload abui_wordnet.py with huggingface_hub

Files changed (1):
  abui_wordnet.py (+146, -0)
abui_wordnet.py ADDED
@@ -0,0 +1,146 @@
# coding=utf-8


from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{kratochvil-morgado-da-costa-2022-abui,
    title = "{A}bui {W}ordnet: Using a Toolbox Dictionary to develop a wordnet for a low-resource language",
    author = "Kratochvil, Frantisek and
      Morgado da Costa, Lu{\'i}s",
    editor = "Serikov, Oleg and
      Voloshina, Ekaterina and
      Postnikova, Anna and
      Klyachko, Elena and
      Neminova, Ekaterina and
      Vylomova, Ekaterina and
      Shavrina, Tatiana and
      Ferrand, Eric Le and
      Malykh, Valentin and
      Tyers, Francis and
      Arkhangelskiy, Timofey and
      Mikhailov, Vladislav and
      Fenogenova, Alena",
    booktitle = "Proceedings of the first workshop on NLP applications to field linguistics",
    month = oct,
    year = "2022",
    address = "Gyeongju, Republic of Korea",
    publisher = "International Conference on Computational Linguistics",
    url = "https://aclanthology.org/2022.fieldmatters-1.7",
    pages = "54--63",
    abstract = "This paper describes a procedure to link a Toolbox dictionary of a low-resource language to correct
    synsets, generating a new wordnet. We introduce a bootstrapping technique utilising the information in the gloss
    fields (English, national, and regional) to generate sense candidates using a naive algorithm based on
    multilingual sense intersection. We show that this technique is quite effective when glosses are available in
    more than one language. Our technique complements the previous work by Rosman et al. (2014) which linked the
    SIL Semantic Domains to wordnet senses. Through this work we have created a small, fully hand-checked wordnet
    for Abui, containing over 1,400 concepts and 3,600 senses.",
}
"""
_DATASETNAME = "abui_wordnet"
_DESCRIPTION = """\
A small, fully hand-checked wordnet for Abui, containing over 1,400 concepts and 3,600 senses. It was built with a
bootstrapping technique that uses the information in the gloss fields (English, national, and regional) to generate
sense candidates with a naive algorithm based on multilingual sense intersection.
"""

_HOMEPAGE = "https://github.com/fanacek/abuiwn"
_LANGUAGES = ["abz"]
_LICENSE = Licenses.CC_BY_4_0.value
_LOCAL = False
_URLS = {
    _DATASETNAME: "https://raw.githubusercontent.com/fanacek/abuiwn/main/abwn_lmf.tsv",
}

_SUPPORTED_TASKS = [Tasks.WORD_ANALOGY]

_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"

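# Note (illustrative, inferred from the parsing in _generate_examples below):
# each row of abwn_lmf.tsv is expected to hold 3-4 tab-separated columns,
#     <sense-key> <TAB> <lang> <TAB> <lemma> [<TAB> <form>]
# where the final "form" column is optional and the part of speech is the
# last character of the sense key.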
class AbuiwordnetDataset(datasets.GeneratorBasedBuilder):
    """Abui wordnet: a small, fully hand-checked wordnet for Abui (over 1,400 concepts and 3,600 senses)."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=_DESCRIPTION,
            schema="source",
            subset_id="abui_wordnet",
        ),
        # SEACrowdConfig(
        #     name="abui_wordnet_seacrowd_ww",
        #     version=SEACROWD_VERSION,
        #     description="abuiw SEACrowd schema",
        #     schema="seacrowd_a",
        #     subset_id="abui_wordnet",
        # ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        features = None
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "sense": datasets.Value("string"),
                    "pos": datasets.Value("string"),
                    "lang": datasets.Value("string"),
                    "lemma": datasets.Value("string"),
                    "form": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_pair":
            # The pair schema (schemas.pairs_features) is not wired up yet;
            # only the source config is exposed in BUILDER_CONFIGS above.
            raise NotImplementedError()

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name="senses",
                gen_kwargs={
                    "filepath": data_dir,
                },
            ),
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        with open(filepath, "r", encoding="utf-8") as filein:
            data_instances = [inst.strip("\n").split("\t") for inst in filein.readlines()]
        if self.config.schema == "source":
            for idx, example in enumerate(data_instances):
                sense = example[0]
                # The part of speech is the final character of the sense key
                # (e.g. a key ending in "-n" yields pos "n").
                pos = example[0][-1]
                lang = example[1]
                lemma = example[2]
                # The fourth (form) column is optional in the TSV.
                form = "" if len(example) == 3 else example[3]
                yield idx, {
                    "sense": sense,
                    "pos": pos,
                    "lang": lang,
                    "lemma": lemma,
                    "form": form,
                }
        # elif self.config.schema == "seacrowd_pair":
        #
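
For reference, a minimal usage sketch (not part of this commit): it assumes the script is saved locally as abui_wordnet.py, the seacrowd helper package is importable, and a datasets version that supports the trust_remote_code flag.

import datasets

# "abui_wordnet.py" is a hypothetical local path to this script; the config
# name comes from DEFAULT_CONFIG_NAME above.
abui = datasets.load_dataset(
    "abui_wordnet.py",
    name="abui_wordnet_source",
    trust_remote_code=True,
)

# The loader defines a single "senses" split with the source-schema fields.
for row in abui["senses"]:
    print(row["sense"], row["pos"], row["lang"], row["lemma"], row["form"])
    break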