Languages: Hebrew
imvladikon committed
Commit bcf1c9f · 1 Parent(s): 5039823

Create UD_Hebrew-IAHLTwiki.py

Files changed (1)
  1. UD_Hebrew-IAHLTwiki.py +142 -0
UD_Hebrew-IAHLTwiki.py ADDED
try:
    import conllu
except ImportError:
    raise ImportError("please install the conllu package: pip install conllu")

import datasets


_CITATION = r"""\
@InProceedings{ZeldesHowellOrdanBenMoshe2022,
  author    = {Amir Zeldes and Nick Howell and Noam Ordan and Yifat Ben Moshe},
  booktitle = {Proceedings of {EMNLP} 2022},
  title     = {A Second Wave of UD Hebrew Treebanking and Cross-Domain Parsing},
  year      = {2022},
  address   = {Abu Dhabi, UAE},
}
"""  # noqa: W605

_DESCRIPTION = """\
Publicly available subset of the IAHLT UD Hebrew Treebank's Wikipedia section (https://www.iahlt.org/)
"""


# Relative paths are resolved against this dataset repository, so the
# .conllu files are expected under its data/ directory.
_UD_DATASETS = {
    "train": "data/he_iahltwiki-ud-train.conllu",
    "validation": "data/he_iahltwiki-ud-dev.conllu",
    "test": "data/he_iahltwiki-ud-test.conllu",
}

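# Each sentence in a .conllu file is a block of tab-separated lines, one token
# per line with ten columns (ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL,
# DEPS, MISC), preceded by "# sent_id = ..." and "# text = ..." comments.
# A hypothetical English row, for illustration only:
#
#   1	books	book	NOUN	NN	Number=Plur	0	root	_	_
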

class UniversaldependenciesConfig(datasets.BuilderConfig):
    """BuilderConfig for Universal Dependencies."""

    def __init__(self, data_url, **kwargs):
        super(UniversaldependenciesConfig, self).__init__(version=datasets.Version("2.7.0", ""), **kwargs)
        self.data_url = data_url


class UniversalDependencies(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("2.7.0")
    BUILDER_CONFIGS = [
        UniversaldependenciesConfig(
            name="he_iahltwiki",
            description=_DESCRIPTION,
            data_url="https://github.com/UniversalDependencies/UD_Hebrew-IAHLTwiki",
        )
    ]
    BUILDER_CONFIG_CLASS = UniversaldependenciesConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "lemmas": datasets.Sequence(datasets.Value("string")),
                    # Universal POS tags as a ClassLabel; "_" covers rows
                    # (e.g. multiword tokens) that carry no UPOS value.
                    "upos": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "NOUN",
                                "PUNCT",
                                "ADP",
                                "NUM",
                                "SYM",
                                "SCONJ",
                                "ADJ",
                                "PART",
                                "DET",
                                "CCONJ",
                                "PROPN",
                                "PRON",
                                "X",
                                "_",
                                "ADV",
                                "INTJ",
                                "VERB",
                                "AUX",
                            ]
                        )
                    ),
                    "xpos": datasets.Sequence(datasets.Value("string")),
                    "feats": datasets.Sequence(datasets.Value("string")),
                    "head": datasets.Sequence(datasets.Value("string")),
                    "deprel": datasets.Sequence(datasets.Value("string")),
                    "deps": datasets.Sequence(datasets.Value("string")),
                    "misc": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="https://www.iahlt.org/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # download_and_extract accepts a dict and returns a dict mapping each
        # split name to the local path of the downloaded file.
        filepaths = dl_manager.download_and_extract(_UD_DATASETS)
        return [
            datasets.SplitGenerator(name=datasets.Split(split), gen_kwargs={"filepath": path})
            for split, path in filepaths.items()
        ]

    def _generate_examples(self, filepath):
        id_ = 0
        with open(filepath, "r", encoding="utf-8") as data_file:
            tokenlist = list(conllu.parse_incr(data_file))
            for sent in tokenlist:
                # Prefer the treebank's own sentence id; fall back to a counter.
                idx = sent.metadata.get("sent_id", id_)
                tokens = [token["form"] for token in sent]
                # Fall back to joining the surface forms when the sentence
                # carries no "# text = ..." metadata line.
                txt = sent.metadata.get("text", " ".join(tokens))

                yield id_, {
                    "idx": str(idx),
                    "text": txt,
                    "tokens": tokens,
                    "lemmas": [token["lemma"] for token in sent],
                    "upos": [token["upos"] for token in sent],
                    "xpos": [token["xpos"] for token in sent],
                    "feats": [str(token["feats"]) for token in sent],
                    "head": [str(token["head"]) for token in sent],
                    "deprel": [str(token["deprel"]) for token in sent],
                    "deps": [str(token["deps"]) for token in sent],
                    "misc": [str(token["misc"]) for token in sent],
                }
                id_ += 1
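
For reference, a minimal usage sketch. The repository id `imvladikon/UD_Hebrew-IAHLTwiki` is an assumption inferred from the committer and file name, not stated in the diff; adjust it to wherever the script actually lives:

    from datasets import load_dataset

    # "he_iahltwiki" is the single config defined in BUILDER_CONFIGS above.
    # NOTE: the repository id is assumed, not confirmed by this commit.
    dataset = load_dataset("imvladikon/UD_Hebrew-IAHLTwiki", "he_iahltwiki")
    print(dataset["validation"][0]["tokens"])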