holylovenia committed on
Commit
43be7e0
1 Parent(s): 959ed0e

Upload indolem_ntp.py with huggingface_hub

Files changed (1)
  1. indolem_ntp.py +166 -0
indolem_ntp.py ADDED
@@ -0,0 +1,166 @@
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import json
+ import datasets
+ from nusacrowd.utils import schemas
+
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @article{DBLP:journals/corr/abs-2011-00677,
+   author    = {Fajri Koto and
+                Afshin Rahimi and
+                Jey Han Lau and
+                Timothy Baldwin},
+   title     = {IndoLEM and IndoBERT: {A} Benchmark Dataset and Pre-trained Language
+                Model for Indonesian {NLP}},
+   journal   = {CoRR},
+   volume    = {abs/2011.00677},
+   year      = {2020},
+   url       = {https://arxiv.org/abs/2011.00677},
+   eprinttype = {arXiv},
+   eprint    = {2011.00677},
+   timestamp = {Fri, 06 Nov 2020 15:32:47 +0100},
+   biburl    = {https://dblp.org/rec/journals/corr/abs-2011-00677.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ _LOCAL = False
+ _LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+ _DATASETNAME = "indolem_ntp"
+
+ _DESCRIPTION = """\
+ NTP (Next Tweet Prediction) is one of the tasks in the comprehensive IndoLEM Indonesian benchmark: given a list of tweets and a candidate option, the task is to predict whether the option is the next tweet or not.
+ This task is similar to the next sentence prediction (NSP) task used to train BERT (Devlin et al., 2019).
+ In NTP, each instance consists of a Twitter thread (containing 2 to 4 tweets) that we call the premise, and four possible options for the next tweet, one of which is the actual response from the original thread.
+
+ Train: 5681 threads
+ Development: 811 threads
+ Test: 1890 threads
+ """
+
+ _HOMEPAGE = "https://indolem.github.io/"
+
+ _LICENSE = "Creative Commons Attribution 4.0"
+
+ _URLS = {
+     _DATASETNAME: {
+         "train": "https://raw.githubusercontent.com/indolem/indolem/main/next_tweet_prediction/data/train.json",
+         "validation": "https://raw.githubusercontent.com/indolem/indolem/main/next_tweet_prediction/data/dev.json",
+         "test": "https://raw.githubusercontent.com/indolem/indolem/main/next_tweet_prediction/data/test.json",
+     }
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NEXT_SENTENCE_PREDICTION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class IndolemNTPDataset(datasets.GeneratorBasedBuilder):
+     """NTP (Next Tweet Prediction) is based on next sentence prediction (NSP): each instance consists of a Twitter thread (containing 2 to 4 tweets) and four possible options for the next tweet, one of which is the actual response from the original thread."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name="indolem_ntp_source",
+             version=SOURCE_VERSION,
+             description="Indolem NTP source schema",
+             schema="source",
+             subset_id="indolem_ntp",
+         ),
+         NusantaraConfig(
+             name="indolem_ntp_nusantara_pairs",
+             version=NUSANTARA_VERSION,
+             description="Indolem NTP Nusantara schema",
+             schema="nusantara_pairs",
+             subset_id="indolem_ntp",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "indolem_ntp_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tweets": datasets.Value("string"),
+                     "next_tweet": datasets.Value("string"),
+                     "label": datasets.Value("int8"),
+                 }
+             )
+         elif self.config.schema == "nusantara_pairs":
+             features = schemas.pairs_features([0, 1])
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_dir["test"],
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": data_dir["validation"],
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         data = self._read_data(filepath)
+         if self.config.schema == "source":
+             for i, row in enumerate(data):
+                 ex = {
+                     "id": str(i),
+                     "tweets": row[0],
+                     "next_tweet": row[1],
+                     "label": row[2],
+                 }
+                 yield i, ex
+
+         elif self.config.schema == "nusantara_pairs":
+             for i, row in enumerate(data):
+                 ex = {
+                     "id": str(i),
+                     "text_1": row[0],
+                     "text_2": row[1],
+                     "label": row[2],
+                 }
+                 yield i, ex
+
+     def _read_data(self, fname):
+         data = json.load(open(fname, "r"))
+         results = []
+         for datum in data:
+             tweets = " ".join(datum["tweets"])
+             for key, option in datum["next_tweet"]:
+                 results.append((tweets, option, key))
+         return results
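
For quick verification after the upload, the sketch below shows one way the script could be exercised locally. It is a minimal, hedged example and not part of the commit: it assumes the nusacrowd package is installed (the script imports NusantaraConfig and the pairs schema from it) and a datasets release that still supports loading local dataset scripts; the config names and field names are taken from the code above.

import datasets

# Source schema: each example has "id", "tweets" (the concatenated thread),
# "next_tweet" (one candidate reply), and a binary "label".
ntp_source = datasets.load_dataset("indolem_ntp.py", name="indolem_ntp_source")
print(ntp_source)                       # train / validation / test splits
print(ntp_source["train"][0]["label"])  # 0 or 1

# Nusantara pairs schema: the same triples mapped to "text_1" / "text_2" / "label".
ntp_pairs = datasets.load_dataset("indolem_ntp.py", name="indolem_ntp_nusantara_pairs")
print(ntp_pairs["train"][0]["text_1"][:80])

Both configs are built from the same (premise, candidate, label) triples emitted by _read_data: the source config keeps the original field names, while indolem_ntp_nusantara_pairs maps them onto NusaCrowd's generic text-pair schema for NSP-style classification.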