Languages:
Filipino
holylovenia committed on
Commit 1703442
1 Parent(s): 59f3038

Upload filipino_slang_norm.py with huggingface_hub

Files changed (1)
  1. filipino_slang_norm.py +136 -0
filipino_slang_norm.py ADDED
@@ -0,0 +1,136 @@
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
from datasets.download.download_manager import DownloadManager

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """
@inproceedings{flores-radev-2022-look,
    title = "Look Ma, Only 400 Samples! Revisiting the Effectiveness of Automatic N-Gram Rule Generation for Spelling Normalization in {F}ilipino",
    author = "Flores, Lorenzo Jaime and
      Radev, Dragomir",
    booktitle = "Proceedings of The Third Workshop on Simple and Efficient Natural Language Processing (SustaiNLP)",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates (Hybrid)",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.sustainlp-1.5",
    pages = "29--35",
}
"""

_LOCAL = False
_LANGUAGES = ["fil"]
_DATASETNAME = "filipino_slang_norm"
_DESCRIPTION = """\
This dataset contains 398 abbreviated and/or contracted Filipino words used in
Facebook comments made on weather advisories from a Philippine weather bureau.
Each word is paired with its standard spelling, as normalized by volunteers.
"""

_HOMEPAGE = "https://github.com/ljyflores/efficient-spelling-normalization-filipino"
_LICENSE = Licenses.UNKNOWN.value
_URLS = {
    "train": "https://github.com/ljyflores/efficient-spelling-normalization-filipino/raw/main/data/train_words.csv",
    "test": "https://github.com/ljyflores/efficient-spelling-normalization-filipino/raw/main/data/test_words.csv",
}

_SUPPORTED_TASKS = [Tasks.MULTILEXNORM]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class FilipinoSlangNormDataset(datasets.GeneratorBasedBuilder):
    """Filipino Slang Norm dataset by Flores and Radev (2022)"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    SEACROWD_SCHEMA_NAME = "t2t"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "src_sent": datasets.Value("string"),
                    "norm_sent": datasets.Value("string"),
                }
            )
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.text2text_features
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        # Download the raw train/test CSVs from GitHub and keep their cached local paths.
        data_files = {
            "train": Path(dl_manager.download_and_extract(_URLS["train"])),
            "test": Path(dl_manager.download_and_extract(_URLS["test"])),
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_files["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_files["test"],
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield examples as (key, example) tuples"""
        with open(filepath, encoding="utf-8") as f:
            for guid, line in enumerate(f):
                # Each CSV row holds an informal word and its normalized counterpart.
                src_sent, norm_sent = line.strip("\n").split(",")
                if self.config.schema == "source":
                    # Source schema keeps the original column names.
                    example = {
                        "id": str(guid),
                        "src_sent": src_sent,
                        "norm_sent": norm_sent,
                    }
                elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
                    # SEACrowd t2t schema maps the pair onto generic text_1/text_2 fields.
                    example = {
                        "id": str(guid),
                        "text_1": src_sent,
                        "text_2": norm_sent,
                        "text_1_name": "src_sent",
                        "text_2_name": "norm_sent",
                    }
                yield guid, example
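
For reference, a minimal usage sketch: assuming the seacrowd package is installed and this script is saved locally as filipino_slang_norm.py, the two configs declared in BUILDER_CONFIGS above could be loaded with the Hugging Face datasets library roughly as follows (trust_remote_code is required by recent datasets releases for script-based loaders).

import datasets

# Source schema: rows carry "id", "src_sent", "norm_sent".
source = datasets.load_dataset(
    "filipino_slang_norm.py",
    name="filipino_slang_norm_source",
    split="train",
    trust_remote_code=True,
)

# SEACrowd t2t schema: the same pairs exposed as "text_1"/"text_2".
t2t = datasets.load_dataset(
    "filipino_slang_norm.py",
    name="filipino_slang_norm_seacrowd_t2t",
    split="test",
    trust_remote_code=True,
)

print(source[0])
print(t2t[0])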