Languages:
Vietnamese
holylovenia committed
Commit b9eb052
1 Parent(s): 1e22465

Upload visim400.py with huggingface_hub
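
For reference, an upload like this is typically done with the huggingface_hub client. A minimal sketch (the repository id and local path below are assumptions, not details taken from this commit):

from huggingface_hub import HfApi

api = HfApi()
# Push the local dataloader script into a dataset repository on the Hub.
api.upload_file(
    path_or_fileobj="visim400.py",
    path_in_repo="visim400.py",
    repo_id="<namespace>/visim400",  # placeholder repository id
    repo_type="dataset",
    commit_message="Upload visim400.py with huggingface_hub",
)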

Files changed (1)
  1. visim400.py +159 -0
visim400.py ADDED
@@ -0,0 +1,159 @@
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{nguyen-etal-2018-introducing,
    title = "Introducing Two {V}ietnamese Datasets for Evaluating Semantic Models of (Dis-)Similarity and Relatedness",
    author = "Nguyen, Kim Anh and
      Schulte im Walde, Sabine and
      Vu, Ngoc Thang",
    editor = "Walker, Marilyn and
      Ji, Heng and
      Stent, Amanda",
    booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers)",
    month = jun,
    year = "2018",
    address = "New Orleans, Louisiana",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/N18-2032",
    doi = "10.18653/v1/N18-2032",
    pages = "199--205"
}
"""

_DATASETNAME = "visim400"

_DESCRIPTION = """\
ViSim-400 is a Vietnamese dataset of semantic relation \
pairs for evaluation of models that reflect the \
continuum between similarity and relatedness.

We choose 'Sim2' instead of 'Sim1' for the label output of \
our SEACrowd dataloader schema because it's been normalized to [1, 10].
"""

_HOMEPAGE = "https://www.ims.uni-stuttgart.de/forschung/ressourcen/experiment-daten/vnese-sem-datasets/"

_LANGUAGES = ["vie"]

_LICENSE = Licenses.CC_BY_NC_SA_4_0.value

_LOCAL = False

_URLS = {_DATASETNAME: "https://www.ims.uni-stuttgart.de/documents/ressourcen/experiment-daten/ViData.zip"}

_SUPPORTED_TASKS = [Tasks.SEMANTIC_SIMILARITY]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

class ViSim400Dataset(datasets.GeneratorBasedBuilder):
    """
    ViSim-400 is a Vietnamese dataset of semantic relation \
    pairs for evaluation of models that reflect the \
    continuum between similarity and relatedness.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
    SEACROWD_SCHEMA_NAME = "pairs_score"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=_SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=_SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
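    # The two configs above resolve to the names "visim400_source" (original
    # ViSim-400 columns) and "visim400_seacrowd_pairs_score" (SEACrowd pairs
    # schema with id, text_1, text_2, and a similarity score label).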

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":

            features = datasets.Features(
                {
                    "Word1": datasets.Value("string"),
                    "Word2": datasets.Value("string"),
                    "POS": datasets.Value("string"),
                    "Sim1": datasets.Value("string"),
                    "Sim2": datasets.Value("string"),
                    "STD": datasets.Value("string"),
                }
            )

        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.pairs_features_score()

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        data_dir = dl_manager.download_and_extract(_URLS[_DATASETNAME])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "ViData/ViSim-400/Visim-400.txt"),
                    "split": "test",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        # Visim-400.txt is tab-separated; the first row holds the column headers
        # (Word1, Word2, POS, Sim1, Sim2, STD) and each following row is a word pair.
        with open(filepath, "r", encoding="utf-8") as file:
            lines = file.readlines()

        data = []
        for line in lines:
            columns = line.strip().split("\t")
            data.append(columns)

        df = pd.DataFrame(data[1:], columns=data[0])

        for index, row in df.iterrows():

            if self.config.schema == "source":
                example = row.to_dict()

            elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":

                example = {
                    "id": str(index),
                    "text_1": str(row["Word1"]),
                    "text_2": str(row["Word2"]),
                    # I choose Sim2 instead of Sim1 because it's been normalized to [1, 10]
                    "label": str(row["Sim2"]),
                }

            yield index, example
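
To try the loader locally, the script can be passed directly to the datasets library. A minimal sketch, assuming the file is saved as visim400.py in the working directory and the seacrowd package is installed (recent datasets versions may additionally require trust_remote_code=True for script-based loaders):

import datasets

# Only a "test" split is defined by _split_generators above.
visim = datasets.load_dataset("visim400.py", name="visim400_seacrowd_pairs_score", split="test")
print(visim[0])  # {'id': ..., 'text_1': ..., 'text_2': ..., 'label': ...}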