Datasets:

Languages:
Vietnamese
ArXiv:
License:
holylovenia committed on
Commit
f68a4f3
1 Parent(s): 415f8a7

Upload uit_viocd.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. uit_viocd.py +141 -0
uit_viocd.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

# BibTeX entry for the paper introducing the UIT-ViOCD dataset.
_CITATION = """\
@incollection{nguyen2021vietnamese,
title={Vietnamese Complaint Detection on E-Commerce Websites},
author={Nguyen, Nhung Thi-Hong and Ha, Phuong Phan-Dieu and Nguyen, Luan Thanh and Nguyen, Kiet Van and Nguyen, Ngan Luu-Thuy},
booktitle={New Trends in Intelligent Software Methodologies, Tools and Techniques},
pages={618--629},
year={2021},
publisher={IOS Press}
}
"""

# Canonical SEACrowd identifier for this dataset; also used to build config names.
_DATASETNAME = "uit_viocd"

_DESCRIPTION = """\
The UIT-ViOCD dataset includes 5,485 reviews e-commerce sites across four categories: fashion, cosmetics, applications,
and phones. Each review is annotated by humans, assigning a label of 1 for complaints and 0 for non-complaints.
The dataset is divided into training, validation, and test sets, distributed approximately in an 80:10:10 ratio.
"""

_HOMEPAGE = "https://huggingface.co/datasets/tarudesu/ViOCD"

# ISO 639-3 language codes covered by the dataset (Vietnamese only).
_LANGUAGES = ["vie"]

# No explicit license is published for the upstream dataset.
_LICENSE = Licenses.UNKNOWN.value

# The data is fetched from a public URL, not a local file.
_LOCAL = False

# Direct-download CSV URLs for each split, keyed by split name
# ("val" is the upstream name for the validation split).
_URLS = {
"train": "https://huggingface.co/datasets/tarudesu/ViOCD/resolve/main/train.csv?download=true",
"val": "https://huggingface.co/datasets/tarudesu/ViOCD/resolve/main/val.csv?download=true",
"test": "https://huggingface.co/datasets/tarudesu/ViOCD/resolve/main/test.csv?download=true",
}

_SUPPORTED_TASKS = [Tasks.COMPLAINT_DETECTION]

# Version of the upstream (source) data.
_SOURCE_VERSION = "1.0.0"

# Version of the SEACrowd harmonized schema for this loader.
_SEACROWD_VERSION = "2024.06.20"
49
+
50
+
51
class UITVIOCDDataset(datasets.GeneratorBasedBuilder):
    """SEACrowd dataloader for UIT-ViOCD.

    UIT-ViOCD contains 5,485 human-annotated Vietnamese e-commerce reviews
    across four categories (fashion, cosmetics, applications, phones), each
    labelled 1 for a complaint and 0 for a non-complaint.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # NOTE(review): the class "names" are the ints 1 and 0 in that order, while
    # the raw CSV label (0/1) is passed straight through as the class value —
    # verify the intended index/name mapping against the ClassLabel encoding
    # rules before changing. Kept as-is for backward compatibility.
    LABEL_CLASSES = [1, 0]

    SEACROWD_SCHEMA_NAME = "text"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return the DatasetInfo matching the active config's schema."""
        if self.config.schema == "source":
            # Source schema mirrors the upstream CSV columns one-to-one.
            source_features = {
                "review": datasets.Value("string"),
                "review_tokenize": datasets.Value("string"),
                "label": datasets.ClassLabel(names=self.LABEL_CLASSES),
                "domain": datasets.Value("string"),
            }
            features = datasets.Features(source_features)
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            # Harmonized SEACrowd text-classification schema (id/text/label).
            features = schemas.text_features(self.LABEL_CLASSES)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download every split's CSV and wire each file into a SplitGenerator."""
        downloaded = dl_manager.download_and_extract(_URLS)

        # (datasets split name, key into _URLS/downloaded) — order matches the
        # original train/test/validation ordering.
        split_plan = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.TEST, "test"),
            (datasets.Split.VALIDATION, "val"),
        ]
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"filepath": downloaded[key]})
            for split, key in split_plan
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        """Yield (row index, example dict) pairs from one split's CSV file."""
        frame = pd.read_csv(filepath)

        if self.config.schema == "source":
            for row_id, row in frame.iterrows():
                yield row_id, {
                    "review": row["review"],
                    "review_tokenize": row["review_tokenize"],
                    "label": row["label"],
                    "domain": row["domain"],
                }
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            for row_id, row in frame.iterrows():
                yield row_id, {"id": str(row_id), "text": str(row["review"]), "label": int(row["label"])}