mkon committed
Commit 530acac
Parent: 1f82fe0

rename dataloader

Files changed (2)
  1. dataset_infos.json +0 -1
  2. nlpcc-stance.py +97 -0
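
With the loading script renamed to match the repository, the datasets library can resolve it by repository id (for script-based datasets it looks for a .py file named after the repo). A minimal sketch of the consumer side, assuming a Hub id of the form <namespace>/nlpcc-stance (the namespace is not shown in this commit); newer datasets releases may also require trust_remote_code=True for script-based loaders:

    from datasets import load_dataset

    # Hypothetical repository id; substitute the actual namespace.
    ds = load_dataset("<namespace>/nlpcc-stance", "task_a", split="train")
    print(ds)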
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"task_a": {"description": "This is a stance prediction dataset in Chinese.\nThe data is that from a shared task, stance detection in Chinese microblogs, in NLPCC-ICCPOL 2016. It covers Task A, a mandatory supervised task which detects stance towards five targets of interest with given labeled data. \n", "citation": "@incollection{xu2016overview,\n title={Overview of nlpcc shared task 4: Stance detection in chinese microblogs},\n author={Xu, Ruifeng and Zhou, Yu and Wu, Dongyin and Gui, Lin and Du, Jiachen and Xue, Yun},\n booktitle={Natural language understanding and intelligent applications},\n pages={907--916},\n year={2016},\n publisher={Springer}\n}\n", "homepage": "", "license": "cc-by-4.0", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "stance": {"num_classes": 3, "names": ["AGAINST", "FAVOR", "NONE"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "nlpcc_stance", "config_name": "task_a", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 775152, "num_examples": 2986, "dataset_name": "nlpcc_stance"}}, "download_checksums": {"taska_train.csv": {"num_bytes": 744329, "checksum": "8d27fe452e00bbcb5f20c3d7eaac21ef9a6df27598db6c87b79353e2fdd96e41"}}, "download_size": 744329, "post_processing_size": null, "dataset_size": 775152, "size_in_bytes": 1519481}}
 
 
nlpcc-stance.py ADDED
@@ -0,0 +1,97 @@
+ # Copyright 2022 Mads Kongsbak and Leon Derczynski
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """NLPCC Shared Task 4, Stance Detection in Chinese Microblogs (Task A)"""
+
+
+ import csv
+ import json
+ import os
+
+ import datasets
+
+ _CITATION = """\
+ @incollection{xu2016overview,
+ title={Overview of nlpcc shared task 4: Stance detection in chinese microblogs},
+ author={Xu, Ruifeng and Zhou, Yu and Wu, Dongyin and Gui, Lin and Du, Jiachen and Xue, Yun},
+ booktitle={Natural language understanding and intelligent applications},
+ pages={907--916},
+ year={2016},
+ publisher={Springer}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This is a stance prediction dataset in Chinese.
+ The data is that from a shared task, stance detection in Chinese microblogs, in NLPCC-ICCPOL 2016. It covers Task A, a mandatory supervised task which detects stance towards five targets of interest with given labeled data.
+ """
+
+ _HOMEPAGE = ""
+
+ _LICENSE = "cc-by-4.0"
+
+ class NLPCCConfig(datasets.BuilderConfig):
+
+     def __init__(self, **kwargs):
+         super(NLPCCConfig, self).__init__(**kwargs)
+
+ class NLPCCStance(datasets.GeneratorBasedBuilder):
+     """The NLPCC Shared Task 4 dataset regarding Stance Detection in Chinese Microblogs (Task A)"""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         NLPCCConfig(name="task_a", version=VERSION, description="Task A, the supervised learning task."),
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "target": datasets.Value("string"),
+                 "text": datasets.Value("string"),
+                 "stance": datasets.features.ClassLabel(
+                     names=[
+                         "AGAINST",
+                         "FAVOR",
+                         "NONE",
+                     ]
+                 )
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         train_text = dl_manager.download_and_extract("taska_train.csv")
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_text, "split": "train"}),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         with open(filepath, encoding="utf-8") as f:
+             reader = csv.DictReader(f, delimiter=",")
+             guid = 0
+             for instance in reader:
+                 instance["target"] = instance.pop("target")
+                 instance["text"] = instance.pop("text")
+                 instance["stance"] = instance.pop("stance")
+                 instance['id'] = str(guid)
+                 yield guid, instance
+                 guid += 1
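
For a quick sanity check of the new loader, the script can be exercised end to end from a local clone; the stance ClassLabel stores integers under the hood, mapping AGAINST, FAVOR, NONE to 0, 1, 2. A minimal sketch, assuming taska_train.csv is available next to nlpcc-stance.py; again, newer datasets releases may require trust_remote_code=True:

    from datasets import load_dataset

    # Load directly from the local script rather than the Hub.
    ds = load_dataset("./nlpcc-stance.py", "task_a", split="train")

    stance = ds.features["stance"]
    print(stance.names)                     # ['AGAINST', 'FAVOR', 'NONE']

    first = ds[0]
    print(first["id"], first["target"])     # id is the generated row index as a string
    print(stance.int2str(first["stance"]))  # label name for the first example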