shanya committed on
Commit 6cdec46
1 Parent(s): c4acb33

Upload crd3.py

Files changed (1)
  1. crd3.py +154 -0
crd3.py ADDED
@@ -0,0 +1,154 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """CRD3 dataset."""
+
+
+ import json
+ import os
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """
+ @inproceedings{rameshkumar-bailey-2020-storytelling,
+     title = {Storytelling with Dialogue: A Critical Role Dungeons and Dragons Dataset},
+     author = {Rameshkumar, Revanth and Bailey, Peter},
+     year = {2020},
+     publisher = {Association for Computational Linguistics},
+     booktitle = {ACL}
+ }
+ """
+
+ _DESCRIPTION = """
+ Storytelling with Dialogue: A Critical Role Dungeons and Dragons Dataset.
+ Critical Role is an unscripted, live-streamed show where a fixed group of people play Dungeons and Dragons, an open-ended role-playing game.
+ The dataset is collected from 159 Critical Role episodes transcribed to text dialogues, consisting of 398,682 turns. It also includes corresponding
+ abstractive summaries collected from the Fandom wiki. The dataset is linguistically unique in that the narratives are generated entirely through player
+ collaboration and spoken interaction. For each dialogue, there are a large number of turns, multiple abstractive summaries with varying levels of detail,
+ and semantic ties to the previous dialogues.
+ """
+
+ _URL = "https://github.com/RevanthRameshkumar/CRD3/archive/master.zip"
+
+
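+ # Route each aligned-data file to a split by its episode id: file names start
+ # with the episode id (e.g. "C1E001_..."), and the test_files/train_files/
+ # val_files lists shipped with the data enumerate episode ids per split.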
51
+ def get_train_test_dev_files(files, test_split, train_split, dev_split):
+     test_files, train_files, dev_files = [], [], []
+     for file in files:
+         filename = os.path.split(file)[1].split("_")[0]
+         if filename in test_split:
+             test_files.append(file)
+         elif filename in train_split:
+             train_files.append(file)
+         elif filename in dev_split:
+             dev_files.append(file)
+         else:
+             logger.info(f"skipped file {file}")
+     return test_files, train_files, dev_files
+
+
+ class CRD3(datasets.GeneratorBasedBuilder):
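+     """Builder for the CRD3 aligned summary chunks and dialogue turns."""
+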
67
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "chunk": datasets.Value("string"),
+                     "chunk_id": datasets.Value("int32"),
+                     "turn_start": datasets.Value("int32"),
+                     "turn_end": datasets.Value("int32"),
+                     "alignment_score": datasets.Value("float32"),
+                     "turns": [
+                         {
+                             "names": datasets.features.Sequence(datasets.Value("string")),
+                             "utterances": datasets.features.Sequence(datasets.Value("string")),
+                             "number": datasets.Value("int32"),
+                         }
+                     ],
+                 }
+             ),
+             homepage="https://github.com/RevanthRameshkumar/CRD3",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         path = dl_manager.download_and_extract(_URL)
+         test_file = os.path.join(path, "CRD3-master", "data", "aligned data", "test_files")
+         train_file = os.path.join(path, "CRD3-master", "data", "aligned data", "train_files")
+         dev_file = os.path.join(path, "CRD3-master", "data", "aligned data", "val_files")
+         with open(test_file, encoding="utf-8") as f:
+             test_splits = [file.replace("\n", "") for file in f.readlines()]
+         with open(train_file, encoding="utf-8") as f:
+             train_splits = [file.replace("\n", "") for file in f.readlines()]
+         with open(dev_file, encoding="utf-8") as f:
+             dev_splits = [file.replace("\n", "") for file in f.readlines()]
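+         # The release ships three alignment variants; the directory names c=2,
+         # c=3 and c=4 appear to encode the chunk size used during alignment (an
+         # assumption based on the directory names). All three are pooled here.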
102
+ c2 = "CRD3-master/data/aligned data/c=2"
103
+ c3 = "CRD3-master/data/aligned data/c=3"
104
+ c4 = "CRD3-master/data/aligned data/c=4"
105
+ files = [os.path.join(path, c2, file) for file in sorted(os.listdir(os.path.join(path, c2)))]
106
+ files.extend([os.path.join(path, c3, file) for file in sorted(os.listdir(os.path.join(path, c3)))])
107
+ files.extend([os.path.join(path, c4, file) for file in sorted(os.listdir(os.path.join(path, c4)))])
108
+
109
+ test_files, train_files, dev_files = get_train_test_dev_files(files, test_splits, train_splits, dev_splits)
110
+
111
+ return [
112
+ datasets.SplitGenerator(
113
+ name=datasets.Split.TRAIN,
114
+ gen_kwargs={"files_path": train_files},
115
+ ),
116
+ datasets.SplitGenerator(
117
+ name=datasets.Split.TEST,
118
+ gen_kwargs={"files_path": test_files},
119
+ ),
120
+ datasets.SplitGenerator(
121
+ name=datasets.Split.VALIDATION,
122
+ gen_kwargs={"files_path": dev_files},
123
+ ),
124
+ ]
125
+
126
+ def _generate_examples(self, files_path):
127
+ """Yields examples."""
128
+
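+         # Each yielded example pairs one summary chunk with its aligned dialogue
+         # turns. Illustrative shape (values made up):
+         #     {"chunk": "...", "chunk_id": 0, "turn_start": 0, "turn_end": 5,
+         #      "alignment_score": 1.2,
+         #      "turns": [{"names": ["MATT"], "utterances": ["..."], "number": 1}]}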
129
+         for id0, file in enumerate(files_path):
+             with open(file, encoding="utf-8") as f:
+                 data = json.load(f)
+             for id1, row in enumerate(data):
+                 chunk = row["CHUNK"]
+                 chunk_id = row["ALIGNMENT"]["CHUNK ID"]
+                 turn_start = row["ALIGNMENT"]["TURN START"]
+                 turn_end = row["ALIGNMENT"]["TURN END"]
+                 score = row["ALIGNMENT"]["ALIGNMENT SCORE"]
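+                 # The raw JSON uses upper-case keys; rename them in place to the
+                 # lower-case names declared in _info so rows match the schema.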
138
+                 for turn in row["TURNS"]:
+                     turn["names"] = turn["NAMES"]
+                     turn["utterances"] = turn["UTTERANCES"]
+                     turn["number"] = turn["NUMBER"]
+
+                     del turn["NAMES"]
+                     del turn["UTTERANCES"]
+                     del turn["NUMBER"]
+
+                 yield f"{id0}_{id1}", {
+                     "chunk": chunk,
+                     "chunk_id": chunk_id,
+                     "turn_start": turn_start,
+                     "turn_end": turn_end,
+                     "alignment_score": score,
+                     "turns": row["TURNS"],
+                 }
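
A minimal usage sketch, assuming the script is consumed through the datasets library (the "crd3" dataset id is an assumption based on this repo; recent datasets versions may also require trust_remote_code=True):

import datasets

# Downloads the CRD3 archive and builds the train/test/validation splits
# defined in _split_generators above.
crd3 = datasets.load_dataset("crd3")
print(crd3["train"][0]["chunk"])
print(len(crd3["train"][0]["turns"]))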