ag2435 committed
Commit 89c707a · verified · 1 Parent(s): ba4b3a0

Delete loading script

Files changed (1):
  1. phantom-wiki-v050.py +0 -182
phantom-wiki-v050.py DELETED
@@ -1,182 +0,0 @@
- """Dataset script for PhantomWiki v0.5.
-
- Template: https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py
- """
-
-
- import csv
- import json
- import os
-
- import datasets
-
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {A great new dataset},
- author={huggingface, Inc.
- },
- year={2020}
- }
- """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = "https://github.com/albertgong1/phantom-wiki"
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = ""
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URLS = {}
- # Construct splits
- SIZES = [
-     25,
-     50,
-     100,
-     200,
-     300,
-     400,
-     500,
-     1000,
-     2500,
-     5000,
-     10000,
- ]
- SPLITS = []
- for depth in [20]:
-     for size in SIZES:
-         for seed in [1, 2, 3]:
-             SPLITS.append(f"depth_{depth}_size_{size}_seed_{seed}")
- for filename, config in [("articles.json", "text-corpus"), ("questions.json", "question-answer"), ("facts.pl", "database")]:
-     _URLS[config] = {}
-     for split in SPLITS:
-         _URLS[config][split] = f"https://huggingface.co/datasets/mlcore/phantom-wiki-v050/resolve/main/{split}/{filename}"
-
- class PhantomWiki(datasets.GeneratorBasedBuilder):
-     """PhantomWiki v0.5"""
-
-     VERSION = datasets.Version("0.5.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other of the configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="text-corpus", version=VERSION, description="This config contains the documents in the text corpus"),
-         datasets.BuilderConfig(name="question-answer", version=VERSION, description="This config contains the question-answer pairs"),
-         datasets.BuilderConfig(name="database", version=VERSION, description="This config contains the complete Prolog database"),
-     ]
-
-     # DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
-
-     def _info(self):
-         """This method specifies the datasets.DatasetInfo object, which contains the information and feature types for the dataset
-         """
-         if self.config.name == "text-corpus":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-             features = datasets.Features(
-                 {
-                     "title": datasets.Value("string"),
-                     "article": datasets.Value("string"),
-                     # "facts": datasets.Value("string"),  # TODO
-                 }
-             )
-         elif self.config.name == "question-answer":
-             # NOTE: to see available data types: https://huggingface.co/docs/datasets/v2.5.2/en/package_reference/main_classes#datasets.Features
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     "intermediate_answers": datasets.Value("string"),
-                     "answer": datasets.Sequence(datasets.Value("string")),
-                     "prolog": datasets.Features(
-                         {
-                             "query": datasets.Value("string"),
-                             "answer": datasets.Value("string"),
-                         }
-                     ),
-                     "template": datasets.Sequence(datasets.Value("string")),
-                     "type": datasets.Value("int64"),  # this references the template type
-                     "difficulty": datasets.Value("int64"),
-                 }
-             )
-         elif self.config.name == "database":
-             features = datasets.Features(
-                 {
-                     "content": datasets.Value("string"),
-                 }
-             )
-         else:
-             raise ValueError(f"Unknown configuration name {self.config.name}")
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they differ between the configurations
-             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-
-         NOTE: If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-         """
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
-         # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-         urls = _URLS[self.config.name]
-         data_dir = dl_manager.download_and_extract(urls)
-         splits = []
-         for name, filepath in data_dir.items():
-             splits.append(datasets.SplitGenerator(
-                 name=name,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": filepath,
-                     "split": name,
-                 },
-             ))
-         return splits
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         if self.config.name in ["text-corpus", "question-answer"]:
-             with open(filepath, encoding="utf-8") as f:
-                 for key, data in enumerate(json.load(f)):
-                     yield key, data
-         elif self.config.name == "database":
-             with open(filepath, encoding="utf-8") as f:
-                 data = f.read()
-             # NOTE: Our schema expects a dictionary with a single key "content"
-             key = 0
-             yield key, {
-                 "content": data,
-             }
-         else:
-             raise ValueError(f"Unknown configuration name {self.config.name}")
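For reference, a minimal sketch of how this script could be used at the parent commit, before its deletion. The repo id (mlcore/phantom-wiki-v050), config names, and split naming scheme are taken from the URLs and SPLITS defined above; the specific split chosen here is illustrative, and trust_remote_code is the flag recent versions of the datasets library require for script-based datasets. After this commit, loading would fall back to the repo's data files rather than this script.

# Sketch only, not part of the commit: loading the deleted script's configs.
import datasets

# Question-answer pairs for one generated universe (depth 20, size 50, seed 1)
qa = datasets.load_dataset(
    "mlcore/phantom-wiki-v050",
    "question-answer",
    split="depth_20_size_50_seed_1",
    trust_remote_code=True,
)
print(qa[0]["question"], qa[0]["answer"])

# The matching article corpus and Prolog database live in the other two configs
corpus = datasets.load_dataset(
    "mlcore/phantom-wiki-v050", "text-corpus",
    split="depth_20_size_50_seed_1", trust_remote_code=True,
)
db = datasets.load_dataset(
    "mlcore/phantom-wiki-v050", "database",
    split="depth_20_size_50_seed_1", trust_remote_code=True,
)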