# Adapted from https://github.com/huggingface/datasets/blob/d69d1c6/templates/new_dataset_script.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datasets | |
import json | |
from .mediawikidump import MediaWikiDump | |
class IncelWikiDumpDataset(datasets.GeneratorBasedBuilder):
    """Dump of the Incels Wiki (https://incels.wiki) as of 2024-02-25.

    Downloads the raw MediaWiki XML history dump and yields one example per
    wiki page, including the full (JSON-encoded) revision history and the
    text of the latest revision.
    """

    VERSION = datasets.Version("1.0.0")

    # Direct link to the raw MediaWiki XML history dump hosted on the Hub.
    _DUMP_URL = (
        "https://huggingface.co/datasets/NyxKrage/incelwiki-20240225-dump"
        "/resolve/main/incelswiki-20240225-history.xml"
    )

    def _info(self):
        """Return the dataset metadata: description, schema, homepage, license."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description="Dump of the Incels Wiki (https://incels.wiki) as of 2024-02-25",
            # Column schema of the dataset.
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),  # MediaWiki page id
                    "title": datasets.Value("string"),  # The title of the page
                    # JSON-encoded list of all revisions of the page.
                    "revisions": datasets.Value("large_string"),
                    "text": datasets.Value("string"),  # The text of the latest revision
                }
            ),
            # Homepage of the dataset for documentation
            homepage="https://incels.wiki",
            # License for the dataset if available
            license="https://incels.wiki/w/Incel_Wiki:About#Legal",
            # Citation for the dataset
            citation="N/A",
        )

    def _split_generators(self, dl_manager):
        """Download the XML dump and expose it as a single TRAIN split."""
        # `download` returns the local path of the single downloaded file.
        filepath = dl_manager.download(self._DUMP_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepath},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs parsed from the XML dump at *filepath*.

        The MediaWiki page id doubles as the example key, which is unique
        per page by construction.
        """
        dump = MediaWikiDump(filepath)
        for page in dump.pages:
            yield page.id, {
                "id": page.id,
                "title": page.title,
                # default=str stringifies non-JSON-serializable revision
                # fields (presumably timestamps — deliberate lossy encoding).
                "revisions": json.dumps(
                    [r.__dict__ for r in page.revisions], default=str
                ),
                "text": page.text,
            }