# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A small plain-text dataset of Wikipedia, news, and book sentences for language modelling."""


import datasets


_DESCRIPTION = """\
A small sentence-per-line text corpus drawn from Wikipedia articles, news, and books,
intended for word-level language modelling.
"""

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see `_split_generators` below).
_URLS = {
    "train": "train.csv",
    "test": "test.csv",
}


# The name of the dataset class usually matches the script name, with CamelCase instead of snake_case.
class SmallWikiNewsBooks(datasets.GeneratorBasedBuilder):
    """A small sentence-per-line text dataset for language modelling."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text",
            version=VERSION,
            description="Word-level dataset. Can be used for language modelling.",
        ),
    ]

    # It is not mandatory to have a default configuration; use one if it makes sense.
    # If set, it must match a name defined in BUILDER_CONFIGS.
    DEFAULT_CONFIG_NAME = "plain_text"

    def _info(self):
        # This method specifies the datasets.DatasetInfo object, which contains the
        # information and typings for the dataset.
        features = datasets.Features(
            {
                # The features of the dataset: here a single raw-text column.
                "sentence": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the dataset page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # There are no (input, label) column pairs to expose.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, filepath):
        # This method handles the input defined in `_split_generators` to yield
        # (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself,
        # but must be unique for each example.
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                # Yield examples as (key, example) tuples, stripping the trailing newline.
                yield key, {
                    "sentence": row.strip(),
                }
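

# A minimal smoke-test sketch of how this loader could be exercised. It assumes the
# script sits next to `train.csv` and `test.csv`, and that the installed `datasets`
# release still supports loading from a local script path (script-based loading was
# removed in `datasets` 3.0, so an earlier version may be required to run this).
if __name__ == "__main__":
    from datasets import load_dataset

    # `__file__` points at this script; `load_dataset` builds both splits from it.
    dataset = load_dataset(__file__)
    print(dataset)  # DatasetDict with "train" and "test" splits
    print(dataset["train"][0]["sentence"])  # first sentence of the train split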