heziyevv committed on
Commit
be65518
1 Parent(s): 71162c8

Delete small_wiki_news_books.py

Files changed (1)
  1. small_wiki_news_books.py +0 -88
small_wiki_news_books.py DELETED
@@ -1,88 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # TODO: Address all TODOs and remove all explanatory comments
- """TODO: Add a description here."""
-
-
- import pandas as pd
- import json
- import os
-
- import datasets
-
-
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
- """
-
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URLS = {
-     "train": "train.csv",
-     "test": "test.csv",
- }
-
-
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
- class SmallWikiNewsBooks(datasets.GeneratorBasedBuilder):
-     """TODO: Short description of my dataset."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="plain_text",
-                                version=VERSION,
-                                description="Word-level dataset. Can be used for language modelling."),
-     ]
-
-     DEFAULT_CONFIG_NAME = "plain_text"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
-
-     def _info(self):
-         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
-         features = datasets.Features(
-             {
-                 "text": datasets.Value("string"),
-                 # These are the features of your dataset like images, labels ...
-             }
-         )
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Defined above; features can differ between configurations
-             supervised_keys=None,
-         )
-
-
-     def _split_generators(self, dl_manager):
-         downloaded_files = dl_manager.download_and_extract(_URLS)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath):
-         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         df = pd.read_csv(filepath)
-         for index, row in df.iterrows():
-             yield index, {"text": row["text"]}  # assumes the CSV has a "text" column matching the feature name
-
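For context, a script-based dataset like the one deleted above is normally consumed through `datasets.load_dataset`. The sketch below is illustrative only: the repo id `heziyevv/small_wiki_news_books` is inferred from the author and file name in this commit, and `plain_text` is the config defined in the deleted script.

# Illustrative sketch only; the repo id below is an assumption, not confirmed by this commit.
from datasets import load_dataset

ds = load_dataset(
    "heziyevv/small_wiki_news_books",  # assumed Hub repo id
    "plain_text",                      # config name from the deleted script
    trust_remote_code=True,            # required for loading-script datasets in recent `datasets` releases
)
print(ds["train"][0]["text"])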