from __future__ import absolute_import, division, print_function

from typing import List

import datasets


DESCRIPTION = '''
A dataset of sentences from Wikipedia.

Filtered to include only sentences of at most 64 characters.

Taken from the OPTIMUS project: https://github.com/ChunyuanLI/Optimus/blob/master/download_datasets.md
'''


DOWNLOAD_URL = "https://textae.blob.core.windows.net/optimus/data/datasets/wikipedia.segmented.nltk.txt"


class WikiSentences(datasets.GeneratorBasedBuilder):
    """Sentences from Wikipedia (<=64 characters each), as distributed by the OPTIMUS project."""

    def _info(self):
        return datasets.DatasetInfo(
            description=DESCRIPTION,
            features=datasets.Features(
                {
                    'text': datasets.Value("string"),
                }
            ),
            homepage="https://github.com/ChunyuanLI/Optimus/blob/master/download_datasets.md",
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # Download the pre-segmented sentence file and expose it as a single train split.
        path = dl_manager.download(DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": path}),
        ]

    def _generate_examples(self, filepath):
        # Yield one example per non-empty line, keyed by a running integer id.
        with open(filepath, encoding="utf-8") as txt_file:
            i = 0
            for line in txt_file:
                line = line.strip()
                if line:
                    yield i, {"text": line}
                    i += 1
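

# A minimal usage sketch, assuming this script is saved locally as "wiki_sentences.py"
# (the filename is illustrative) and a `datasets` version that still supports
# script-based builders. It loads the single train split and prints the first sentence.
if __name__ == "__main__":
    wiki = datasets.load_dataset("wiki_sentences.py", split="train")
    print(wiki[0]["text"])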