from __future__ import absolute_import, division, print_function

from typing import List

import datasets


DESCRIPTION = '''
A dataset of all the sentences in Wikipedia.

Filtered to include only sentences of at most 64 characters.

Taken from the OPTIMUS project: https://github.com/ChunyuanLI/Optimus/blob/master/download_datasets.md
'''

DOWNLOAD_URL = "https://textae.blob.core.windows.net/optimus/data/datasets/wikipedia.segmented.nltk.txt"


class WikiSentences(datasets.GeneratorBasedBuilder):
    """Sentences from Wikipedia, one example per non-empty line of the source file."""

    def _info(self):
        # Each example carries a single string feature named "text".
        return datasets.DatasetInfo(
            description=DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/ChunyuanLI/Optimus/blob/master/download_datasets.md",
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # Download (and cache) the raw sentence file; the whole corpus forms a single train split.
        path = dl_manager.download(DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": path}),
        ]

    def _generate_examples(self, filepath):
        # The source file holds one sentence per line; skip blank lines and
        # key each example with a running counter.
        with open(filepath, encoding="utf-8") as txt_file:
            i = 0
            for line in txt_file:
                line = line.strip()
                if line:
                    yield i, {"text": line}
                    i += 1
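

# Example usage (a minimal sketch, not part of the original script): assuming
# this file is saved locally as `wiki_sentences.py` (the filename is
# illustrative), the dataset can be loaded with the standard `datasets` API:
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("./wiki_sentences.py", split="train")
#     print(dataset[0]["text"])  # first sentence in the corpus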