"""The SocialGrep dataset loader base.""" |
|
|
|
|
|
import csv |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
DATASET_NAME = "the-reddit-dataset-dataset" |
|
DATASET_TITLE = "the-reddit-dataset-dataset" |
|
|
|
DATASET_DESCRIPTION = """\ |
|
A meta dataset of Reddit's own /r/datasets community. |
|
""" |
|
|
|
_HOMEPAGE = f"https://socialgrep.com/datasets/{DATASET_NAME}" |
|
|
|
_LICENSE = "CC-BY v4.0" |
|
|
|
URL_TEMPLATE = "https://exports.socialgrep.com/download/public/{dataset_file}.zip" |
|
DATASET_FILE_TEMPLATE = "{dataset}-{type}.csv" |
|
|
|
_DATASET_FILES = { |
|
'posts': DATASET_FILE_TEMPLATE.format(dataset=DATASET_NAME, type="posts"), |
|
'comments': DATASET_FILE_TEMPLATE.format(dataset=DATASET_NAME, type="comments"), |
|
} |
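
# For reference, the templates above resolve to one artifact per configuration:
#   posts    -> the-reddit-dataset-dataset-posts.csv, downloaded from
#               https://exports.socialgrep.com/download/public/the-reddit-dataset-dataset-posts.csv.zip
#   comments -> the-reddit-dataset-dataset-comments.csv, downloaded from
#               https://exports.socialgrep.com/download/public/the-reddit-dataset-dataset-comments.csv.zip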

_CITATION = f"""\
@misc{{socialgrep:{DATASET_NAME},
  title = {{{DATASET_TITLE}}},
  author = {{Lexyr Inc.}},
  year = {{2022}}
}}
"""


class TheRedditDatasetDataset(datasets.GeneratorBasedBuilder):
    """Builder for the r/datasets meta dataset exported by SocialGrep."""

    VERSION = datasets.Version("1.0.0")

    # One configuration per CSV export: the dataset's posts and its comments.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="posts", version=VERSION, description="The dataset posts."),
        datasets.BuilderConfig(name="comments", version=VERSION, description="The dataset comments."),
    ]

    def _info(self):
        # The posts and comments exports carry different columns, so each
        # configuration declares its own feature schema.
        if self.config.name == "posts":
            features = datasets.Features(
                {
                    "type": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "subreddit.id": datasets.Value("string"),
                    "subreddit.name": datasets.Value("string"),
                    "subreddit.nsfw": datasets.Value("bool"),
                    "created_utc": datasets.Value("timestamp[s,tz=utc]"),
                    "permalink": datasets.Value("string"),
                    "domain": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "selftext": datasets.Value("large_string"),
                    "title": datasets.Value("string"),
                    "score": datasets.Value("int32"),
                }
            )
        else:
            features = datasets.Features(
                {
                    "type": datasets.ClassLabel(num_classes=2, names=['post', 'comment']),
                    "id": datasets.Value("string"),
                    "subreddit.id": datasets.Value("string"),
                    "subreddit.name": datasets.Value("string"),
                    "subreddit.nsfw": datasets.Value("bool"),
                    "created_utc": datasets.Value("timestamp[s,tz=utc]"),
                    "permalink": datasets.Value("string"),
                    "body": datasets.Value("large_string"),
                    "sentiment": datasets.Value("float32"),
                    "score": datasets.Value("int32"),
                }
            )
        return datasets.DatasetInfo(
            description=DATASET_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download and extract the ZIP archive for the selected configuration;
        # every row lands in a single "train" split.
        my_urls = [URL_TEMPLATE.format(dataset_file=_DATASET_FILES[self.config.name])]
        data_dir = dl_manager.download_and_extract(my_urls)[0]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, _DATASET_FILES[self.config.name]),
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        # The CSV export stores every value as text; coerce the typed columns
        # back to bool/int/float and map empty strings to None.
        bool_cols = ["subreddit.nsfw"]
        int_cols = ["score", "created_utc"]
        float_cols = ["sentiment"]

        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in reader:
                for col in bool_cols:
                    if col in row:
                        row[col] = (row[col] == "true") if row[col] else None
                for col in int_cols:
                    if col in row:
                        row[col] = int(row[col]) if row[col] else None
                for col in float_cols:
                    if col in row:
                        row[col] = float(row[col]) if row[col] else None

                # Key each example by its Reddit fullname: "t3_" for posts,
                # "t1_" for comments; skip rows of any other type so `key`
                # can never be stale or undefined.
                if row["type"] == "post":
                    key = f"t3_{row['id']}"
                elif row["type"] == "comment":
                    key = f"t1_{row['id']}"
                else:
                    continue
                yield key, row
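

# Usage sketch: running this file directly builds the "comments" configuration
# through the builder above. This assumes the installed `datasets` release
# still supports script-based loading (newer versions may require
# `trust_remote_code=True`, or may have dropped loading scripts entirely).
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, "comments", split="train")
    print(dataset)
    print(dataset[0])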