SocialGrep committed
Commit 0de5fd8
Parent: 11f25c6

Delete reddit-wallstreetbets-aug-2021.py

Files changed (1)
  1. reddit-wallstreetbets-aug-2021.py +0 -182
reddit-wallstreetbets-aug-2021.py DELETED
@@ -1,182 +0,0 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The SocialGrep dataset loader base."""


import csv
import os

import datasets


DATASET_NAME = "wallstreetbets-posts-and-comments-for-august-2021"
DATASET_TITLE = "wallstreetbets-posts-and-comments-for-august-2021"

DATASET_DESCRIPTION = """\
This corpus contains the complete data for the activity on /r/WallStreetBets for the entire month of August 2021.
"""

_HOMEPAGE = f"https://socialgrep.com/datasets/{DATASET_NAME}"

_LICENSE = "CC-BY v4.0"

URL_TEMPLATE = "https://exports.socialgrep.com/download/public/{dataset_file}.zip"
DATASET_FILE_TEMPLATE = "{dataset}-{type}.csv"

_DATASET_FILES = {
    'posts': DATASET_FILE_TEMPLATE.format(dataset=DATASET_NAME, type="posts"),
    'comments': DATASET_FILE_TEMPLATE.format(dataset=DATASET_NAME, type="comments"),
}
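# For example, filling in the templates above, the "posts" configuration downloads
#   https://exports.socialgrep.com/download/public/wallstreetbets-posts-and-comments-for-august-2021-posts.csv.zip
# which extracts to wallstreetbets-posts-and-comments-for-august-2021-posts.csv.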

_CITATION = f"""\
@misc{{socialgrep:{DATASET_NAME},
  title = {{{DATASET_TITLE}}},
  author = {{Lexyr Inc.}},
  year = {{2022}}
}}
"""


class redditwallstreetbetsaug2021(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the dataset with configurable options,
    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig:
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other of the configurations in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="posts", version=VERSION, description="The dataset posts."),
        datasets.BuilderConfig(name="comments", version=VERSION, description="The dataset comments."),
    ]
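    # Concretely for this loader (the repository id below is inferred from the
    # script's filename and is an assumption, not something stated in this commit):
    #   datasets.load_dataset("SocialGrep/reddit-wallstreetbets-aug-2021", "posts")
    #   datasets.load_dataset("SocialGrep/reddit-wallstreetbets-aug-2021", "comments")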

    def _info(self):
        if self.config.name == "posts":  # This is the name of the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {
                    "type": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "subreddit.id": datasets.Value("string"),
                    "subreddit.name": datasets.Value("string"),
                    "subreddit.nsfw": datasets.Value("bool"),
                    "created_utc": datasets.Value("timestamp[s,tz=utc]"),
                    "permalink": datasets.Value("string"),
                    "domain": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "selftext": datasets.Value("large_string"),
                    "title": datasets.Value("string"),
                    "score": datasets.Value("int32"),
                }
            )
        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
            features = datasets.Features(
                {
                    "type": datasets.ClassLabel(num_classes=2, names=['post', 'comment']),
                    "id": datasets.Value("string"),
                    "subreddit.id": datasets.Value("string"),
                    "subreddit.name": datasets.Value("string"),
                    "subreddit.nsfw": datasets.Value("bool"),
                    "created_utc": datasets.Value("timestamp[s,tz=utc]"),
                    "permalink": datasets.Value("string"),
                    "body": datasets.Value("large_string"),
                    "sentiment": datasets.Value("float32"),
                    "score": datasets.Value("int32"),
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=DATASET_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,  # Defined above because they differ between the two configurations.
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # If several configurations are possible (listed in BUILDER_CONFIGS), the one selected by the user is in self.config.name.

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced by paths to local files.
        # By default the archives will be extracted, and a path to a cached folder where they are extracted is returned instead of the archive.
        my_urls = [URL_TEMPLATE.format(dataset_file=_DATASET_FILES[self.config.name])]
        data_dir = dl_manager.download_and_extract(my_urls)[0]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "filepath": os.path.join(data_dir, _DATASET_FILES[self.config.name]),
                    "split": "train",
                },
            )
        ]

    def _generate_examples(
        self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    ):
        """Yields examples as (key, example) tuples."""
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        bool_cols = ["subreddit.nsfw"]
        int_cols = ["score", "created_utc"]
        float_cols = ["sentiment"]

        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in reader:
                # Coerce the raw CSV strings to typed values; empty strings become None.
                for col in bool_cols:
                    if col in row:
                        row[col] = (row[col] == "true") if row[col] else None
                for col in int_cols:
                    if col in row:
                        row[col] = int(row[col]) if row[col] else None
                for col in float_cols:
                    if col in row:
                        row[col] = float(row[col]) if row[col] else None

                # Key each example by its Reddit "fullname": posts are prefixed t3_, comments t1_.
                if row["type"] == "post":
                    key = f"t3_{row['id']}"
                elif row["type"] == "comment":
                    key = f"t1_{row['id']}"
                yield key, row


if __name__ == "__main__":
    print("Please use the HuggingFace dataset library, or")
    print("download from https://socialgrep.com/datasets.")
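For reference, a minimal usage sketch of the loader as it existed before this deletion. The repository id is inferred from the script's filename and is an assumption; on recent datasets releases, loading a script-based dataset additionally requires trust_remote_code=True.

import datasets

# Repository id is an assumption inferred from the deleted script's filename.
posts = datasets.load_dataset("SocialGrep/reddit-wallstreetbets-aug-2021", "posts", split="train")
print(posts.features["created_utc"])  # timestamp[s, tz=utc], per _info() above
print(posts[0]["title"], posts[0]["score"])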