Datasets: Create diffusiondb-pixelart.py

diffusiondb-pixelart.py  ADDED  (+408 -0)

@@ -0,0 +1,408 @@
# Original Copyright 2022 Jay Wang, Evan Montoya, David Munechika, Alex Yang, Ben Hoover, Polo Chau
# MIT License
"""Loading script for DiffusionDB."""

import re
import numpy as np
import pandas as pd

from json import load
from os.path import join, basename
from huggingface_hub import hf_hub_url

import datasets

# Citation for the dataset (see the arXiv page or the dataset repo/website)
_CITATION = """\
@article{wangDiffusionDBLargescalePrompt2022,
    title = {{{DiffusionDB}}: {{A}} Large-Scale Prompt Gallery Dataset for Text-to-Image Generative Models},
    author = {Wang, Zijie J. and Montoya, Evan and Munechika, David and Yang, Haoyang and Hoover, Benjamin and Chau, Duen Horng},
    year = {2022},
    journal = {arXiv:2210.14896 [cs]},
    url = {https://arxiv.org/abs/2210.14896}
}
"""

# Official dataset description
_DESCRIPTION = """
DiffusionDB is the first large-scale text-to-image prompt dataset. It contains 2
million images generated by Stable Diffusion using prompts and hyperparameters
specified by real users. The unprecedented scale and diversity of this
human-actuated dataset provide exciting research opportunities in understanding
the interplay between prompts and generative models, detecting deepfakes, and
designing human-AI interaction tools to help users more easily use these models.
"""

_HOMEPAGE = "https://poloclub.github.io/diffusiondb"
_LICENSE = "CC0 1.0"
_VERSION = datasets.Version("0.9.1")

# Programmatically generate the URLs for different parts.
# hf_hub_url() provides a flexible way to resolve the file URLs, e.g.
# https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/images/part-000001.zip
_URLS = {}
_URLS_LARGE = {}
_PART_IDS = range(1, 2001)
_PART_IDS_LARGE = range(1, 14001)

for i in _PART_IDS:
    _URLS[i] = hf_hub_url(
        "poloclub/diffusiondb",
        filename=f"images/part-{i:06}.zip",
        repo_type="dataset",
    )

for i in _PART_IDS_LARGE:
    if i < 10001:
        _URLS_LARGE[i] = hf_hub_url(
            "poloclub/diffusiondb",
            filename=f"diffusiondb-large-part-1/part-{i:06}.zip",
            repo_type="dataset",
        )
    else:
        _URLS_LARGE[i] = hf_hub_url(
            "poloclub/diffusiondb",
            filename=f"diffusiondb-large-part-2/part-{i:06}.zip",
            repo_type="dataset",
        )

# Add the metadata parquet URL as well
_URLS["metadata"] = hf_hub_url(
    "poloclub/diffusiondb", filename="metadata.parquet", repo_type="dataset"
)

_URLS_LARGE["metadata"] = hf_hub_url(
    "poloclub/diffusiondb",
    filename="metadata-large.parquet",
    repo_type="dataset",
)

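# For reference (derived from the filename templates above), hf_hub_url()
# resolves the entries to URLs of the form:
#   _URLS[1]           -> https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/images/part-000001.zip
#   _URLS_LARGE[12000] -> https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/diffusiondb-large-part-2/part-012000.zip
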
# Map numeric sampler IDs in the metadata to human-readable sampler names
_SAMPLER_DICT = {
    1: "ddim",
    2: "plms",
    3: "k_euler",
    4: "k_euler_ancestral",
    5: "k_heun",
    6: "k_dpm_2",
    7: "k_dpm_2_ancestral",
    8: "k_lms",
    9: "others",
}


class DiffusionDBConfig(datasets.BuilderConfig):
    """BuilderConfig for DiffusionDB."""

    def __init__(self, part_ids, is_large, **kwargs):
        """BuilderConfig for DiffusionDB.

        Args:
            part_ids([int]): A list of part_ids.
            is_large(bool): Whether to download from DiffusionDB Large (14 million images)
            **kwargs: keyword arguments forwarded to super.
        """
        super(DiffusionDBConfig, self).__init__(version=_VERSION, **kwargs)
        self.part_ids = part_ids
        self.is_large = is_large

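# For illustration (hypothetical values): DiffusionDBConfig(name="2m_first_1k",
# part_ids=[1], is_large=False, description="...") would select zip part 1
# only, i.e. the first 1,000 images of the 2m subset.
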
class DiffusionDB(datasets.GeneratorBasedBuilder):
    """A large-scale text-to-image prompt gallery dataset based on Stable Diffusion."""

    BUILDER_CONFIGS = []

    # Programmatically generate the configuration options (HF requires the
    # config key to be a string)
    for num_k in [1, 5, 10, 50, 100, 500, 1000]:
        for sampling in ["first", "random"]:
            for is_large in [False, True]:
                num_k_str = f"{num_k}k" if num_k < 1000 else f"{num_k // 1000}m"
                subset_str = "large_" if is_large else "2m_"

                if sampling == "random":
                    # Name the config
                    cur_name = subset_str + "random_" + num_k_str

                    # Add a short description for each config
                    cur_description = (
                        f"Random {num_k_str} images with their prompts and parameters"
                    )

                    # Sample part_ids (note: sampling is unseeded, so "random"
                    # configs differ across sessions)
                    total_part_ids = _PART_IDS_LARGE if is_large else _PART_IDS
                    part_ids = np.random.choice(
                        total_part_ids, num_k, replace=False
                    ).tolist()
                else:
                    # Name the config
                    cur_name = subset_str + "first_" + num_k_str

                    # Add a short description for each config
                    cur_description = f"The first {num_k_str} images in this dataset with their prompts and parameters"

                    # Take the first num_k parts (each part holds 1,000 images)
                    total_part_ids = _PART_IDS_LARGE if is_large else _PART_IDS
                    part_ids = total_part_ids[:num_k]

                # Create configs
                BUILDER_CONFIGS.append(
                    DiffusionDBConfig(
                        name=cur_name,
                        part_ids=part_ids,
                        is_large=is_large,
                        description=cur_description,
                    ),
                )

    # Add a few more options for Large only
    for num_k in [5000, 10000]:
        for sampling in ["first", "random"]:
            num_k_str = f"{num_k // 1000}m"
            subset_str = "large_"

            if sampling == "random":
                # Name the config
                cur_name = subset_str + "random_" + num_k_str

                # Add a short description for each config
                cur_description = (
                    f"Random {num_k_str} images with their prompts and parameters"
                )

                # Sample part_ids
                total_part_ids = _PART_IDS_LARGE
                part_ids = np.random.choice(
                    total_part_ids, num_k, replace=False
                ).tolist()
            else:
                # Name the config
                cur_name = subset_str + "first_" + num_k_str

                # Add a short description for each config
                cur_description = f"The first {num_k_str} images in this dataset with their prompts and parameters"

                # Take the first num_k parts (each part holds 1,000 images)
                total_part_ids = _PART_IDS_LARGE
                part_ids = total_part_ids[:num_k]

            # Create configs
            BUILDER_CONFIGS.append(
                DiffusionDBConfig(
                    name=cur_name,
                    part_ids=part_ids,
                    is_large=True,
                    description=cur_description,
                ),
            )

    # Manually add the "all" configs for both 2m and Large
    BUILDER_CONFIGS.append(
        DiffusionDBConfig(
            name="2m_all",
            part_ids=_PART_IDS,
            is_large=False,
            description="All images with their prompts and parameters",
        ),
    )

    BUILDER_CONFIGS.append(
        DiffusionDBConfig(
            name="large_all",
            part_ids=_PART_IDS_LARGE,
            is_large=True,
            description="All images with their prompts and parameters",
        ),
    )

    # We also provide a text-only option, which loads only the metadata parquet file
    BUILDER_CONFIGS.append(
        DiffusionDBConfig(
            name="2m_text_only",
            part_ids=[],
            is_large=False,
            description="Only include all prompts and parameters (no image)",
        ),
    )

    BUILDER_CONFIGS.append(
        DiffusionDBConfig(
            name="large_text_only",
            part_ids=[],
            is_large=True,
            description="Only include all prompts and parameters (no image)",
        ),
    )

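    # The loops and appends above yield config names such as "2m_random_1k",
    # "2m_first_5k", "large_random_100k", "large_first_10m", "2m_all",
    # "large_all", "2m_text_only", and "large_text_only".
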
    # Default config: load 1k random images
    DEFAULT_CONFIG_NAME = "2m_random_1k"

    def _info(self):
        """Specify the information of DiffusionDB."""

        if "text_only" in self.config.name:
            features = datasets.Features(
                {
                    "image_name": datasets.Value("string"),
                    "prompt": datasets.Value("string"),
                    "part_id": datasets.Value("uint16"),
                    "seed": datasets.Value("uint32"),
                    "step": datasets.Value("uint16"),
                    "cfg": datasets.Value("float32"),
                    "sampler": datasets.Value("string"),
                    "width": datasets.Value("uint16"),
                    "height": datasets.Value("uint16"),
                    "user_name": datasets.Value("string"),
                    "timestamp": datasets.Value("timestamp[us, tz=UTC]"),
                    "image_nsfw": datasets.Value("float32"),
                    "prompt_nsfw": datasets.Value("float32"),
                },
            )
        else:
            features = datasets.Features(
                {
                    "image": datasets.Image(),
                    "prompt": datasets.Value("string"),
                    "seed": datasets.Value("uint32"),
                    "step": datasets.Value("uint16"),
                    "cfg": datasets.Value("float32"),
                    "sampler": datasets.Value("string"),
                    "width": datasets.Value("uint16"),
                    "height": datasets.Value("uint16"),
                    "user_name": datasets.Value("string"),
                    "timestamp": datasets.Value("timestamp[us, tz=UTC]"),
                    "image_nsfw": datasets.Value("float32"),
                    "prompt_nsfw": datasets.Value("float32"),
                },
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

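    # Note: compared to the image configs, the text_only features drop the
    # Image() column and instead carry image_name and part_id, so rows can
    # still be traced back to their source zip parts.
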
    def _split_generators(self, dl_manager):
        # If several configurations are possible (listed in BUILDER_CONFIGS),
        # the configuration selected by the user is in self.config.name

        # dl_manager is a datasets.download.DownloadManager that can be used
        # to download and extract URLs. It can accept any type of nested
        # list/dict and will give back the same structure with each URL
        # replaced by a path to a local file. By default, archives are
        # extracted and the path to the cached extraction folder is returned
        # instead of the archive itself.

        # Download and extract zip files of all sampled part_ids
        data_dirs = []
        json_paths = []

        # Resolve the URLs
        if self.config.is_large:
            urls = _URLS_LARGE
        else:
            urls = _URLS

        for cur_part_id in self.config.part_ids:
            cur_url = urls[cur_part_id]
            data_dir = dl_manager.download_and_extract(cur_url)

            data_dirs.append(data_dir)
            json_paths.append(join(data_dir, f"part-{cur_part_id:06}.json"))

        # Also download the metadata table
        metadata_path = dl_manager.download(urls["metadata"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "data_dirs": data_dirs,
                    "json_paths": json_paths,
                    "metadata_path": metadata_path,
                },
            ),
        ]

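    # Expected layout of one extracted part (inferred from the path handling
    # in this script): a part-XXXXXX.json file mapping each image file name to
    # its prompt/parameter dict, stored alongside the image files themselves.
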
    def _generate_examples(self, data_dirs, json_paths, metadata_path):
        # This method handles input defined in _split_generators to yield
        # (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in
        # itself, but it must be unique for each example.

        # Load the metadata parquet file if the config is text_only
        if "text_only" in self.config.name:
            metadata_df = pd.read_parquet(metadata_path)
            for _, row in metadata_df.iterrows():
                yield row["image_name"], {
                    "image_name": row["image_name"],
                    "prompt": row["prompt"],
                    "part_id": row["part_id"],
                    "seed": row["seed"],
                    "step": row["step"],
                    "cfg": row["cfg"],
                    "sampler": _SAMPLER_DICT[int(row["sampler"])],
                    "width": row["width"],
                    "height": row["height"],
                    "user_name": row["user_name"],
                    "timestamp": None
                    if pd.isnull(row["timestamp"])
                    else row["timestamp"],
                    "image_nsfw": row["image_nsfw"],
                    "prompt_nsfw": row["prompt_nsfw"],
                }

        else:
            num_data_dirs = len(data_dirs)
            assert num_data_dirs == len(json_paths)

            # Recover the part_ids we need from the JSON file names
            part_ids = []
            for path in json_paths:
                cur_id = int(re.sub(r"part-(\d+)\.json", r"\1", basename(path)))
                part_ids.append(cur_id)

            # Read the metadata table (only rows with the needed part_ids).
            # We have to use pandas here to make the dataset preview work (it
            # uses streaming mode).
            metadata_table = pd.read_parquet(
                metadata_path,
                filters=[("part_id", "in", part_ids)],
            )

            # Iterate through all extracted zip folders for images
            for k in range(num_data_dirs):
                cur_data_dir = data_dirs[k]
                cur_json_path = json_paths[k]

                with open(cur_json_path, "r", encoding="utf8") as f:
                    json_data = load(f)

                for img_name in json_data:
                    img_params = json_data[img_name]
                    img_path = join(cur_data_dir, img_name)

                    # Query the metadata
                    query_result = metadata_table.query(
                        f'`image_name` == "{img_name}"'
                    )

                    with open(img_path, "rb") as f:
                        img_bytes = f.read()

                    # Yield examples as (key, example) tuples
                    yield img_name, {
                        "image": {
                            "path": img_path,
                            "bytes": img_bytes,
                        },
                        "prompt": img_params["p"],
                        "seed": int(img_params["se"]),
                        "step": int(img_params["st"]),
                        "cfg": float(img_params["c"]),
                        "sampler": img_params["sa"],
                        "width": query_result["width"].to_list()[0],
                        "height": query_result["height"].to_list()[0],
                        "user_name": query_result["user_name"].to_list()[0],
                        "timestamp": None
                        if pd.isnull(query_result["timestamp"].to_list()[0])
                        else query_result["timestamp"].to_list()[0],
                        "image_nsfw": query_result["image_nsfw"].to_list()[0],
                        "prompt_nsfw": query_result["prompt_nsfw"].to_list()[0],
                    }
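
# -----------------------------------------------------------------------------
# Usage sketch (not part of the loading script). Assuming this script lives in
# a Hugging Face dataset repo, its configs can be loaded by name; "USER/REPO"
# below is a placeholder for that repo id:
#
#   from datasets import load_dataset
#
#   # 1,000 random images with their prompts and hyperparameters (the default config)
#   ds = load_dataset("USER/REPO", "2m_random_1k")
#
#   # Prompts and parameters only, no image downloads
#   meta = load_dataset("USER/REPO", "2m_text_only")
#
#   print(ds["train"][0]["prompt"])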