import os
import datasets
from huggingface_hub import HfApi
from datasets import DownloadManager, DatasetInfo
from datasets.data_files import DataFilesDict

_EXTENSION = [".png", ".jpg", ".jpeg"]  # image extensions to index when walking the extracted archives
_NAME = "animelover/danbooru2022"  # hub repository that hosts the zip archives
_REVISION = "main"


class DanbooruDataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        # the numeric prefix keeps the configs in a stable sort order
        datasets.BuilderConfig(
            name="0-sfw",
            description="sfw subset",
        ),
        datasets.BuilderConfig(
            name="1-full",
            description="full dataset",
        ),
    ]

    def _info(self) -> DatasetInfo:
        # Each example is an image together with its comma-separated tag string
        # and the originating Danbooru post id.
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "tags": datasets.Value("string"),
                    "post_id": datasets.Value("int64"),
                }
            ),
            supervised_keys=None,
            citation="",
        )

    def _split_generators(self, dl_manager: DownloadManager):
        # Enumerate the zip archives in the hub repo, then download and extract
        # them all into a single "train" split.
        hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
        data_files = DataFilesDict.from_hf_repo(
            {datasets.Split.TRAIN: ["**"]},
            dataset_info=hfh_dataset_info,
            allowed_extensions=["zip"],
        )
        gs = []
        for split, files in data_files.items():
            downloaded_files = dl_manager.download_and_extract(files)
            gs.append(datasets.SplitGenerator(name=split, gen_kwargs={"filepath": downloaded_files}))
        return gs

    def _generate_examples(self, filepath):
        for path in filepath:
            # Collect every file under the extracted archive, relative to its root.
            all_fnames = {os.path.relpath(os.path.join(root, fname), start=path)
                          for root, _dirs, files in os.walk(path) for fname in files}
            image_fnames = sorted([fname for fname in all_fnames if os.path.splitext(fname)[1].lower() in _EXTENSION],
                                  reverse=True)
            for image_fname in image_fnames:
                image_path = os.path.join(path, image_fname)
                # Tags live in a sidecar .txt file that shares the image's basename.
                tags_path = os.path.join(path, os.path.splitext(image_fname)[0] + ".txt")
                with open(tags_path, "r", encoding="utf-8") as f:
                    tags = f.read()
                # The sfw config drops any post whose tag list contains an NSFW tag.
                if self.config.name == "0-sfw" and any(tag.strip() in nsfw_tags for tag in tags.split(",")):
                    continue
                # Filenames are the Danbooru post ids, so the basename doubles as the key.
                post_id = int(os.path.splitext(os.path.basename(image_fname))[0])
                yield image_fname, {"image": image_path, "tags": tags, "post_id": post_id}


# Tag blocklist used by the "0-sfw" config; referenced from _generate_examples at run time.
nsfw_tags = [
    "nude", "completely nude", "topless", "bottomless", "sex", "oral",
    "fellatio gesture", "tentacle sex", "nipples", "pussy", "vaginal",
    "pubic hair", "anus", "ass focus", "penis", "cum", "condom", "sex toy",
]
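

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script itself. It assumes the
    # `datasets` library is installed and that _NAME points at the hub repo hosting
    # this script; newer `datasets` releases may also need trust_remote_code=True.
    # Streaming avoids downloading every archive just to peek at a few examples.
    from datasets import load_dataset

    ds = load_dataset(_NAME, "0-sfw", split="train", streaming=True)
    for i, example in enumerate(ds):
        print(example["post_id"], example["tags"][:80])
        if i >= 2:
            break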