import datasets
import numpy as np
from huggingface_hub import HfApi

# Enumerate the image-embedding chunks stored in the source repository.
# Note: this runs at import time, so even listing the dataset requires
# network access to the Hugging Face Hub.
api = HfApi()
repo_files = list(api.dataset_info(repo_id="laion/laion2b-en-vit-h-14-embeddings").siblings)
filenames = [x.rfilename for x in repo_files]
img_embs = [x for x in filenames if x.startswith("img_emb/")]
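
# The chunk paths look like "img_emb/img_emb_0000.npy" (the name shown here
# is illustrative). The Hub listing may not come back sorted, so sort to keep
# the example order deterministic across runs.
img_embs = sorted(img_embs)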


class LAIONEmbeddingsConfig(datasets.BuilderConfig):
    """Default (and only) configuration for the embeddings dataset."""

    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)


class LAIONEmbeddings(datasets.GeneratorBasedBuilder):
    """Loads the precomputed LAION-2B-en ViT-H/14 image embeddings chunk by chunk."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        LAIONEmbeddingsConfig()
    ]

    def _get_features(self) -> datasets.Features:
        # Each example carries a single image embedding as a flat float32 sequence.
        return datasets.Features({
            "embedding": datasets.Sequence(datasets.Value("float32")),
        })

    def _info(self):
        features = self._get_features()

        return datasets.DatasetInfo(
            features=features,
        )

    def _split_generators(self, dl_manager):
        # Resolve every img_emb chunk to its download URL. In regular mode
        # dl_manager.download returns local cached paths; in streaming mode
        # it returns URLs, which np.DataSource handles in _generate_examples.
        main_url = "https://huggingface.co/datasets/laion/laion2b-en-vit-h-14-embeddings/resolve/main/"
        archive_paths = dl_manager.download([main_url + x for x in img_embs])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "chunks": archive_paths,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, chunks, split):
        idx = 0
        for chunk in chunks:
            # np.DataSource transparently opens both local paths (regular
            # download mode) and URLs (streaming mode), caching remote files
            # on disk so np.load can read them by name.
            file = np.DataSource().open(chunk)
            data = np.load(file.name)
            for example in data:
                # Example keys must be unique within a split; yielding "" for
                # every row would raise DuplicatedKeysError, so use a counter.
                yield idx, {
                    "embedding": example,
                }
                idx += 1