import datasets
import json
import numpy
import tarfile
from io import BytesIO

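# Schema of one example: a string id, the clip's JSON metadata serialized as a
# string, a (1, 77, 768) float32 prompt embedding, and per-frame (4, 64, 64)
# float32 video mean/std arrays.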
_FEATURES = datasets.Features(
    {
        "id": datasets.Value("string"),
        "metadata": datasets.Value("string"),
        "prompt": datasets.Array3D(shape=(1, 77, 768), dtype="float32"),
        "vidmean": datasets.Sequence(feature=datasets.Array3D(shape=(4, 64, 64), dtype="float32")),
        "vidstd": datasets.Sequence(feature=datasets.Array3D(shape=(4, 64, 64), dtype="float32"))
    }
)

class FunkLoaderStream(datasets.GeneratorBasedBuilder):
    """TempoFunk Dataset: downloads the tar chunks listed in `lists/chunk_list.json`
    and yields one example per clip, containing a prompt embedding, per-frame video
    latent mean/std arrays, and JSON metadata."""

    def _info(self):
        return datasets.DatasetInfo(
            description="TempoFunk Dataset",
            features=_FEATURES,
            homepage="None",
            citation="None",
            license="None"
        )

    def _split_generators(self, dl_manager):
        # Load the list of chunk names.
        with open(dl_manager.download("lists/chunk_list.json"), "r") as f:
            _CHUNK_LIST = json.load(f)

        # Create a list to hold the downloaded chunks.
        _list = []

        # Download each chunk file.
        for chunk in _CHUNK_LIST:
            _list.append(dl_manager.download(f"data/{chunk}.tar"))

        # Return the list of downloaded chunks.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "chunks": _list,
                },
            ),
        ]
    
    def _generate_examples(self, chunks):
        """Generate examples (prompt embedding, video latent stats, metadata) from each tar chunk."""
        for chunk in chunks:
            # Read the whole chunk into a seekable in-memory buffer before opening it as a tar.
            with open(chunk, 'rb') as tar_data:
                tar_bytes_io = BytesIO(tar_data.read())

            response_dict = {}

            with tarfile.open(fileobj=tar_bytes_io, mode='r') as tar:
                for file_info in tar:
                    if not file_info.isfile():
                        continue

                    # Filename format is "<type>_<id>.<ext>", e.g. "txt_<id>.npy".
                    file_name = file_info.name
                    file_type = file_name.split('_')[0]
                    file_id = file_name.split('_')[1].split('.')[0]

                    if file_id not in response_dict:
                        response_dict[file_id] = {}

                    # txt = prompt embedding; vis = video std; vim = video mean (all .npy arrays)
                    if file_type in ('txt', 'vis', 'vim'):
                        # numpy.load needs a seekable file object, so copy the member's bytes into a BytesIO first.
                        file_contents = BytesIO(tar.extractfile(file_info).read())
                        response_dict[file_id][file_type] = numpy.load(file_contents)
                    elif file_type == 'jso':
                        # jso = JSON metadata
                        response_dict[file_id][file_type] = json.loads(tar.extractfile(file_info).read())
            
            for key, value in response_dict.items():
                yield key, {
                    "id": key,
                    "metadata": json.dumps(value['jso']),
                    "prompt": value['txt'],
                    "vidmean": value['vim'],
                    "vidstd": value['vis'],
                }
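
# Usage sketch (illustrative, not part of the loader): with this script placed in a
# dataset repository as its loading script, the data can be loaded through the
# standard `datasets` API. The repository path below is a placeholder.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("path/to/tempofunk-dataset", split="train")
#   sample = next(iter(ds))
#   print(sample["id"], json.loads(sample["metadata"]))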