import datasets
import itertools
import os

SHARD_SIZE = 2500
NUM_SHARDS = 40

# 40 training shards of SHARD_SIZE segments each, plus a separate validation archive.
_DATA_FILES = [
  f'data_{i*SHARD_SIZE}_to_{(i+1)*SHARD_SIZE}.zip' for i in range(NUM_SHARDS)
] + ['val.zip']

_DESCRIPTION = """\
TODO
"""


class CommaVQ(datasets.GeneratorBasedBuilder):

  def _info(self):
    # Each example exposes only the path to a file inside (or extracted from)
    # an archive; callers open the file contents themselves.
    return datasets.DatasetInfo(
      description=_DESCRIPTION,
      features=datasets.Features(
        {"path": datasets.Value("string")}
      )
    )

  def _split_generators(self, dl_manager):
    """Returns one SplitGenerator per downloaded archive."""
    dl_manager.download_config.ignore_url_params = True
    # Honor user-specified data_files when given, otherwise download every shard.
    if self.config.data_files is not None:
      to_dl = list(itertools.chain.from_iterable(self.config.data_files.values()))
    else:
      to_dl = _DATA_FILES
    downloaded_files = dl_manager.download(to_dl)
    # In streaming mode the archives are not extracted; examples then carry
    # paths inside the archives rather than paths on the local filesystem.
    if dl_manager.is_streaming:
      local_extracted_archive = [None] * len(downloaded_files)
    else:
      local_extracted_archive = dl_manager.extract(downloaded_files)
    # Each archive becomes its own split, named "0", "1", ... in download order.
    return [
      datasets.SplitGenerator(
        name=str(i),
        gen_kwargs={
          "local_extracted_archive": local_extracted_archive[i],
          "files": dl_manager.iter_archive(downloaded_files[i]),
        },
      ) for i in range(len(downloaded_files))]

  def _generate_examples(self, local_extracted_archive, files):
    for path_in_archive, _ in files:
      # Prefer the extracted on-disk path when available (non-streaming mode).
      path = os.path.join(local_extracted_archive, path_in_archive) if local_extracted_archive is not None else path_in_archive
      yield path_in_archive, {'path': path}
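

# Usage sketch (an assumption, not part of the loading script itself): once this
# builder is published on the Hugging Face Hub, each archive is exposed as its
# own split named "0", "1", ... The repository id "commaai/commavq" below is
# assumed; adjust it to the actual Hub id.
if __name__ == "__main__":
  ds = datasets.load_dataset("commaai/commavq", streaming=True)
  # Take one example from the first training shard; it only contains a path.
  example = next(iter(ds["0"]))
  print(example["path"])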