import datasets
import glob
import os
import numpy as np

NUM_SHARDS = 10

_URLS = [
    f'https://huggingface.co/datasets/commaai/comma2k19/resolve/main/Chunk_{i}.zip'
    for i in range(1, NUM_SHARDS + 1)
]

_DESCRIPTION = """\ |
|
comma2k19 is a dataset of over 33 hours of commute in California's 280 highway. |
|
This means 2019 segments, 1 minute long each, on a 20km section of highway driving between California's San Jose and San Francisco. |
|
comma2k19 is a fully reproducible and scalable dataset. |
|
The data was collected using comma EONs that has sensors similar to those of any modern smartphone including a road-facing camera, phone GPS, thermometers and 9-axis IMU. |
|
Additionally, the EON captures raw GNSS measurements and all CAN data sent by the car with a comma grey panda. |
|
""" |
|
|
|
class Comma2k19(datasets.GeneratorBasedBuilder):
    """Loading script for comma2k19: each of the chunk archives is exposed as its own split of file paths."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Each example is just the path of one file inside a chunk archive.
            features=datasets.Features(
                {"path": datasets.Value("string")}
            ),
        )

    def _split_generators(self, dl_manager):
        """Returns one SplitGenerator per downloaded chunk archive."""
        dl_manager.download_config.ignore_url_params = True
        downloaded_files = dl_manager.download(_URLS)
        # When streaming, the archives are not extracted; files are read straight from the zips via iter_archive.
        local_extracted_archive = dl_manager.extract(downloaded_files) if not dl_manager.is_streaming else [None] * len(downloaded_files)
        return [
            datasets.SplitGenerator(
                name=str(i),
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive[i],
                    "files": dl_manager.iter_archive(downloaded_files[i]),
                },
            )
            for i in range(len(downloaded_files))
        ]

    def _generate_examples(self, local_extracted_archive, files):
        # Non-streaming path: walk the extracted archive and yield one example per local file path.
        files = [os.path.join(dp, f) for dp, dn, filenames in os.walk(local_extracted_archive) for f in filenames]
        for path in files:
            yield path, {'path': path}

    def _get_examples_iterable_for_split(self, split_generator):
        # Streaming path: iter_archive yields (path_inside_archive, file_object) tuples,
        # so only the path component is used as both key and example value.
        for path in split_generator.gen_kwargs['files']:
            yield path[0], {'path': path[0]}

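# Example usage (a sketch, not part of the loading script): assuming this builder is
# hosted on the Hugging Face Hub as "commaai/comma2k19" (the repo that _URLS points at),
# each chunk becomes a split named "0".."9". Depending on the installed version of
# `datasets`, loading a script-based dataset may also require trust_remote_code=True.
#
#     from datasets import load_dataset
#
#     # Regular mode: downloads and extracts the chunk archives, then yields
#     # local file paths via _generate_examples.
#     chunk = load_dataset("commaai/comma2k19", split="0")
#
#     # Streaming mode: iterates paths inside the zips without extracting them,
#     # via _get_examples_iterable_for_split.
#     streamed = load_dataset("commaai/comma2k19", split="0", streaming=True)
#     for example in streamed:
#         print(example["path"])
#         break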