See `read_pyarrow.py` in this repository for how to read a single PyArrow file.
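For reference, here is a minimal sketch of what reading one shard looks like. The contents of `read_pyarrow.py` are not shown here, so this is an assumption based on the dataset class below: the shards are assumed to be in Arrow IPC file format, the path `data/shard-00000.arrow` is a placeholder, and the column names `id`, `text`, and `image` are taken from the example below.

```python
import pyarrow as pa

# Placeholder path; point this at one of the downloaded .arrow shards.
path = "data/shard-00000.arrow"

# Memory-map the shard and read it as an Arrow IPC file.
with pa.memory_map(path, "rb") as source:
    table = pa.ipc.open_file(source).read_all()

print(table.schema)    # expected columns: id, text, image
print(table.num_rows)

# Inspect one row: text is a caption string (or a list of captions),
# image holds the raw encoded image bytes.
text = table["text"][0].as_py()
img_bytes = table["image"][0].as_py()
print(text, len(img_bytes))
```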
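Example PyTorch dataset: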
```python
import glob
import random

import pyarrow as pa
from torch.utils.data import Dataset


class ImageCaptionArrowDataset(Dataset):
    def __init__(
        self,
        dataset_file,
        tokenizer,
    ):
        # dataset_file is a glob pattern matching one or more .arrow shards
        tables = [
            pa.ipc.open_file(pa.memory_map(f, "rb")).read_all()
            for f in glob.glob(dataset_file)
        ]
        self.data = pa.concat_tables(tables)
        self.tokenizer = tokenizer
        # do other initialization, like init image preprocessing fn

    def __getitem__(self, index):
        # item_id = self.data["id"][index].as_py()
        text = self.data["text"][index].as_py()  # get caption text
        if isinstance(text, list):  # some rows store multiple captions
            text = random.choice(text)
        img_bytes = self.data["image"][index].as_py()  # get raw image bytes
        # do some processing with image and text, return the features, e.g.:
        # img_feat = self.image_bytes_to_tensor(img_bytes)
        # inputs = self.tokenizer(
        #     text,
        #     padding="max_length",
        #     max_length=self.max_text_len,
        #     truncation=True,
        #     return_token_type_ids=True,
        #     return_attention_mask=True,
        #     add_special_tokens=True,
        #     return_tensors="pt",
        # )
        # input_ids = inputs.input_ids.squeeze(0)
        # attention_mask = inputs.attention_mask.squeeze(0)
        # return {
        #     # "item_ids": item_id,
        #     "text_ids": input_ids,
        #     "input_ids": input_ids,
        #     "text_masks": attention_mask,
        #     "pixel_values": img_feat,
        # }
        # placeholder return so the class runs as-is without a tokenizer
        return {"text": text, "image_bytes": img_bytes}

    def __len__(self):
        return len(self.data)
```
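A minimal usage sketch, under the same assumptions: the glob pattern is a placeholder, the tokenizer is omitted, and with the placeholder return above the images are still raw bytes, so the identity `collate_fn` below stands in for a real collate step that would decode and batch them.

```python
from torch.utils.data import DataLoader

# Hypothetical shard location; adjust to where the dataset was downloaded.
dataset = ImageCaptionArrowDataset("data/*.arrow", tokenizer=None)
print(len(dataset))

# Identity collate: items are dicts of str/bytes, not stackable tensors yet.
loader = DataLoader(dataset, batch_size=4, collate_fn=lambda batch: batch)
for batch in loader:
    for item in batch:
        print(len(item["image_bytes"]), item["text"][:50])
    break
```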