Error loading data

#2
by ShakilaMT - opened

Hi,

Is the SemEval 2025 Task 8 dev set not available yet? When I run the code below to load the data, I get a "NotImplementedError". Here are the code snippet and the error:

from datasets import load_dataset
semeval_dev_qa = load_dataset("cardiffnlp/databench", name="semeval", split="dev")
Downloading data: 100%|██████████| 49/49 [00:00<00:00, 24563.27files/s]
Downloading data: 100%|██████████| 16/16 [00:00<?, ?files/s]
---------------------------------------------------------------------------
NotImplementedError                       Traceback (most recent call last)
Cell In[5], line 3
      1 from datasets import load_dataset
----> 3 semeval_dev_qa = load_dataset("cardiffnlp/databench", name="semeval", split="dev")

File ~\AppData\Local\anaconda3\envs\nlp\Lib\site-packages\datasets\load.py:2609, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)
   2606     return builder_instance.as_streaming_dataset(split=split)
   2608 # Download and prepare data
-> 2609 builder_instance.download_and_prepare(
   2610     download_config=download_config,
   2611     download_mode=download_mode,
   2612     verification_mode=verification_mode,
   2613     num_proc=num_proc,
   2614     storage_options=storage_options,
   2615 )
   2617 # Build dataset for splits
   2618 keep_in_memory = (
   2619     keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
   2620 )

File ~\AppData\Local\anaconda3\envs\nlp\Lib\site-packages\datasets\builder.py:1027, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)
   1025     if num_proc is not None:
   1026         prepare_split_kwargs["num_proc"] = num_proc
-> 1027     self._download_and_prepare(
   1028         dl_manager=dl_manager,
   1029         verification_mode=verification_mode,
   1030         **prepare_split_kwargs,
   1031         **download_and_prepare_kwargs,
   1032     )
   1033 # Sync info
   1034 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())

File ~\AppData\Local\anaconda3\envs\nlp\Lib\site-packages\datasets\builder.py:1100, in DatasetBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs)
   1098 split_dict = SplitDict(dataset_name=self.dataset_name)
   1099 split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
-> 1100 split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
   1102 # Checksums verification
   1103 if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums:

File ~\AppData\Local\anaconda3\envs\nlp\Lib\site-packages\datasets\packaged_modules\parquet\parquet.py:62, in Parquet._split_generators(self, dl_manager)
     60     for file in itertools.chain.from_iterable(files):
     61         with open(file, "rb") as f:
---> 62             self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
     63         break
     64 splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))

File ~\AppData\Local\anaconda3\envs\nlp\Lib\site-packages\datasets\features\features.py:1718, in Features.from_arrow_schema(cls, pa_schema)
   1712         metadata_features = Features.from_dict(metadata["info"]["features"])
   1713 metadata_features_schema = metadata_features.arrow_schema
   1714 obj = {
   1715     field.name: (
   1716         metadata_features[field.name]
   1717         if field.name in metadata_features and metadata_features_schema.field(field.name) == field
-> 1718         else generate_from_arrow_type(field.type)
   1719     )
   1720     for field in pa_schema
   1721 }
   1722 return cls(**obj)

File ~\AppData\Local\anaconda3\envs\nlp\Lib\site-packages\datasets\features\features.py:1430, in generate_from_arrow_type(pa_type)
   1428     return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
   1429 elif isinstance(pa_type, pa.DictionaryType):
-> 1430     raise NotImplementedError  # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table
   1431 elif isinstance(pa_type, pa.DataType):
   1432     return Value(dtype=_arrow_to_datasets_dtype(pa_type))

NotImplementedError: 

Appreciate your help.

Kind regards.

Cardiff NLP org

This kind of error is usually fixed by making sure you're not using an older version of the datasets library: the traceback above shows generate_from_arrow_type raising NotImplementedError on a dictionary-encoded Parquet column, which older releases could not convert. Here's a working demonstration with the latest version, 3.0.0, which installs the correct version of pyarrow under the hood.

[Screenshot 2024-09-23 at 10.51.16.png: working demonstration loading the dataset with datasets 3.0.0]
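For reference, a minimal sketch of the fix as text rather than a screenshot. The load_dataset call is copied from the question above; the version check and print lines are illustrative additions, not from the thread:

# Upgrade first, e.g.:
#   pip install -U "datasets>=3.0.0"

import datasets
from datasets import load_dataset

# Sanity check: the fix relies on a recent datasets release and the
# pyarrow version it pulls in, so confirm what is actually installed.
print(datasets.__version__)  # should print 3.0.0 or later

# Same call as in the question; with datasets 3.0.0 the dictionary-encoded
# Parquet columns that triggered the NotImplementedError load without issue.
semeval_dev_qa = load_dataset("cardiffnlp/databench", name="semeval", split="dev")
print(semeval_dev_qa)

If you already have datasets installed, upgrading in place and restarting the kernel (the original traceback comes from a Jupyter cell) should be enough.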

jorses changed discussion status to closed
