Error code: DatasetGenerationCastError

Exception: DatasetGenerationCastError

Message: An error occurred while generating the dataset.

All the data files must have the same columns, but at some point there are 4 new columns ({'split', 'Dataset', 'num_samples', 'attributes'}) and 3 missing columns ({'config', 'chunks', 'updated_at'}). This happened while the json dataset builder was generating data using hf://datasets/snchen1230/LEVIRCDPlus/train/metadata.json (at revision 8a9342908fa06104cca3539d9d897eabd9ca4474). Please either edit the data files to have matching columns, or separate them into different configurations (see the docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations).

Traceback:

    Traceback (most recent call last):
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1870, in _prepare_split_single
        writer.write_table(table)
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 622, in write_table
        pa_table = table_cast(pa_table, self._schema)
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2292, in table_cast
        return cast_table_to_schema(table, schema)
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2240, in cast_table_to_schema
        raise CastError(
    datasets.table.CastError: Couldn't cast
    Dataset: string
    split: string
    num_samples: int64
    attributes: struct<image_a: struct<dtype: string, format: string>, image_b: struct<dtype: string, format: string>, class: struct<dtype: string, format: string>, classes: struct<0: string, 255: string>>
      child 0, image_a: struct<dtype: string, format: string>
          child 0, dtype: string
          child 1, format: string
      child 1, image_b: struct<dtype: string, format: string>
          child 0, dtype: string
          child 1, format: string
      child 2, class: struct<dtype: string, format: string>
          child 0, dtype: string
          child 1, format: string
      child 3, classes: struct<0: string, 255: string>
          child 0, 0: string
          child 1, 255: string
    to
    {'chunks': [{'chunk_bytes': Value(dtype='int64', id=None), 'chunk_size': Value(dtype='int64', id=None), 'dim': Value(dtype='null', id=None), 'filename': Value(dtype='string', id=None)}], 'config': {'chunk_bytes': Value(dtype='int64', id=None), 'chunk_size': Value(dtype='null', id=None), 'compression': Value(dtype='null', id=None), 'data_format': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'data_spec': Value(dtype='string', id=None), 'encryption': Value(dtype='null', id=None), 'item_loader': Value(dtype='string', id=None)}, 'updated_at': Value(dtype='string', id=None)}
    because column names don't match

    During handling of the above exception, another exception occurred:

    Traceback (most recent call last):
      File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1412, in compute_config_parquet_and_info_response
        parquet_operations, partial, estimated_dataset_info = stream_convert_to_parquet(
      File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 988, in stream_convert_to_parquet
        builder._prepare_split(
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1741, in _prepare_split
        for job_id, done, content in self._prepare_split_single(
      File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1872, in _prepare_split_single
        raise DatasetGenerationCastError.from_cast_error(
    datasets.exceptions.DatasetGenerationCastError: An error occurred while generating the dataset (message as above)
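The mismatch comes from the repository mixing two kinds of JSON files with disjoint columns: chunk-index files (`chunks`, `config`, `updated_at`) and per-split sample metadata (`Dataset`, `split`, `num_samples`, `attributes`). The Hub-side fix is the one the error links to: declare separate configurations so the two layouts are never merged into one schema. In the meantime, the metadata can still be read directly. A minimal sketch, assuming only the `train/metadata.json` path named in the error message and using `huggingface_hub` plus the standard library:

```python
import json

from huggingface_hub import hf_hub_download

# Fetch the per-split metadata file named in the error message and parse it
# with the standard library, bypassing the viewer's schema cast entirely.
path = hf_hub_download(
    repo_id="snchen1230/LEVIRCDPlus",
    repo_type="dataset",
    filename="train/metadata.json",
)
with open(path) as f:
    meta = json.load(f)

# Expected top-level fields, per the preview below:
# 'Dataset', 'split', 'num_samples', 'attributes'
print(meta)
```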
The viewer's preview exposes seven columns, split across two row layouts (one per JSON file type):

| column | type |
|---|---|
| chunks | list |
| config | dict |
| updated_at | string |
| Dataset | string |
| split | string |
| num_samples | int64 |
| attributes | dict |
Row 1 — columns `chunks`, `config`, and `updated_at` (all other columns are null):

chunks:

[
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-0-0.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-0-1.bin"
},
{
"chunk_bytes": 80741100,
"chunk_size": 11,
"dim": null,
"filename": "chunk-0-2.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-1-0.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-1-1.bin"
},
{
"chunk_bytes": 80741100,
"chunk_size": 11,
"dim": null,
"filename": "chunk-1-2.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-2-0.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-2-1.bin"
},
{
"chunk_bytes": 80741100,
"chunk_size": 11,
"dim": null,
"filename": "chunk-2-2.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-3-0.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-3-1.bin"
},
{
"chunk_bytes": 88081200,
"chunk_size": 12,
"dim": null,
"filename": "chunk-3-2.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-4-0.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-4-1.bin"
},
{
"chunk_bytes": 88081200,
"chunk_size": 12,
"dim": null,
"filename": "chunk-4-2.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-5-0.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-5-1.bin"
},
{
"chunk_bytes": 88081200,
"chunk_size": 12,
"dim": null,
"filename": "chunk-5-2.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-6-0.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-6-1.bin"
},
{
"chunk_bytes": 88081200,
"chunk_size": 12,
"dim": null,
"filename": "chunk-6-2.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-7-0.bin"
},
{
"chunk_bytes": 249563400,
"chunk_size": 34,
"dim": null,
"filename": "chunk-7-1.bin"
},
{
"chunk_bytes": 88081200,
"chunk_size": 12,
"dim": null,
"filename": "chunk-7-2.bin"
}
]

config:

{
  "chunk_bytes": 256000000,
  "chunk_size": null,
  "compression": null,
  "data_format": [
    "numpy",
    "numpy",
    "numpy"
  ],
  "data_spec": "[1, {\"type\": \"builtins.dict\", \"context\": \"[\\\"image_a\\\", \\\"image_b\\\", \\\"class\\\"]\", \"children_spec\": [{\"type\": null, \"context\": null, \"children_spec\": []}, {\"type\": null, \"context\": null, \"children_spec\": []}, {\"type\": null, \"context\": null, \"children_spec\": []}]}]",
  "encryption": null,
  "item_loader": "PyTreeLoader"
}

updated_at: 1736263833.7249866
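The `chunk-*.bin` files, 256 MB chunk budget, `numpy` data format, and `PyTreeLoader` item loader in this row match the index layout written by litdata (Lightning's streaming-dataset tooling); that attribution is an inference from the field names, not something the repository states. Under that assumption, a speculative sketch of how such chunks are typically read back (the local directory path is a placeholder):

```python
from litdata import StreamingDataset, StreamingDataLoader

# Speculative sketch: point StreamingDataset at a directory containing the
# index file and the chunk-*.bin files listed above. "/data/LEVIRCDPlus/train"
# is a placeholder path, not a location documented by this repository.
dataset = StreamingDataset(input_dir="/data/LEVIRCDPlus/train")

# According to the data_spec above, each item decodes to a dict of numpy
# arrays keyed "image_a", "image_b", and "class".
sample = dataset[0]
print(sample["image_a"].shape, sample["image_b"].shape, sample["class"].dtype)

# Batched iteration for training would typically go through the paired loader.
loader = StreamingDataLoader(dataset, batch_size=4)
```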
Row 2 — columns `Dataset`, `split`, `num_samples`, and `attributes` (all other columns are null); this is the layout contributed by `train/metadata.json`:

Dataset: LEVIRCDPlus

split: train

num_samples: 637

attributes:

{
"image_a": {
"dtype": "uint8",
"format": "numpy"
},
"image_b": {
"dtype": "uint8",
"format": "numpy"
},
"class": {
"dtype": "uint8",
"format": "numpy"
},
"classes": {
"0": "no change",
"255": "change"
}
}
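Taken together, the metadata row describes 637 training samples, each holding two co-registered uint8 images (`image_a`, `image_b`) and a uint8 change mask (`class`) whose values map to the labels in `classes` (0 = no change, 255 = change). A small illustration of that label map, using a dummy array in place of a real mask:

```python
import numpy as np

# Label map taken from the "classes" entry above.
CLASSES = {0: "no change", 255: "change"}

def to_change_mask(label: np.ndarray) -> np.ndarray:
    """Turn a {0, 255} uint8 change mask into a boolean change map."""
    return label == 255

# Dummy 2x2 mask standing in for a real "class" array from the dataset.
mask = np.array([[0, 255], [255, 0]], dtype=np.uint8)
print(to_change_mask(mask))
# [[False  True]
#  [ True False]]
print({int(v): CLASSES[int(v)] for v in np.unique(mask)})
# {0: 'no change', 255: 'change'}
```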
No dataset card has been added to this repository yet.