---
license: cc-by-nc-4.0
dataset_info:
- config_name: symbolic_simulation
features:
- name: num_var
dtype: int64
- name: function
dtype: string
splits:
- name: train
num_bytes: 700
num_examples: 15
download_size: 1816
dataset_size: 700
- config_name: audio_RBFP
features:
- name: file_name
dtype: string
- name: label
dtype: string
- name: position
dtype: int64
- name: audio
dtype:
audio:
sampling_rate: 16000
mono: false
splits:
- name: train
num_bytes: 27157123544.625
num_examples: 84843
- name: validation
num_bytes: 3194785897.375
num_examples: 9981
download_size: 30351871119
dataset_size: 30351909442
- config_name: audio_RBRP
features:
- name: file_name
dtype: string
- name: label
dtype: string
- name: position
dtype: int64
- name: audio
dtype:
audio:
sampling_rate: 16000
mono: false
splits:
- name: train
num_bytes: 27157123544.625
num_examples: 84843
- name: validation
num_bytes: 3194785897.375
num_examples: 9981
download_size: 30351960252
dataset_size: 30351909442
- config_name: audio_SBFP
features:
- name: file_name
dtype: string
- name: label
dtype: string
- name: position
dtype: int64
- name: audio
dtype:
audio:
sampling_rate: 16000
mono: false
splits:
- name: train
num_bytes: 27157123544.625
num_examples: 84843
- name: validation
num_bytes: 3194785897.375
num_examples: 9981
download_size: 30351832397
dataset_size: 30351909442
- config_name: audio_SBRP
features:
- name: file_name
dtype: string
- name: label
dtype: string
- name: position
dtype: int64
- name: audio
dtype:
audio:
sampling_rate: 16000
mono: false
splits:
- name: train
num_bytes: 27157123544.625
num_examples: 84843
- name: validation
num_bytes: 3194785897.375
num_examples: 9981
download_size: 30351924920
dataset_size: 30351909442
- config_name: vision_RBFP
features:
- name: image
dtype: image
- name: foreground_label
dtype: int64
- name: position_x
dtype: int64
- name: position_y
dtype: int64
splits:
- name: train
num_bytes: 7539715850
num_examples: 50000
- name: validation
num_bytes: 1507888500
num_examples: 10000
download_size: 9047652019
dataset_size: 9047604350
- config_name: vision_RBRP
features:
- name: image
dtype: image
- name: foreground_label
dtype: int64
- name: position_x
dtype: int64
- name: position_y
dtype: int64
splits:
- name: train
num_bytes: 7540447300
num_examples: 50000
- name: validation
num_bytes: 1508033000
num_examples: 10000
download_size: 9049041434
dataset_size: 9048480300
- config_name: vision_SBFP
features:
- name: image
dtype: image
- name: foreground_label
dtype: int64
- name: background_label
dtype: int64
- name: position_x
dtype: int64
- name: position_y
dtype: int64
splits:
- name: train
num_bytes: 4010519200
num_examples: 50000
- name: validation
num_bytes: 808377090
num_examples: 10000
download_size: 4858077382
dataset_size: 4818896290
- config_name: vision_SBRP
features:
- name: image
dtype: image
- name: foreground_label
dtype: int64
- name: background_label
dtype: int64
- name: position_x
dtype: int64
- name: position_y
dtype: int64
splits:
- name: train
num_bytes: 4015499450
num_examples: 50000
- name: validation
num_bytes: 810681220
num_examples: 10000
download_size: 4874719512
dataset_size: 4826180670
configs:
- config_name: symbolic_simulation
data_files:
- split: train
path: data/symbolic_simulation/train-*
- config_name: audio_RBFP
data_files:
- split: train
path: data/audio/RBFP/train-*
- split: validation
path: data/audio/RBFP/validation-*
- config_name: audio_RBRP
data_files:
- split: train
path: data/audio/RBRP/train-*
- split: validation
path: data/audio/RBRP/validation-*
- config_name: audio_SBFP
data_files:
- split: train
path: data/audio/SBFP/train-*
- split: validation
path: data/audio/SBFP/validation-*
- config_name: audio_SBRP
data_files:
- split: train
path: data/audio/SBRP/train-*
- split: validation
path: data/audio/SBRP/validation-*
- config_name: vision_RBFP
data_files:
- split: train
path: data/vision/RBFP/train-*
- split: validation
path: data/vision/RBFP/validation-*
- config_name: vision_RBRP
data_files:
- split: train
path: data/vision/RBRP/train-*
- split: validation
path: data/vision/RBRP/validation-*
- config_name: vision_SBFP
data_files:
- split: train
path: data/vision/SBFP/train-*
- split: validation
path: data/vision/SBFP/validation-*
- config_name: vision_SBRP
data_files:
- split: train
path: data/vision/SBRP/train-*
- split: validation
path: data/vision/SBRP/validation-*
task_categories:
- feature-extraction
language:
- en
pretty_name: ChaosMining
size_categories:
- 10B<n<100B
---
# Dataset Card for ChaosMining
ChaosMining is a synthetic benchmark dataset for evaluating post-hoc local attribution methods in low signal-to-noise ratio (SNR) environments.
Post-hoc local attribution methods are explainable AI techniques such as Saliency (SA), DeepLift (DL), Integrated Gradients (IG), and Feature Ablation (FA).
The dataset measures how well these methods select predictive features when a large amount of noise is present.
## Dataset Descriptions
The dataset spans three modalities:
- **Symbolic Functional Data**: Human-designed symbolic functions with both predictive and irrelevant features, used to study regression tasks under noise.
- **Vision Data**: 224x224 images that combine 32x32 foreground objects from the CIFAR-10 dataset with either Gaussian-noise or structured flower backgrounds.
- **Audio Data**: Multi-channel audio sequences mixing relevant signals (speech commands) with irrelevant ones (background noise).
### Dataset Sources
Please check out the following:
- **Repository:** [https://github.com/geshijoker/ChaosMining/tree/main](https://github.com/geshijoker/ChaosMining/tree/main) for data curation and evaluation code.
- **Paper:** [https://arxiv.org/pdf/2406.12150](https://arxiv.org/pdf/2406.12150) for details.
### Dataset Details
#### Symbolic Functional Data
- **Synthetic Generation:** Data is derived from predefined mathematical functions, ensuring a clear ground truth for evaluation.
- **Functions:** Human-designed symbolic functions combining primitive mathematical operations (e.g., polynomial, trigonometric, exponential functions).
- **Generation Process:** Each feature is sampled from a normal distribution N(μ, σ²) with μ=0 and σ=1. Targets are computed from the predictive features via the symbolic functions, while irrelevant features are included as noise (see the sketch after this list).
- **Annotations:** Ground truth annotations are generated based on the symbolic functions used to create the data.
- **Normalization:** Data values are normalized to ensure consistency across samples.
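As an illustration of this generation process, here is a minimal sketch; the symbolic function `f`, the feature counts, and the column ordering are hypothetical stand-ins for demonstration, not the exact curation code.
```python
import numpy as np

rng = np.random.default_rng(0)

n_samples = 1000
n_predictive = 3   # features the target actually depends on (assumed)
n_irrelevant = 97  # pure-noise features (assumed)

# Every feature is sampled i.i.d. from N(0, 1).
X = rng.normal(loc=0.0, scale=1.0, size=(n_samples, n_predictive + n_irrelevant))

# A stand-in for one of the human-designed symbolic functions,
# combining polynomial, trigonometric, and exponential primitives.
def f(x):
    return np.sin(x[:, 0]) + x[:, 1] ** 2 + np.exp(-x[:, 2])

# Targets depend only on the predictive features; the irrelevant
# columns are carried along purely as noise.
y = f(X[:, :n_predictive])
```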
#### Vision Data
- **Foreground Images:** CIFAR-10 dataset, containing 32x32 pixel images of common objects.
- **Background Images:** Flower102 dataset and Gaussian noise images.
- **Combination:** Foreground images are overlaid onto background images to create synthetic samples; the foreground is either centered (fixed position) or placed at random (see the sketch after this list).
- **Noise Types:** Backgrounds are generated using Gaussian noise for random noise conditions, or sampled from the Flower102 dataset for structured noise conditions.
- **Annotations:** Each image is annotated with the position of the foreground object and its class label.
- **Splitting:** The dataset is divided into training and validation sets to ensure no data leakage.
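A minimal sketch of the compositing step, assuming PIL images; the helper name and the placement logic are illustrative rather than the repository's exact code, with sizes taken from the description above.
```python
import numpy as np
from PIL import Image

rng = np.random.default_rng(0)

def compose(foreground, background, random_position=True):
    """Paste a 32x32 foreground onto a 224x224 background; return image and (x, y)."""
    canvas = background.resize((224, 224))
    if random_position:
        x = int(rng.integers(0, 224 - 32))
        y = int(rng.integers(0, 224 - 32))
    else:
        x = y = (224 - 32) // 2  # centered, fixed-position condition
    canvas.paste(foreground, (x, y))
    return canvas, x, y

# Gaussian-noise background for the random-noise condition.
noise = rng.normal(128, 50, size=(224, 224, 3)).clip(0, 255).astype(np.uint8)
background = Image.fromarray(noise)
foreground = Image.new('RGB', (32, 32))  # stand-in for a CIFAR-10 image
sample, pos_x, pos_y = compose(foreground, background)
```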
#### Audio Data
- **Foreground Audio:** Speech Command dataset, containing audio clips of spoken commands.
- **Background Audio:** Random noise generated from a normal distribution and samples from the Rainforest Connection Species dataset.
- **Combination:** Each audio sample consists of multiple channels; exactly one channel carries the foreground audio and the rest carry background noise (see the sketch after this list).
- **Noise Conditions:** Background noise is either random (generated from a normal distribution) or structured (sampled from environmental sounds).
- **Annotations:** Each audio sample is annotated with the class label of the foreground audio and the position of the predictive channel.
- **Normalization:** Audio signals are normalized to a consistent range for uniform processing.
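The following is a hedged sketch of how one such multi-channel sample could be assembled, assuming 10 channels of one-second, 16 kHz audio (matching the shapes shown in the loading example below); the helper names are assumptions for illustration.
```python
import numpy as np

rng = np.random.default_rng(0)

N_CHANNELS = 10      # assumed channel count
SAMPLE_RATE = 16000  # 16 kHz, one-second clips

def make_sample(speech):
    """Place one speech clip in a random channel; fill the others with noise."""
    channels = rng.normal(0.0, 1.0, size=(N_CHANNELS, SAMPLE_RATE))  # random-noise condition
    position = int(rng.integers(N_CHANNELS))
    channels[position] = speech
    # Normalize to a consistent range for uniform processing.
    channels /= np.abs(channels).max()
    return channels, position

speech_clip = rng.normal(0.0, 1.0, size=SAMPLE_RATE)  # stand-in for a Speech Commands clip
audio, predictive_channel = make_sample(speech_clip)
```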
### Benchmark Metrics
The benchmark uses a **Model × Attribution × Noise Condition** triplet design to evaluate post-hoc attribution methods across scenarios, reporting two metrics (a minimal computation sketch follows this list):
- **Uniform Score (UScore)**: Prediction accuracy normalized to the range [0, 1].
- **Functional Precision (FPrec)**: The overlap between the top-k attributed features and the ground-truth predictive features.
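As a concrete illustration of FPrec, here is a minimal sketch assuming it is computed as the fraction of the top-k features (ranked by attribution magnitude) that are truly predictive; see the paper for the exact definitions of both metrics.
```python
import numpy as np

def functional_precision(attributions, predictive_idx, k):
    """Fraction of the top-k features (by |attribution|) that are ground-truth predictive."""
    top_k = np.argsort(-np.abs(attributions))[:k]
    return len(set(top_k.tolist()) & set(predictive_idx)) / k

scores = np.array([0.9, 0.05, 0.8, 0.02, 0.7])  # toy attribution scores
print(functional_precision(scores, predictive_idx=[0, 2, 4], k=3))  # 1.0
```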
## Uses
### Dataset Structure
The available configurations are 'symbolic_simulation', 'audio_RBFP', 'audio_RBRP', 'audio_SBFP', 'audio_SBRP', 'vision_RBFP', 'vision_RBRP', 'vision_SBFP', and 'vision_SBRP'.
Pick one of them to load. The 'symbolic_simulation' config has only a 'train' split; all others have both 'train' and 'validation' splits.
### Load Dataset
For general data-loading usage of the Hugging Face API, including how to work with TensorFlow, PyTorch, and JAX, see the [loading guide](https://huggingface.co/docs/datasets/loading).
Below is template code for PyTorch users.
```python
from datasets import load_dataset
from torch.utils.data import DataLoader

# Load the symbolic functional data from Hugging Face datasets
dataset = load_dataset('geshijoker/chaosmining', 'symbolic_simulation')
print(dataset)
# Out: DatasetDict({
#     train: Dataset({
#         features: ['num_var', 'function'],
#         num_rows: 15
#     })
# })

# Read the formulas as a list of (number_of_features, function_string) pairs
formulas = [[data_slice['num_var'], data_slice['function']] for data_slice in dataset['train']]

# Load the vision data from Hugging Face datasets
dataset = load_dataset('geshijoker/chaosmining', 'vision_RBFP', split='validation', streaming=True)
# Convert the Hugging Face dataset to PyTorch format for vision data
dataset = dataset.with_format('torch')
# Use a DataLoader for minibatch loading
dataloader = DataLoader(dataset, batch_size=32)
next(iter(dataloader))
# Out: {'image': torch.Size([32, 3, 224, 224]), 'foreground_label': torch.Size([32]),
#       'position_x': torch.Size([32]), 'position_y': torch.Size([32])}

# Load the audio data from Hugging Face datasets
dataset = load_dataset('geshijoker/chaosmining', 'audio_RBFP', split='validation', streaming=True)

# Define a transformation that flattens the nested 'audio' field
def transform_audio(example):
    # Remove the 'path' field
    del example['audio']['path']
    # Pull 'array' and 'sampling_rate' out of the 'audio' field
    example['sampling_rate'] = example['audio']['sampling_rate']
    example['audio'] = example['audio']['array']
    return example

# Apply the transformation and convert to PyTorch format
dataset = dataset.map(transform_audio)
dataset = dataset.with_format('torch')
# Use a DataLoader for minibatch loading
dataloader = DataLoader(dataset, batch_size=32)
next(iter(dataloader))
# Out: {'audio': torch.Size([32, 10, 16000]), 'sampling_rate': torch.Size([32]),
#       'label': list_of_32, 'file_name': list_of_32}
```
### Curation Rationale
The dataset was created to provide controlled, low signal-to-noise ratio environments for testing the efficacy of post-hoc local attribution methods.
- **Purpose:** To study the effectiveness of neural networks in tasks where relevant features are mixed with noise.
- **Challenges Addressed:** Differentiating between predictive and irrelevant features in a controlled, low-SNR environment.
### Source Data
Synthetic data derived from known public datasets (CIFAR-10, Flower102, Speech Commands, Rainforest Connection Species) and generated noise.
### Citation
If you use this dataset or code in your research, please cite the paper as follows:
**BibTeX:**
```bibtex
@article{shi2024chaosmining,
  title={ChaosMining: A Benchmark to Evaluate Post-Hoc Local Attribution Methods in Low SNR Environments},
  author={Shi, Ge and Kan, Ziwen and Smucny, Jason and Davidson, Ian},
  journal={arXiv preprint arXiv:2406.12150},
  year={2024}
}
```
**APA:**
Shi, G., Kan, Z., Smucny, J., & Davidson, I. (2024). ChaosMining: A Benchmark to Evaluate Post-Hoc Local Attribution Methods in Low SNR Environments. arXiv preprint arXiv:2406.12150.
## Dataset Card Contact
Davidson Lab at UC Davis
- Ian Davidson: indavidson@ucdavis.edu