We can load the labelled training split and display an example tile:

```python
from datasets import load_dataset

ds = load_dataset('mpg-ranch/leafy_spurge', 'labelled', split='train')
ds['image'][405]
```
<img src="https://huggingface.co/datasets/mpg-ranch/leafy_spurge/resolve/main/doc_figures/full_size_tile.png" width="1024px" height="1024px">
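
Before transforming anything, it can help to confirm what each record carries. A minimal sketch (nothing beyond standard `datasets` accessors; the `image`, `label`, and `cluster` columns are the ones used later in this README):

```python
# Inspect the dataset schema and size; expect image, label, and cluster columns
print(ds)
print(ds.features)
```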

We will now center crop the image to the size of the ground truth:

```python
from torchvision.transforms import CenterCrop, Compose

ground_truth_sz = 39

ccrop = Compose([CenterCrop(ground_truth_sz)])

def preproc_transforms(examples):
    examples["pixel_values"] = [ccrop(image.convert("RGB")) for image in examples["image"]]
    return examples

ds = ds.map(preproc_transforms, batched=True)
ds['pixel_values'][405]
```
<img src="https://huggingface.co/datasets/mpg-ranch/leafy_spurge/resolve/main/doc_figures/ground_truth_tile.png" width="39px" height="39px">
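
As a quick sanity check (a short sketch; index 405 is just the example tile shown above), the cropped tiles should now match the ground-truth size:

```python
# PIL reports size as (width, height); expect (39, 39) after the center crop
print(ds['pixel_values'][405].size)
```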

# Geographic splits within the training set
<img src="https://huggingface.co/datasets/mpg-ranch/leafy_spurge/resolve/main/doc_figures/train_clusters.png" width="75%" height="75%">

We gathered ground truth at multiple sites, and observations within a site were geographically clustered. We suggest using the cluster feature to establish holdout sets for cross-validated hyperparameter tuning. This will simulate model performance when classifying leafy spurge at new sites (such as those of the test set). You can filter by cluster metadata as follows:

```python
# Define holdout sets with ground truth clusters; clusters 6 and 7 overlap geographically
holdout_sets = [[0], [1], [2], [4], [5], [6, 7], [8]]

set_0 = ds.filter(lambda example: example['cluster'] in holdout_sets[0])
unq_vals = list(set(set_0['cluster']))
print(f'Unique cluster values in set 0: {unq_vals}')
```
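
Extending the same pattern, a short sketch (assuming the `ds` and `holdout_sets` defined above) that reports how many training examples each holdout set would withhold:

```python
# Count how many examples fall in each candidate holdout set
for i, holdout_set in enumerate(holdout_sets):
    n_held_out = sum(1 for c in ds['cluster'] if c in holdout_set)
    print(f'Set {i} (clusters {holdout_set}): {n_held_out} examples')
```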

# Example cross-validation loop
We will use the geographic cluster feature to cross-validate performance. First, let's reformat the dataset for torch and define some functions for our training loop:

```python
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.models import resnet50, ResNet50_Weights
from tqdm import tqdm
import pandas as pd

ds = ds.with_format("torch")

def train_one_epoch(model, train_loader, criterion, optimizer, device):
    model.train()
    running_loss = 0.0
    correct_predictions = 0
    total_predictions = 0
    for i, batch in enumerate(train_loader):
        inputs = batch['pixel_values'].permute(0, 3, 1, 2).float().to(device)
        labels = batch['label'].to(device)

        optimizer.zero_grad()

        outputs = model(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total_predictions += labels.size(0)
        correct_predictions += (predicted == labels).sum().item()

        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

    train_accuracy = correct_predictions / total_predictions
    train_loss = running_loss / len(train_loader)

    return train_loss, train_accuracy

def evaluate_one_epoch(model, test_loader, criterion, device):
    model.eval()
    running_loss = 0.0
    correct_predictions = 0
    total_predictions = 0
    with torch.no_grad():
        for batch in test_loader:
            inputs = batch['pixel_values'].permute(0, 3, 1, 2).float().to(device)
            labels = batch['label'].to(device)

            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total_predictions += labels.size(0)
            correct_predictions += (predicted == labels).sum().item()

            loss = criterion(outputs, labels)
            running_loss += loss.item()

    test_accuracy = correct_predictions / total_predictions
    test_loss = running_loss / len(test_loader)

    return test_loss, test_accuracy

def cross_val(ds, holdout_set):
    train = ds.filter(lambda example: example['cluster'] not in holdout_set)
    test = ds.filter(lambda example: example['cluster'] in holdout_set)

    # `pretrained=True` is deprecated in recent torchvision; use the weights enum instead
    model = resnet50(weights=ResNet50_Weights.DEFAULT)
    num_classes = len(ds['label'].unique())
    model.fc = nn.Linear(2048, num_classes)

    # Define the loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Define the data loaders
    train_loader = torch.utils.data.DataLoader(train, batch_size=32, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test, batch_size=32, shuffle=False)

    # Train the model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    results = []

    for epoch in range(5):
        train_loss, train_accuracy = train_one_epoch(model, train_loader, criterion, optimizer, device)
        test_loss, test_accuracy = evaluate_one_epoch(model, test_loader, criterion, device)

        results.append({
            'epoch': epoch + 1,
            'train_loss': train_loss,
            'train_accuracy': train_accuracy,
            'test_loss': test_loss,
            'test_accuracy': test_accuracy,
            'holdout_set': holdout_set
        })

    results_df = pd.DataFrame(results)

    return results_df
```
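
To try a single fold before committing to the full loop (a usage sketch; `holdout_sets` is defined earlier in this README):

```python
# Run one fold against the first holdout set and inspect its learning curves
fold_0 = cross_val(ds, holdout_sets[0])
print(fold_0[['epoch', 'train_accuracy', 'test_accuracy']])
```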

Next, we'll sequentially hold out geographic clusters and store performance:
```python
results = []
pbar_holdout = tqdm(holdout_sets, desc="Holdout Sets")
for holdout_set in pbar_holdout:
    results.append(cross_val(ds, holdout_set))
    pbar_holdout.set_postfix_str(f"Completed holdout set {holdout_set}")

results_df = pd.concat(results)
```
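
Before plotting, it can help to look at the per-fold endpoint. A minimal sketch (column names as produced by `cross_val` above) pulling final-epoch test accuracy for each holdout set:

```python
# Final-epoch rows, one per holdout set
final_epoch = results_df[results_df['epoch'] == results_df['epoch'].max()]
print(final_epoch[['holdout_set', 'test_accuracy']])
```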

Finally, we plot the results of geographic cross-validation:
```python
import matplotlib.pyplot as plt

# Group the results by epoch
grouped_results = results_df.groupby('epoch')

# Compute the mean and standard deviation of the test accuracy at each epoch
mean_test_accuracy = grouped_results['test_accuracy'].mean()
std_test_accuracy = grouped_results['test_accuracy'].std()

# Compute a 68% confidence interval (mean plus or minus one standard deviation)
lower_bound = mean_test_accuracy - std_test_accuracy
upper_bound = mean_test_accuracy + std_test_accuracy

# Plot the mean test accuracy
plt.plot(mean_test_accuracy.index, mean_test_accuracy)

# Plot the error ribbon
plt.fill_between(lower_bound.index, lower_bound, upper_bound, color='b', alpha=.1)

# Set the axis labels
plt.xlabel('Epoch')
plt.ylabel('Cross-validated Accuracy')

# Show the plot
plt.show()
```
<img src="https://huggingface.co/datasets/mpg-ranch/leafy_spurge/resolve/main/doc_figures/cluster_cv_fig.png" width="75%" height="75%">
 